Diffstat (limited to 'include')
-rw-r--r--include/acpi/acbuffer.h40
-rw-r--r--include/acpi/acconfig.h44
-rw-r--r--include/acpi/acexcep.h52
-rw-r--r--include/acpi/acnames.h40
-rw-r--r--include/acpi/acoutput.h40
-rw-r--r--include/acpi/acpi.h40
-rw-r--r--include/acpi/acpi_bus.h5
-rw-r--r--include/acpi/acpiosxf.h40
-rw-r--r--include/acpi/acpixf.h69
-rw-r--r--include/acpi/acrestyp.h40
-rw-r--r--include/acpi/actbl.h40
-rw-r--r--include/acpi/actbl1.h1525
-rw-r--r--include/acpi/actbl2.h1969
-rw-r--r--include/acpi/actbl3.h1056
-rw-r--r--include/acpi/actypes.h77
-rw-r--r--include/acpi/acuuid.h40
-rw-r--r--include/acpi/battery.h21
-rw-r--r--include/acpi/nfit.h18
-rw-r--r--include/acpi/platform/acenv.h40
-rw-r--r--include/acpi/platform/acenvex.h40
-rw-r--r--include/acpi/platform/acgcc.h40
-rw-r--r--include/acpi/platform/acgccex.h40
-rw-r--r--include/acpi/platform/acintel.h40
-rw-r--r--include/acpi/platform/aclinux.h44
-rw-r--r--include/acpi/platform/aclinuxex.h40
-rw-r--r--include/asm-generic/5level-fixup.h1
-rw-r--r--include/asm-generic/atomic-instrumented.h476
-rw-r--r--include/asm-generic/atomic.h2
-rw-r--r--include/asm-generic/audit_dir_write.h2
-rw-r--r--include/asm-generic/barrier.h2
-rw-r--r--include/asm-generic/bitops/find.h20
-rw-r--r--include/asm-generic/bitops/lock.h3
-rw-r--r--include/asm-generic/bug.h1
-rw-r--r--include/asm-generic/clkdev.h30
-rw-r--r--include/asm-generic/dma-mapping.h10
-rw-r--r--include/asm-generic/error-injection.h35
-rw-r--r--include/asm-generic/exec.h2
-rw-r--r--include/asm-generic/io.h185
-rw-r--r--include/asm-generic/kvm_para.h5
-rw-r--r--include/asm-generic/pci_iomap.h8
-rw-r--r--include/asm-generic/pgtable-nop4d.h9
-rw-r--r--include/asm-generic/pgtable.h71
-rw-r--r--include/asm-generic/qrwlock_types.h1
-rw-r--r--include/asm-generic/sections.h8
-rw-r--r--include/asm-generic/switch_to.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h25
-rw-r--r--include/clocksource/metag_generic.h21
-rw-r--r--include/clocksource/timer-ti-dm.h394
-rw-r--r--include/crypto/ablk_helper.h32
-rw-r--r--include/crypto/aead.h10
-rw-r--r--include/crypto/algapi.h1
-rw-r--r--include/crypto/chacha20.h3
-rw-r--r--include/crypto/engine.h68
-rw-r--r--include/crypto/hash.h57
-rw-r--r--include/crypto/if_alg.h2
-rw-r--r--include/crypto/internal/hash.h7
-rw-r--r--include/crypto/internal/scompress.h11
-rw-r--r--include/crypto/internal/simd.h7
-rw-r--r--include/crypto/lrw.h44
-rw-r--r--include/crypto/null.h10
-rw-r--r--include/crypto/poly1305.h2
-rw-r--r--include/crypto/salsa20.h27
-rw-r--r--include/crypto/sha3.h6
-rw-r--r--include/crypto/skcipher.h11
-rw-r--r--include/crypto/sm4.h28
-rw-r--r--include/crypto/speck.h62
-rw-r--r--include/crypto/xts.h17
-rw-r--r--include/drm/amd_asic_type.h1
-rw-r--r--include/drm/bridge/analogix_dp.h20
-rw-r--r--include/drm/bridge/dw_hdmi.h24
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h17
-rw-r--r--include/drm/drmP.h197
-rw-r--r--include/drm/drm_atomic.h65
-rw-r--r--include/drm/drm_atomic_helper.h6
-rw-r--r--include/drm/drm_bridge.h35
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/drm/drm_color_mgmt.h31
-rw-r--r--include/drm/drm_connector.h76
-rw-r--r--include/drm/drm_crtc_helper.h1
-rw-r--r--include/drm/drm_device.h9
-rw-r--r--include/drm/drm_dp_helper.h67
-rw-r--r--include/drm/drm_drv.h23
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_encoder.h6
-rw-r--r--include/drm/drm_fb_cma_helper.h13
-rw-r--r--include/drm/drm_fb_helper.h57
-rw-r--r--include/drm/drm_file.h2
-rw-r--r--include/drm/drm_fourcc.h2
-rw-r--r--include/drm/drm_framebuffer.h8
-rw-r--r--include/drm/drm_gem.h15
-rw-r--r--include/drm/drm_gem_cma_helper.h16
-rw-r--r--include/drm/drm_hdcp.h41
-rw-r--r--include/drm/drm_mm.h2
-rw-r--r--include/drm/drm_mode_config.h39
-rw-r--r--include/drm/drm_mode_object.h24
-rw-r--r--include/drm/drm_modes.h24
-rw-r--r--include/drm/drm_modeset_helper.h3
-rw-r--r--include/drm/drm_modeset_helper_vtables.h3
-rw-r--r--include/drm/drm_plane.h46
-rw-r--r--include/drm/drm_plane_helper.h6
-rw-r--r--include/drm/drm_prime.h22
-rw-r--r--include/drm/drm_print.h218
-rw-r--r--include/drm/drm_property.h26
-rw-r--r--include/drm/drm_simple_kms_helper.h53
-rw-r--r--include/drm/drm_syncobj.h36
-rw-r--r--include/drm/drm_utils.h15
-rw-r--r--include/drm/drm_vblank.h20
-rw-r--r--include/drm/drm_vma_manager.h2
-rw-r--r--include/drm/gpu_scheduler.h173
-rw-r--r--include/drm/gpu_scheduler_trace.h82
-rw-r--r--include/drm/i915_component.h3
-rw-r--r--include/drm/i915_drm.h3
-rw-r--r--include/drm/i915_pciids.h69
-rw-r--r--include/drm/intel-gtt.h3
-rw-r--r--include/drm/spsc_queue.h122
-rw-r--r--include/drm/tinydrm/ili9341.h54
-rw-r--r--include/drm/tinydrm/mipi-dbi.h9
-rw-r--r--include/drm/tinydrm/tinydrm-helpers.h4
-rw-r--r--include/drm/tinydrm/tinydrm.h8
-rw-r--r--include/drm/ttm/ttm_bo_api.h187
-rw-r--r--include/drm/ttm/ttm_bo_driver.h365
-rw-r--r--include/drm/ttm/ttm_memory.h80
-rw-r--r--include/drm/ttm/ttm_page_alloc.h11
-rw-r--r--include/drm/ttm/ttm_tt.h272
-rw-r--r--include/dt-bindings/bus/ti-sysc.h22
-rw-r--r--include/dt-bindings/clock/am3.h108
-rw-r--r--include/dt-bindings/clock/am4.h113
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h52
-rw-r--r--include/dt-bindings/clock/axg-clkc.h71
-rw-r--r--include/dt-bindings/clock/dm814.h45
-rw-r--r--include/dt-bindings/clock/dm816.h53
-rw-r--r--include/dt-bindings/clock/dra7.h172
-rw-r--r--include/dt-bindings/clock/hi3660-clock.h7
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h5
-rw-r--r--include/dt-bindings/clock/jz4770-cgu.h58
-rw-r--r--include/dt-bindings/clock/omap5.h118
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq8074.h222
-rw-r--r--include/dt-bindings/clock/sprd,sc9860-clk.h404
-rw-r--r--include/dt-bindings/clock/tegra194-clock.h321
-rw-r--r--include/dt-bindings/gpio/aspeed-gpio.h49
-rw-r--r--include/dt-bindings/gpio/gpio.h6
-rw-r--r--include/dt-bindings/gpio/meson-axg-gpio.h116
-rw-r--r--include/dt-bindings/gpio/meson8b-gpio.h121
-rw-r--r--include/dt-bindings/gpio/tegra194-gpio.h61
-rw-r--r--include/dt-bindings/input/gpio-keys.h13
-rw-r--r--include/dt-bindings/media/tda1997x.h74
-rw-r--r--include/dt-bindings/memory/tegra186-mc.h111
-rw-r--r--include/dt-bindings/mfd/stm32f7-rcc.h1
-rw-r--r--include/dt-bindings/net/ti-dp83867.h14
-rw-r--r--include/dt-bindings/pinctrl/am43xx.h3
-rw-r--r--include/dt-bindings/pinctrl/mt7623-pinfunc.h24
-rw-r--r--include/dt-bindings/pinctrl/stm32-pinfunc.h6
-rw-r--r--include/dt-bindings/power/mt2712-power.h29
-rw-r--r--include/dt-bindings/power/mt7623a-power.h10
-rw-r--r--include/dt-bindings/power/owl-s700-powergate.h19
-rw-r--r--include/dt-bindings/power/r8a77965-sysc.h30
-rw-r--r--include/dt-bindings/power/r8a77980-sysc.h43
-rw-r--r--include/dt-bindings/power/tegra194-powergate.h35
-rw-r--r--include/dt-bindings/reset/amlogic,meson-axg-reset.h124
-rw-r--r--include/dt-bindings/reset/stm32mp1-resets.h108
-rw-r--r--include/dt-bindings/reset/tegra194-reset.h152
-rw-r--r--include/dt-bindings/sound/rt5651.h15
-rw-r--r--include/kvm/arm_arch_timer.h2
-rw-r--r--include/kvm/arm_psci.h51
-rw-r--r--include/kvm/arm_vgic.h28
-rw-r--r--include/linux/acpi.h35
-rw-r--r--include/linux/acpi_iort.h7
-rw-r--r--include/linux/arch_topology.h2
-rw-r--r--include/linux/arm-smccc.h165
-rw-r--r--include/linux/arm_sdei.h79
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/atalk.h2
-rw-r--r--include/linux/audit.h6
-rw-r--r--include/linux/avf/virtchnl.h107
-rw-r--r--include/linux/backing-dev.h18
-rw-r--r--include/linux/backlight.h58
-rw-r--r--include/linux/bfin_mac.h30
-rw-r--r--include/linux/binfmts.h3
-rw-r--r--include/linux/bio.h28
-rw-r--r--include/linux/bitfield.h46
-rw-r--r--include/linux/bitmap.h85
-rw-r--r--include/linux/blk-cgroup.h9
-rw-r--r--include/linux/blk-mq-pci.h3
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h51
-rw-r--r--include/linux/blkdev.h295
-rw-r--r--include/linux/bootmem.h9
-rw-r--r--include/linux/bpf-cgroup.h68
-rw-r--r--include/linux/bpf.h100
-rw-r--r--include/linux/bpf_types.h5
-rw-r--r--include/linux/bpf_verifier.h76
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/bsg-lib.h7
-rw-r--r--include/linux/bsg.h35
-rw-r--r--include/linux/buffer_head.h6
-rw-r--r--include/linux/build_bug.h2
-rw-r--r--include/linux/bvec.h9
-rw-r--r--include/linux/byteorder/generic.h17
-rw-r--r--include/linux/can/dev.h7
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/ceph/ceph_fs.h17
-rw-r--r--include/linux/ceph/libceph.h1
-rw-r--r--include/linux/ceph/messenger.h101
-rw-r--r--include/linux/ceph/osd_client.h19
-rw-r--r--include/linux/ceph/osdmap.h6
-rw-r--r--include/linux/ceph/striper.h69
-rw-r--r--include/linux/cgroup-defs.h8
-rw-r--r--include/linux/clk-provider.h43
-rw-r--r--include/linux/clk.h62
-rw-r--r--include/linux/clk/ti.h1
-rw-r--r--include/linux/clkdev.h7
-rw-r--r--include/linux/compat.h770
-rw-r--r--include/linux/compiler-clang.h14
-rw-r--r--include/linux/compiler-gcc.h38
-rw-r--r--include/linux/compiler.h31
-rw-r--r--include/linux/console.h58
-rw-r--r--include/linux/const.h9
-rw-r--r--include/linux/cper.h48
-rw-r--r--include/linux/cpu_cooling.h75
-rw-r--r--include/linux/cpufreq.h128
-rw-r--r--include/linux/cpuhotplug.h6
-rw-r--r--include/linux/cpuidle.h54
-rw-r--r--include/linux/cpumask.h4
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/crash_dump.h12
-rw-r--r--include/linux/crc-ccitt.h7
-rw-r--r--include/linux/crc32c.h1
-rw-r--r--include/linux/crypto.h18
-rw-r--r--include/linux/dax.h44
-rw-r--r--include/linux/dcache.h8
-rw-r--r--include/linux/device-mapper.h70
-rw-r--r--include/linux/device.h35
-rw-r--r--include/linux/dm-bufio.h148
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/dma-direct.h64
-rw-r--r--include/linux/dma-fence-array.h3
-rw-r--r--include/linux/dma-fence.h2
-rw-r--r--include/linux/dma-mapping.h38
-rw-r--r--include/linux/dmaengine.h4
-rw-r--r--include/linux/dmapool.h30
-rw-r--r--include/linux/dmi.h4
-rw-r--r--include/linux/dsa/lan9303.h3
-rw-r--r--include/linux/edac.h3
-rw-r--r--include/linux/efi.h48
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/error-injection.h27
-rw-r--r--include/linux/errseq.h2
-rw-r--r--include/linux/etherdevice.h2
-rw-r--r--include/linux/ethtool.h5
-rw-r--r--include/linux/eventfd.h14
-rw-r--r--include/linux/extcon.h6
-rw-r--r--include/linux/extcon/extcon-gpio.h47
-rw-r--r--include/linux/f2fs_fs.h34
-rw-r--r--include/linux/fault-inject.h5
-rw-r--r--include/linux/fb.h9
-rw-r--r--include/linux/fdtable.h5
-rw-r--r--include/linux/filter.h72
-rw-r--r--include/linux/firmware.h3
-rw-r--r--include/linux/fpga/fpga-bridge.h14
-rw-r--r--include/linux/fpga/fpga-mgr.h39
-rw-r--r--include/linux/fpga/fpga-region.h40
-rw-r--r--include/linux/fs.h62
-rw-r--r--include/linux/fscache-cache.h28
-rw-r--r--include/linux/fscache.h142
-rw-r--r--include/linux/fscrypt.h174
-rw-r--r--include/linux/fscrypt_notsupp.h59
-rw-r--r--include/linux/fscrypt_supp.h68
-rw-r--r--include/linux/fsl/mc.h562
-rw-r--r--include/linux/fsl_ifc.h6
-rw-r--r--include/linux/fsnotify_backend.h6
-rw-r--r--include/linux/ftrace.h12
-rw-r--r--include/linux/futex.h13
-rw-r--r--include/linux/fwnode.h4
-rw-r--r--include/linux/genetlink.h3
-rw-r--r--include/linux/genhd.h9
-rw-r--r--include/linux/genl_magic_func.h12
-rw-r--r--include/linux/gpio.h10
-rw-r--r--include/linux/gpio/consumer.h25
-rw-r--r--include/linux/gpio/driver.h19
-rw-r--r--include/linux/gpio/machine.h4
-rw-r--r--include/linux/gpio_keys.h2
-rw-r--r--include/linux/hid.h87
-rw-r--r--include/linux/highmem.h4
-rw-r--r--include/linux/hil_mlc.h6
-rw-r--r--include/linux/hmm.h222
-rw-r--r--include/linux/hp_sdc.h2
-rw-r--r--include/linux/hrtimer.h114
-rw-r--r--include/linux/hugetlb.h21
-rw-r--r--include/linux/hw_breakpoint.h7
-rw-r--r--include/linux/hwmon.h1
-rw-r--r--include/linux/hyperv.h25
-rw-r--r--include/linux/hypervisor.h17
-rw-r--r--include/linux/i2c-pca-platform.h3
-rw-r--r--include/linux/i2c.h142
-rw-r--r--include/linux/i7300_idle.h84
-rw-r--r--include/linux/ide.h8
-rw-r--r--include/linux/idr.h186
-rw-r--r--include/linux/ieee80211.h14
-rw-r--r--include/linux/if_link.h2
-rw-r--r--include/linux/if_macvlan.h2
-rw-r--r--include/linux/if_tap.h6
-rw-r--r--include/linux/if_tun.h25
-rw-r--r--include/linux/if_vlan.h103
-rw-r--r--include/linux/iio/adc/stm32-dfsdm-adc.h18
-rw-r--r--include/linux/iio/consumer.h37
-rw-r--r--include/linux/iio/hw-consumer.h21
-rw-r--r--include/linux/iio/iio.h32
-rw-r--r--include/linux/iio/machine.h7
-rw-r--r--include/linux/iio/trigger.h3
-rw-r--r--include/linux/iio/types.h28
-rw-r--r--include/linux/inet.h1
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/init.h9
-rw-r--r--include/linux/init_task.h258
-rw-r--r--include/linux/input/gpio_tilt.h74
-rw-r--r--include/linux/integrity.h1
-rw-r--r--include/linux/intel-iommu.h14
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/iommu.h14
-rw-r--r--include/linux/ioport.h2
-rw-r--r--include/linux/ipc.h2
-rw-r--r--include/linux/ipmi-fru.h3
-rw-r--r--include/linux/ipmi.h21
-rw-r--r--include/linux/ipmi_smi.h21
-rw-r--r--include/linux/irq.h23
-rw-r--r--include/linux/irq_work.h11
-rw-r--r--include/linux/irqchip/arm-gic-v3.h5
-rw-r--r--include/linux/irqchip/arm-gic.h1
-rw-r--r--include/linux/irqchip/metag-ext.h34
-rw-r--r--include/linux/irqchip/metag.h25
-rw-r--r--include/linux/irqflags.h2
-rw-r--r--include/linux/iversion.h337
-rw-r--r--include/linux/jbd2.h431
-rw-r--r--include/linux/jiffies.h7
-rw-r--r--include/linux/jump_label.h14
-rw-r--r--include/linux/kallsyms.h74
-rw-r--r--include/linux/kasan.h21
-rw-r--r--include/linux/kconfig.h6
-rw-r--r--include/linux/kcore.h1
-rw-r--r--include/linux/kernel.h96
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/kfifo.h11
-rw-r--r--include/linux/kobject.h3
-rw-r--r--include/linux/kobject_ns.h3
-rw-r--r--include/linux/kvm_host.h20
-rw-r--r--include/linux/kvm_para.h5
-rw-r--r--include/linux/led-class-flash.h4
-rw-r--r--include/linux/leds.h4
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/libfdt.h1
-rw-r--r--include/linux/libfdt_env.h6
-rw-r--r--include/linux/libnvdimm.h15
-rw-r--r--include/linux/libps2.h38
-rw-r--r--include/linux/lightnvm.h399
-rw-r--r--include/linux/linux_logo.h3
-rw-r--r--include/linux/list_lru.h3
-rw-r--r--include/linux/livepatch.h4
-rw-r--r--include/linux/lockd/lockd.h9
-rw-r--r--include/linux/lockdep.h7
-rw-r--r--include/linux/lockref.h2
-rw-r--r--include/linux/logic_pio.h123
-rw-r--r--include/linux/lsm_hooks.h507
-rw-r--r--include/linux/mdio.h8
-rw-r--r--include/linux/memblock.h18
-rw-r--r--include/linux/memcontrol.h187
-rw-r--r--include/linux/memory.h3
-rw-r--r--include/linux/memory_hotplug.h85
-rw-r--r--include/linux/memremap.h77
-rw-r--r--include/linux/mfd/axp20x.h7
-rw-r--r--include/linux/mfd/cros_ec.h4
-rw-r--r--include/linux/mfd/cros_ec_commands.h25
-rw-r--r--include/linux/mfd/da9055/pdata.h5
-rw-r--r--include/linux/mfd/palmas.h3
-rw-r--r--include/linux/mfd/rave-sp.h60
-rw-r--r--include/linux/mfd/samsung/rtc.h11
-rw-r--r--include/linux/mfd/stm32-lptimer.h6
-rw-r--r--include/linux/mfd/stm32-timers.h4
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h2
-rw-r--r--include/linux/mfd/tmio.h21
-rw-r--r--include/linux/migrate.h9
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx5/accel.h144
-rw-r--r--include/linux/mlx5/cq.h20
-rw-r--r--include/linux/mlx5/device.h35
-rw-r--r--include/linux/mlx5/driver.h158
-rw-r--r--include/linux/mlx5/eswitch.h58
-rw-r--r--include/linux/mlx5/fs.h16
-rw-r--r--include/linux/mlx5/fs_helpers.h142
-rw-r--r--include/linux/mlx5/mlx5_ifc.h305
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h92
-rw-r--r--include/linux/mlx5/port.h6
-rw-r--r--include/linux/mlx5/qp.h12
-rw-r--r--include/linux/mlx5/transobj.h25
-rw-r--r--include/linux/mlx5/vport.h7
-rw-r--r--include/linux/mm.h123
-rw-r--r--include/linux/mm_inline.h6
-rw-r--r--include/linux/mm_types.h156
-rw-r--r--include/linux/mman.h2
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/slot-gpio.h2
-rw-r--r--include/linux/mmdebug.h8
-rw-r--r--include/linux/mmu_notifier.h30
-rw-r--r--include/linux/mmzone.h28
-rw-r--r--include/linux/mod_devicetable.h19
-rw-r--r--include/linux/module.h27
-rw-r--r--include/linux/mroute.h117
-rw-r--r--include/linux/mroute6.h70
-rw-r--r--include/linux/mroute_base.h474
-rw-r--r--include/linux/msg.h18
-rw-r--r--include/linux/mtd/bbm.h2
-rw-r--r--include/linux/mtd/map.h130
-rw-r--r--include/linux/mtd/mtd.h47
-rw-r--r--include/linux/mtd/nand.h731
-rw-r--r--include/linux/mtd/nand_ecc.h2
-rw-r--r--include/linux/mtd/ndfc.h2
-rw-r--r--include/linux/mtd/partitions.h1
-rw-r--r--include/linux/mtd/rawnand.h549
-rw-r--r--include/linux/mtd/spi-nor.h12
-rw-r--r--include/linux/mutex.h10
-rw-r--r--include/linux/mux/consumer.h5
-rw-r--r--include/linux/mux/driver.h5
-rw-r--r--include/linux/nd.h6
-rw-r--r--include/linux/net.h12
-rw-r--r--include/linux/net_dim.h380
-rw-r--r--include/linux/netdev_features.h5
-rw-r--r--include/linux/netdevice.h185
-rw-r--r--include/linux/netfilter.h116
-rw-r--r--include/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/linux/netfilter/ipset/ip_set_counter.h25
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h3
-rw-r--r--include/linux/netfilter/x_tables.h9
-rw-r--r--include/linux/netfilter_defs.h12
-rw-r--r--include/linux/netfilter_ipv4.h46
-rw-r--r--include/linux/netfilter_ipv6.h19
-rw-r--r--include/linux/nfs4.h12
-rw-r--r--include/linux/node.h4
-rw-r--r--include/linux/nospec.h58
-rw-r--r--include/linux/ntb.h51
-rw-r--r--include/linux/nubus.h189
-rw-r--r--include/linux/nvme.h22
-rw-r--r--include/linux/nvmem-provider.h42
-rw-r--r--include/linux/of.h31
-rw-r--r--include/linux/of_dma.h5
-rw-r--r--include/linux/of_fdt.h18
-rw-r--r--include/linux/of_gpio.h8
-rw-r--r--include/linux/of_graph.h5
-rw-r--r--include/linux/of_iommu.h5
-rw-r--r--include/linux/of_net.h6
-rw-r--r--include/linux/of_pci.h25
-rw-r--r--include/linux/of_pdt.h6
-rw-r--r--include/linux/of_platform.h7
-rw-r--r--include/linux/omap-gpmc.h28
-rw-r--r--include/linux/page-flags.h27
-rw-r--r--include/linux/page-isolation.h3
-rw-r--r--include/linux/page_ref.h3
-rw-r--r--include/linux/pagemap.h4
-rw-r--r--include/linux/pagevec.h6
-rw-r--r--include/linux/pci-aspm.h35
-rw-r--r--include/linux/pci-dma-compat.h27
-rw-r--r--include/linux/pci-ecam.h13
-rw-r--r--include/linux/pci-ep-cfs.h5
-rw-r--r--include/linux/pci-epc.h51
-rw-r--r--include/linux/pci-epf.h7
-rw-r--r--include/linux/pci.h500
-rw-r--r--include/linux/pci_hotplug.h16
-rw-r--r--include/linux/pci_ids.h11
-rw-r--r--include/linux/pcieport_if.h71
-rw-r--r--include/linux/percpu-refcount.h24
-rw-r--r--include/linux/percpu_counter.h6
-rw-r--r--include/linux/perf/arm_pmu.h26
-rw-r--r--include/linux/perf_event.h44
-rw-r--r--include/linux/pfn_t.h13
-rw-r--r--include/linux/phy.h159
-rw-r--r--include/linux/phy/phy.h31
-rw-r--r--include/linux/phy_fixed.h9
-rw-r--r--include/linux/phylink.h212
-rw-r--r--include/linux/pinctrl/devinfo.h2
-rw-r--r--include/linux/pinctrl/pinconf-generic.h2
-rw-r--r--include/linux/pinctrl/pinctrl.h1
-rw-r--r--include/linux/pipe_fs_i.h5
-rw-r--r--include/linux/platform_data/asoc-ti-mcbsp.h12
-rw-r--r--include/linux/platform_data/at24.h2
-rw-r--r--include/linux/platform_data/bfin_rotary.h117
-rw-r--r--include/linux/platform_data/dmtimer-omap.h38
-rw-r--r--include/linux/platform_data/gpio-htc-egpio.h2
-rw-r--r--include/linux/platform_data/gpio-omap.h5
-rw-r--r--include/linux/platform_data/i2c-davinci.h5
-rw-r--r--include/linux/platform_data/i2c-pxa.h (renamed from include/linux/i2c/pxa-i2c.h)11
-rw-r--r--include/linux/platform_data/mlxcpld-hotplug.h99
-rw-r--r--include/linux/platform_data/mlxreg.h148
-rw-r--r--include/linux/platform_data/mms114.h24
-rw-r--r--include/linux/platform_data/mtd-nand-pxa3xx.h43
-rw-r--r--include/linux/platform_data/mtd-onenand-omap2.h34
-rw-r--r--include/linux/platform_data/phy-da8xx-usb.h21
-rw-r--r--include/linux/platform_data/pinctrl-adi2.h40
-rw-r--r--include/linux/platform_data/pm33xx.h42
-rw-r--r--include/linux/platform_data/si5351.h2
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h8
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h6
-rw-r--r--include/linux/platform_data/ti-sysc.h136
-rw-r--r--include/linux/pm.h16
-rw-r--r--include/linux/pm_wakeup.h7
-rw-r--r--include/linux/poll.h37
-rw-r--r--include/linux/posix-clock.h2
-rw-r--r--include/linux/posix-timers.h25
-rw-r--r--include/linux/posix_acl.h7
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/power/smartreflex.h10
-rw-r--r--include/linux/power_supply.h2
-rw-r--r--include/linux/printk.h7
-rw-r--r--include/linux/proc_ns.h3
-rw-r--r--include/linux/property.h23
-rw-r--r--include/linux/psci.h17
-rw-r--r--include/linux/psp-sev.h606
-rw-r--r--include/linux/pstore_ram.h1
-rw-r--r--include/linux/ptp_classify.h4
-rw-r--r--include/linux/ptr_ring.h101
-rw-r--r--include/linux/qcom_scm.h3
-rw-r--r--include/linux/qed/common_hsi.h1264
-rw-r--r--include/linux/qed/eth_common.h396
-rw-r--r--include/linux/qed/fcoe_common.h940
-rw-r--r--include/linux/qed/iscsi_common.h1585
-rw-r--r--include/linux/qed/iwarp_common.h17
-rw-r--r--include/linux/qed/qed_eth_if.h38
-rw-r--r--include/linux/qed/qed_if.h57
-rw-r--r--include/linux/qed/qed_iscsi_if.h2
-rw-r--r--include/linux/qed/qed_ll2_if.h2
-rw-r--r--include/linux/qed/rdma_common.h27
-rw-r--r--include/linux/qed/roce_common.h18
-rw-r--r--include/linux/qed/storage_common.h91
-rw-r--r--include/linux/qed/tcp_common.h165
-rw-r--r--include/linux/quota.h1
-rw-r--r--include/linux/quotaops.h3
-rw-r--r--include/linux/radix-tree.h31
-rw-r--r--include/linux/raid/pq.h5
-rw-r--r--include/linux/raid_class.h1
-rw-r--r--include/linux/random.h4
-rw-r--r--include/linux/rcupdate.h35
-rw-r--r--include/linux/rcutiny.h1
-rw-r--r--include/linux/rcutree.h1
-rw-r--r--include/linux/refcount.h2
-rw-r--r--include/linux/regmap.h71
-rw-r--r--include/linux/regulator/da9211.h4
-rw-r--r--include/linux/regulator/driver.h5
-rw-r--r--include/linux/regulator/machine.h37
-rw-r--r--include/linux/remoteproc.h46
-rw-r--r--include/linux/reservation.h23
-rw-r--r--include/linux/reset-controller.h30
-rw-r--r--include/linux/reset.h50
-rw-r--r--include/linux/rhashtable.h50
-rw-r--r--include/linux/ring_buffer.h19
-rw-r--r--include/linux/rpmsg.h4
-rw-r--r--include/linux/rtc.h30
-rw-r--r--include/linux/rtnetlink.h22
-rw-r--r--include/linux/rtsx_common.h (renamed from include/linux/mfd/rtsx_common.h)0
-rw-r--r--include/linux/rtsx_pci.h (renamed from include/linux/mfd/rtsx_pci.h)248
-rw-r--r--include/linux/rtsx_usb.h (renamed from include/linux/mfd/rtsx_usb.h)0
-rw-r--r--include/linux/sbitmap.h8
-rw-r--r--include/linux/scatterlist.h52
-rw-r--r--include/linux/sched.h57
-rw-r--r--include/linux/sched/cpufreq.h7
-rw-r--r--include/linux/sched/deadline.h6
-rw-r--r--include/linux/sched/isolation.h1
-rw-r--r--include/linux/sched/mm.h66
-rw-r--r--include/linux/sched/nohz.h6
-rw-r--r--include/linux/sched/signal.h30
-rw-r--r--include/linux/sched/task.h14
-rw-r--r--include/linux/sched/task_stack.h2
-rw-r--r--include/linux/sched/topology.h12
-rw-r--r--include/linux/sched/user.h4
-rw-r--r--include/linux/scif.h20
-rw-r--r--include/linux/scmi_protocol.h277
-rw-r--r--include/linux/sctp.h37
-rw-r--r--include/linux/seccomp.h8
-rw-r--r--include/linux/security.h97
-rw-r--r--include/linux/sem.h40
-rw-r--r--include/linux/semaphore.h2
-rw-r--r--include/linux/seq_file.h20
-rw-r--r--include/linux/seqlock.h3
-rw-r--r--include/linux/serdev.h17
-rw-r--r--include/linux/serial_core.h12
-rw-r--r--include/linux/serial_s3c.h17
-rw-r--r--include/linux/set_memory.h12
-rw-r--r--include/linux/sfp.h112
-rw-r--r--include/linux/sh_eth.h3
-rw-r--r--include/linux/shm.h22
-rw-r--r--include/linux/shmem_fs.h6
-rw-r--r--include/linux/signal.h15
-rw-r--r--include/linux/siox.h77
-rw-r--r--include/linux/sizes.h4
-rw-r--r--include/linux/skb_array.h7
-rw-r--r--include/linux/skbuff.h56
-rw-r--r--include/linux/slab.h53
-rw-r--r--include/linux/slab_def.h3
-rw-r--r--include/linux/slimbus.h164
-rw-r--r--include/linux/slub_def.h27
-rw-r--r--include/linux/soc/brcmstb/brcmstb.h6
-rw-r--r--include/linux/soc/mediatek/infracfg.h11
-rw-r--r--include/linux/soc/qcom/mdt_loader.h3
-rw-r--r--include/linux/soc/qcom/qmi.h271
-rw-r--r--include/linux/soc/qcom/smd-rpm.h1
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h5
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h6
-rw-r--r--include/linux/socket.h38
-rw-r--r--include/linux/sound.h2
-rw-r--r--include/linux/soundwire/sdw.h479
-rw-r--r--include/linux/soundwire/sdw_intel.h24
-rw-r--r--include/linux/soundwire/sdw_registers.h194
-rw-r--r--include/linux/soundwire/sdw_type.h19
-rw-r--r--include/linux/spi/spi_gpio.h49
-rw-r--r--include/linux/spinlock.h6
-rw-r--r--include/linux/srcu.h4
-rw-r--r--include/linux/srcutree.h8
-rw-r--r--include/linux/stddef.h10
-rw-r--r--include/linux/stm.h10
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/sunrpc/svc.h6
-rw-r--r--include/linux/sunrpc/svc_rdma.h5
-rw-r--r--include/linux/sunrpc/svc_xprt.h6
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/suspend.h30
-rw-r--r--include/linux/swap.h42
-rw-r--r--include/linux/swiotlb.h16
-rw-r--r--include/linux/switchtec.h26
-rw-r--r--include/linux/sync_core.h21
-rw-r--r--include/linux/syscalls.h1369
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/sysfs.h14
-rw-r--r--include/linux/tcp.h13
-rw-r--r--include/linux/tee_drv.h196
-rw-r--r--include/linux/thunderbolt.h19
-rw-r--r--include/linux/ti-emif-sram.h69
-rw-r--r--include/linux/tick.h29
-rw-r--r--include/linux/time32.h1
-rw-r--r--include/linux/timekeeper_internal.h4
-rw-r--r--include/linux/timekeeping.h37
-rw-r--r--include/linux/torture.h8
-rw-r--r--include/linux/tpm.h41
-rw-r--r--include/linux/tpm_eventlog.h124
-rw-r--r--include/linux/trace_events.h71
-rw-r--r--include/linux/tracepoint-defs.h6
-rw-r--r--include/linux/tracepoint.h5
-rw-r--r--include/linux/tty.h3
-rw-r--r--include/linux/tty_ldisc.h2
-rw-r--r--include/linux/types.h2
-rw-r--r--include/linux/u64_stats_sync.h22
-rw-r--r--include/linux/uaccess.h8
-rw-r--r--include/linux/usb.h7
-rw-r--r--include/linux/usb/audio-v2.h8
-rw-r--r--include/linux/usb/audio-v3.h395
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/gadget.h15
-rw-r--r--include/linux/usb/hcd.h8
-rw-r--r--include/linux/usb/musb.h7
-rw-r--r--include/linux/usb/of.h21
-rw-r--r--include/linux/usb/pd.h187
-rw-r--r--include/linux/usb/pd_ado.h42
-rw-r--r--include/linux/usb/pd_ext_sdb.h31
-rw-r--r--include/linux/usb/pd_vdo.h2
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/usb/renesas_usbhs.h9
-rw-r--r--include/linux/usb/role.h53
-rw-r--r--include/linux/usb/tcpm.h31
-rw-r--r--include/linux/usb/tilegx.h35
-rw-r--r--include/linux/usb/typec.h28
-rw-r--r--include/linux/usb/typec_mux.h55
-rw-r--r--include/linux/utsname.h6
-rw-r--r--include/linux/uuid.h1
-rw-r--r--include/linux/vbox_utils.h79
-rw-r--r--include/linux/vfio.h3
-rw-r--r--include/linux/vga_switcheroo.h6
-rw-r--r--include/linux/visorbus.h344
-rw-r--r--include/linux/vmstat.h28
-rw-r--r--include/linux/w1-gpio.h9
-rw-r--r--include/linux/wait.h124
-rw-r--r--include/linux/wait_bit.h95
-rw-r--r--include/linux/workqueue.h25
-rw-r--r--include/linux/xarray.h24
-rw-r--r--include/linux/zpool.h2
-rw-r--r--include/linux/zsmalloc.h2
-rw-r--r--include/media/blackfin/bfin_capture.h39
-rw-r--r--include/media/blackfin/ppi.h94
-rw-r--r--include/media/cec-notifier.h14
-rw-r--r--include/media/cec-pin.h14
-rw-r--r--include/media/cec.h44
-rw-r--r--include/media/demux.h600
-rw-r--r--include/media/dmxdev.h214
-rw-r--r--include/media/drv-intf/cx2341x.h144
-rw-r--r--include/media/drv-intf/exynos-fimc.h3
-rw-r--r--include/media/drv-intf/msp3400.h62
-rw-r--r--include/media/drv-intf/renesas-ceu.h26
-rw-r--r--include/media/drv-intf/saa7146.h2
-rw-r--r--include/media/dvb-usb-ids.h424
-rw-r--r--include/media/dvb_ca_en50221.h142
-rw-r--r--include/media/dvb_demux.h354
-rw-r--r--include/media/dvb_frontend.h795
-rw-r--r--include/media/dvb_math.h66
-rw-r--r--include/media/dvb_net.h93
-rw-r--r--include/media/dvb_ringbuffer.h280
-rw-r--r--include/media/dvb_vb2.h280
-rw-r--r--include/media/dvbdev.h468
-rw-r--r--include/media/i2c-addr.h43
-rw-r--r--include/media/i2c/ad9389b.h14
-rw-r--r--include/media/i2c/adv7511.h14
-rw-r--r--include/media/i2c/adv7604.h15
-rw-r--r--include/media/i2c/adv7842.h15
-rw-r--r--include/media/i2c/as3645a.h66
-rw-r--r--include/media/i2c/bt819.h4
-rw-r--r--include/media/i2c/ir-kbd-i2c.h6
-rw-r--r--include/media/i2c/m52790.h52
-rw-r--r--include/media/i2c/mt9t112.h17
-rw-r--r--include/media/i2c/ov772x.h6
-rw-r--r--include/media/i2c/saa6588.h1
-rw-r--r--include/media/i2c/saa7115.h12
-rw-r--r--include/media/i2c/tc358743.h18
-rw-r--r--include/media/i2c/tda1997x.h42
-rw-r--r--include/media/i2c/ths7303.h10
-rw-r--r--include/media/i2c/tvaudio.h17
-rw-r--r--include/media/i2c/tw9910.h9
-rw-r--r--include/media/i2c/uda1342.h15
-rw-r--r--include/media/i2c/upd64031a.h6
-rw-r--r--include/media/lirc.h1
-rw-r--r--include/media/lirc_dev.h192
-rw-r--r--include/media/media-devnode.h2
-rw-r--r--include/media/media-entity.h11
-rw-r--r--include/media/rc-core.h80
-rw-r--r--include/media/rc-map.h63
-rw-r--r--include/media/soc_camera.h2
-rw-r--r--include/media/tpg/v4l2-tpg.h (renamed from include/media/v4l2-tpg.h)59
-rw-r--r--include/media/tuner-types.h15
-rw-r--r--include/media/v4l2-async.h39
-rw-r--r--include/media/v4l2-common.h188
-rw-r--r--include/media/v4l2-ctrls.h17
-rw-r--r--include/media/v4l2-dev.h142
-rw-r--r--include/media/v4l2-device.h246
-rw-r--r--include/media/v4l2-dv-timings.h52
-rw-r--r--include/media/v4l2-event.h36
-rw-r--r--include/media/v4l2-fh.h1
-rw-r--r--include/media/v4l2-flash-led-class.h12
-rw-r--r--include/media/v4l2-fwnode.h12
-rw-r--r--include/media/v4l2-mediabus.h80
-rw-r--r--include/media/v4l2-mem2mem.h4
-rw-r--r--include/media/v4l2-rect.h14
-rw-r--r--include/media/v4l2-subdev.h267
-rw-r--r--include/media/v4l2-tpg-colors.h68
-rw-r--r--include/media/videobuf-core.h2
-rw-r--r--include/media/videobuf-dvb.h10
-rw-r--r--include/media/videobuf2-core.h550
-rw-r--r--include/media/videobuf2-dvb.h11
-rw-r--r--include/media/videobuf2-memops.h8
-rw-r--r--include/media/videobuf2-v4l2.h117
-rw-r--r--include/misc/cxl.h2
-rw-r--r--include/misc/ocxl-config.h45
-rw-r--r--include/misc/ocxl.h214
-rw-r--r--include/net/Space.h2
-rw-r--r--include/net/act_api.h35
-rw-r--r--include/net/addrconf.h13
-rw-r--r--include/net/af_rxrpc.h11
-rw-r--r--include/net/ax25.h2
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/bluetooth/mgmt.h2
-rw-r--r--include/net/caif/cfpkt.h27
-rw-r--r--include/net/cfg80211.h195
-rw-r--r--include/net/compat.h11
-rw-r--r--include/net/devlink.h133
-rw-r--r--include/net/dn_route.h1
-rw-r--r--include/net/dsa.h83
-rw-r--r--include/net/dst.h48
-rw-r--r--include/net/dst_cache.h4
-rw-r--r--include/net/erspan.h240
-rw-r--r--include/net/ethoc.h1
-rw-r--r--include/net/fib_rules.h45
-rw-r--r--include/net/flow.h18
-rw-r--r--include/net/gen_stats.h3
-rw-r--r--include/net/gre.h3
-rw-r--r--include/net/ieee80211_radiotap.h2
-rw-r--r--include/net/inet_common.h4
-rw-r--r--include/net/inet_connection_sock.h16
-rw-r--r--include/net/inet_frag.h126
-rw-r--r--include/net/inet_hashtables.h29
-rw-r--r--include/net/inet_sock.h25
-rw-r--r--include/net/inet_timewait_sock.h5
-rw-r--r--include/net/ip.h46
-rw-r--r--include/net/ip6_fib.h49
-rw-r--r--include/net/ip6_route.h32
-rw-r--r--include/net/ip6_tunnel.h4
-rw-r--r--include/net/ip_fib.h32
-rw-r--r--include/net/ip_tunnels.h23
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/ipv6.h74
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/iw_handler.h2
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/lwtunnel.h15
-rw-r--r--include/net/mac80211.h38
-rw-r--r--include/net/net_namespace.h55
-rw-r--r--include/net/netevent.h3
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h12
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h12
-rw-r--r--include/net/netfilter/nf_conntrack.h5
-rw-r--r--include/net/netfilter/nf_conntrack_count.h16
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h3
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h19
-rw-r--r--include/net/netfilter/nf_flow_table.h126
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h166
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h27
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h29
-rw-r--r--include/net/netfilter/xt_rateest.h4
-rw-r--r--include/net/netns/can.h4
-rw-r--r--include/net/netns/core.h5
-rw-r--r--include/net/netns/ipv4.h6
-rw-r--r--include/net/netns/ipv6.h8
-rw-r--r--include/net/netns/netfilter.h12
-rw-r--r--include/net/netns/nftables.h8
-rw-r--r--include/net/netns/sctp.h5
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/pkt_cls.h121
-rw-r--r--include/net/pkt_sched.h17
-rw-r--r--include/net/regulatory.h30
-rw-r--r--include/net/route.h10
-rw-r--r--include/net/rsi_91x.h56
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sch_generic.h172
-rw-r--r--include/net/sctp/auth.h21
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/constants.h9
-rw-r--r--include/net/sctp/sctp.h23
-rw-r--r--include/net/sctp/sm.h21
-rw-r--r--include/net/sctp/stream_interleave.h61
-rw-r--r--include/net/sctp/structs.h109
-rw-r--r--include/net/sctp/ulpevent.h23
-rw-r--r--include/net/sctp/ulpqueue.h10
-rw-r--r--include/net/sock.h86
-rw-r--r--include/net/tc_act/tc_csum.h16
-rw-r--r--include/net/tc_act/tc_mirred.h6
-rw-r--r--include/net/tcp.h71
-rw-r--r--include/net/tcp_states.h26
-rw-r--r--include/net/tls.h93
-rw-r--r--include/net/udp.h3
-rw-r--r--include/net/udplite.h1
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/net/wext.h4
-rw-r--r--include/net/xdp.h48
-rw-r--r--include/net/xfrm.h95
-rw-r--r--include/rdma/ib_addr.h47
-rw-r--r--include/rdma/ib_cache.h29
-rw-r--r--include/rdma/ib_hdrs.h19
-rw-r--r--include/rdma/ib_sa.h23
-rw-r--r--include/rdma/ib_verbs.h292
-rw-r--r--include/rdma/opa_addr.h16
-rw-r--r--include/rdma/rdma_cm.h63
-rw-r--r--include/rdma/rdma_cm_ib.h8
-rw-r--r--include/rdma/rdma_vt.h33
-rw-r--r--include/rdma/restrack.h177
-rw-r--r--include/rdma/uverbs_ioctl.h192
-rw-r--r--include/rdma/uverbs_named_ioctl.h90
-rw-r--r--include/rdma/uverbs_std_types.h34
-rw-r--r--include/scsi/libsas.h30
-rw-r--r--include/scsi/scsi.h2
-rw-r--r--include/scsi/scsi_cmnd.h12
-rw-r--r--include/scsi/scsi_host.h39
-rw-r--r--include/scsi/scsi_proto.h1
-rw-r--r--include/scsi/scsi_transport_fc.h4
-rw-r--r--include/scsi/scsi_transport_sas.h1
-rw-r--r--include/scsi/srp.h17
-rw-r--r--include/soc/arc/mcip.h5
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h18
-rw-r--r--include/soc/tegra/bpmp.h4
-rw-r--r--include/soc/tegra/mc.h9
-rw-r--r--include/soc/tegra/pmc.h12
-rw-r--r--include/sound/ac97/regs.h2
-rw-r--r--include/sound/da7219.h2
-rw-r--r--include/sound/dmaengine_pcm.h7
-rw-r--r--include/sound/emu10k1.h4
-rw-r--r--include/sound/hdaudio.h5
-rw-r--r--include/sound/hdaudio_ext.h4
-rw-r--r--include/sound/hwdep.h2
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/pcm.h8
-rw-r--r--include/sound/pcm_oss.h1
-rw-r--r--include/sound/rt5514.h2
-rw-r--r--include/sound/rt5645.h3
-rw-r--r--include/sound/rt5651.h29
-rw-r--r--include/sound/rt5659.h1
-rw-r--r--include/sound/soc-acpi-intel-match.h1
-rw-r--r--include/sound/soc-acpi.h14
-rw-r--r--include/sound/soc-dai.h5
-rw-r--r--include/sound/soc-dapm.h16
-rw-r--r--include/sound/soc.h25
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/trace/bpf_probe.h92
-rw-r--r--include/trace/define_trace.h1
-rw-r--r--include/trace/events/afs.h69
-rw-r--r--include/trace/events/bridge.h4
-rw-r--r--include/trace/events/btrfs.h109
-rw-r--r--include/trace/events/cachefiles.h325
-rw-r--r--include/trace/events/ext4.h43
-rw-r--r--include/trace/events/f2fs.h5
-rw-r--r--include/trace/events/fscache.h537
-rw-r--r--include/trace/events/initcall.h66
-rw-r--r--include/trace/events/migrate.h2
-rw-r--r--include/trace/events/mmc.h4
-rw-r--r--include/trace/events/mmflags.h2
-rw-r--r--include/trace/events/net_probe_common.h44
-rw-r--r--include/trace/events/rcu.h79
-rw-r--r--include/trace/events/rdma.h129
-rw-r--r--include/trace/events/rpcrdma.h890
-rw-r--r--include/trace/events/rtc.h206
-rw-r--r--include/trace/events/rxrpc.h291
-rw-r--r--include/trace/events/sctp.h99
-rw-r--r--include/trace/events/siox.h66
-rw-r--r--include/trace/events/sock.h117
-rw-r--r--include/trace/events/sunrpc.h216
-rw-r--r--include/trace/events/tcp.h76
-rw-r--r--include/trace/events/thermal.h10
-rw-r--r--include/trace/events/timer.h37
-rw-r--r--include/trace/events/vmscan.h64
-rw-r--r--include/trace/events/xen.h2
-rw-r--r--include/uapi/asm-generic/mman-common.h3
-rw-r--r--include/uapi/asm-generic/poll.h4
-rw-r--r--include/uapi/asm-generic/siginfo.h78
-rw-r--r--include/uapi/asm-generic/unistd.h163
-rw-r--r--include/uapi/drm/amdgpu_drm.h19
-rw-r--r--include/uapi/drm/drm_fourcc.h38
-rw-r--r--include/uapi/drm/drm_mode.h43
-rw-r--r--include/uapi/drm/etnaviv_drm.h6
-rw-r--r--include/uapi/drm/exynos_drm.h192
-rw-r--r--include/uapi/drm/i915_drm.h189
-rw-r--r--include/uapi/drm/msm_drm.h2
-rw-r--r--include/uapi/drm/vc4_drm.h76
-rw-r--r--include/uapi/drm/virtgpu_drm.h1
-rw-r--r--include/uapi/linux/arm_sdei.h73
-rw-r--r--include/uapi/linux/batadv_packet.h635
-rw-r--r--include/uapi/linux/batman_adv.h111
-rw-r--r--include/uapi/linux/blktrace_api.h4
-rw-r--r--include/uapi/linux/bpf.h222
-rw-r--r--include/uapi/linux/bpf_common.h7
-rw-r--r--include/uapi/linux/bpf_perf_event.h1
-rw-r--r--include/uapi/linux/btrfs.h11
-rw-r--r--include/uapi/linux/btrfs_tree.h2
-rw-r--r--include/uapi/linux/can/netlink.h1
-rw-r--r--include/uapi/linux/cec-funcs.h29
-rw-r--r--include/uapi/linux/cec.h29
-rw-r--r--include/uapi/linux/const.h13
-rw-r--r--include/uapi/linux/devlink.h25
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/dvb/dmx.h98
-rw-r--r--include/uapi/linux/dvb/frontend.h40
-rw-r--r--include/uapi/linux/dvb/version.h2
-rw-r--r--include/uapi/linux/dvb/video.h20
-rw-r--r--include/uapi/linux/elf.h4
-rw-r--r--include/uapi/linux/erspan.h52
-rw-r--r--include/uapi/linux/ethtool.h37
-rw-r--r--include/uapi/linux/eventpoll.h33
-rw-r--r--include/uapi/linux/fib_rules.h11
-rw-r--r--include/uapi/linux/fs.h6
-rw-r--r--include/uapi/linux/gfs2_ondisk.h62
-rw-r--r--include/uapi/linux/i2c.h3
-rw-r--r--include/uapi/linux/if_ether.h9
-rw-r--r--include/uapi/linux/if_link.h44
-rw-r--r--include/uapi/linux/if_macsec.h9
-rw-r--r--include/uapi/linux/if_tun.h2
-rw-r--r--include/uapi/linux/if_tunnel.h3
-rw-r--r--include/uapi/linux/inet_diag.h2
-rw-r--r--include/uapi/linux/inotify.h8
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/input.h11
-rw-r--r--include/uapi/linux/ipmi.h20
-rw-r--r--include/uapi/linux/ipmi_bmc.h16
-rw-r--r--include/uapi/linux/ipmi_msgdefs.h20
-rw-r--r--include/uapi/linux/irda.h252
-rw-r--r--include/uapi/linux/ixjuser.h721
-rw-r--r--include/uapi/linux/kfd_ioctl.h135
-rw-r--r--include/uapi/linux/kvm.h115
-rw-r--r--include/uapi/linux/l2tp.h6
-rw-r--r--include/uapi/linux/libc-compat.h6
-rw-r--r--include/uapi/linux/lightnvm.h9
-rw-r--r--include/uapi/linux/lirc.h83
-rw-r--r--include/uapi/linux/lp.h12
-rw-r--r--include/uapi/linux/media.h344
-rw-r--r--include/uapi/linux/membarrier.h74
-rw-r--r--include/uapi/linux/msdos_fs.h2
-rw-r--r--include/uapi/linux/msg.h1
-rw-r--r--include/uapi/linux/ncsi.h115
-rw-r--r--include/uapi/linux/ndctl.h56
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h7
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h88
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h10
-rw-r--r--include/uapi/linux/netfilter/xt_connlimit.h2
-rw-r--r--include/uapi/linux/netfilter/xt_connmark.h10
-rw-r--r--include/uapi/linux/netfilter_arp.h3
-rw-r--r--include/uapi/linux/netfilter_bridge/ebt_ip.h15
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h16
-rw-r--r--include/uapi/linux/netfilter_decnet.h4
-rw-r--r--include/uapi/linux/netfilter_ipv4.h1
-rw-r--r--include/uapi/linux/netfilter_ipv6.h1
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_srh.h57
-rw-r--r--include/uapi/linux/nfs.h1
-rw-r--r--include/uapi/linux/nl80211.h140
-rw-r--r--include/uapi/linux/nubus.h23
-rw-r--r--include/uapi/linux/openvswitch.h1
-rw-r--r--include/uapi/linux/pci_regs.h37
-rw-r--r--include/uapi/linux/perf_event.h57
-rw-r--r--include/uapi/linux/pkt_cls.h4
-rw-r--r--include/uapi/linux/psci.h3
-rw-r--r--include/uapi/linux/psp-sev.h142
-rw-r--r--include/uapi/linux/ptrace.h6
-rw-r--r--include/uapi/linux/qemu_fw_cfg.h97
-rw-r--r--include/uapi/linux/rds.h8
-rw-r--r--include/uapi/linux/rtnetlink.h12
-rw-r--r--include/uapi/linux/sched.h5
-rw-r--r--include/uapi/linux/sctp.h47
-rw-r--r--include/uapi/linux/sem.h1
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/shm.h5
-rw-r--r--include/uapi/linux/stm.h13
-rw-r--r--include/uapi/linux/switchtec_ioctl.h3
-rw-r--r--include/uapi/linux/tc_ematch/tc_em_ipt.h20
-rw-r--r--include/uapi/linux/tcp.h3
-rw-r--r--include/uapi/linux/tee.h37
-rw-r--r--include/uapi/linux/telephony.h263
-rw-r--r--include/uapi/linux/time.h13
-rw-r--r--include/uapi/linux/tipc.h162
-rw-r--r--include/uapi/linux/tipc_netlink.h21
-rw-r--r--include/uapi/linux/tipc_sockets_diag.h17
-rw-r--r--include/uapi/linux/tls.h2
-rw-r--r--include/uapi/linux/types.h6
-rw-r--r--include/uapi/linux/usb/audio.h5
-rw-r--r--include/uapi/linux/usb/ch9.h4
-rw-r--r--include/uapi/linux/usbdevice_fs.h2
-rw-r--r--include/uapi/linux/uuid.h1
-rw-r--r--include/uapi/linux/uvcvideo.h26
-rw-r--r--include/uapi/linux/v4l2-controls.h189
-rw-r--r--include/uapi/linux/v4l2-mediabus.h4
-rw-r--r--include/uapi/linux/vbox_err.h151
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h226
-rw-r--r--include/uapi/linux/vboxguest.h330
-rw-r--r--include/uapi/linux/vfio.h99
-rw-r--r--include/uapi/linux/videodev2.h64
-rw-r--r--include/uapi/linux/virtio_balloon.h5
-rw-r--r--include/uapi/linux/virtio_net.h13
-rw-r--r--include/uapi/misc/cxl.h10
-rw-r--r--include/uapi/misc/ocxl.h66
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h26
-rw-r--r--include/uapi/rdma/cxgb3-abi.h17
-rw-r--r--include/uapi/rdma/cxgb4-abi.h29
-rw-r--r--include/uapi/rdma/hfi/hfi1_ioctl.h32
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h10
-rw-r--r--include/uapi/rdma/hns-abi.h22
-rw-r--r--include/uapi/rdma/i40iw-abi.h107
-rw-r--r--include/uapi/rdma/ib_user_cm.h48
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h134
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h96
-rw-r--r--include/uapi/rdma/ib_user_mad.h4
-rw-r--r--include/uapi/rdma/ib_user_verbs.h206
-rw-r--r--include/uapi/rdma/mlx4-abi.h57
-rw-r--r--include/uapi/rdma/mlx5-abi.h111
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h48
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h43
-rw-r--r--include/uapi/rdma/mthca-abi.h10
-rw-r--r--include/uapi/rdma/nes-abi.h6
-rw-r--r--include/uapi/rdma/ocrdma-abi.h36
-rw-r--r--include/uapi/rdma/qedr-abi.h20
-rw-r--r--include/uapi/rdma/rdma_netlink.h112
-rw-r--r--include/uapi/rdma/rdma_user_cm.h49
-rw-r--r--include/uapi/rdma/rdma_user_ioctl.h38
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h99
-rw-r--r--include/uapi/rdma/rdma_user_rxe.h58
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h61
-rw-r--r--include/uapi/sound/asound.h10
-rw-r--r--include/uapi/sound/snd_sst_tokens.h17
-rw-r--r--include/video/exynos5433_decon.h209
-rw-r--r--include/video/exynos7_decon.h349
-rw-r--r--include/video/imx-ipu-v3.h2
-rw-r--r--include/video/of_display_timing.h5
-rw-r--r--include/video/udlfb.h3
1082 files changed, 47372 insertions, 18824 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index c77b91ff1149..6488d572739c 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACBUFFER_H__
#define __ACBUFFER_H__
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 6db3b4668b1a..012c55cb22ba 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -1,46 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acconfig.h - Global configuration constants
*
+ * Copyright (C) 2000 - 2018, Intel Corp.
+ *
*****************************************************************************/
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
#ifndef _ACCONFIG_H
#define _ACCONFIG_H
@@ -145,9 +111,9 @@
#define ACPI_ADDRESS_RANGE_MAX 2
-/* Maximum number of While() loops before abort */
+/* Maximum time (default 30s) of While() loops before abort */
-#define ACPI_MAX_LOOP_COUNT 0x000FFFFF
+#define ACPI_MAX_LOOP_TIMEOUT 30
/******************************************************************************
*
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 17d61b1f2511..226e5aeba6c2 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACEXCEP_H__
#define __ACEXCEP_H__
@@ -130,8 +96,9 @@ struct acpi_exception_info {
#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020)
#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021)
#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022)
+#define AE_END_OF_TABLE EXCEP_ENV (0x0023)
-#define AE_CODE_ENV_MAX 0x0022
+#define AE_CODE_ENV_MAX 0x0023
/*
* Programmer exceptions
@@ -195,7 +162,7 @@ struct acpi_exception_info {
#define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E)
#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F)
#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020)
-#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021)
+#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021)
#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
@@ -275,7 +242,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
EXCEP_TXT("AE_DECIMAL_OVERFLOW",
"Overflow during ASCII decimal-to-binary conversion"),
EXCEP_TXT("AE_OCTAL_OVERFLOW",
- "Overflow during ASCII octal-to-binary conversion")
+ "Overflow during ASCII octal-to-binary conversion"),
+ EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table")
};
static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
@@ -368,8 +336,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
"The length of a Resource Descriptor in the AML is incorrect"),
EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
"A memory, I/O, or PCI configuration address is invalid"),
- EXCEP_TXT("AE_AML_INFINITE_LOOP",
- "An apparent infinite AML While loop, method was aborted"),
+ EXCEP_TXT("AE_AML_LOOP_TIMEOUT",
+ "An AML While loop exceeded the maximum execution time"),
EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
"A namespace node is uninitialized or unresolved"),
EXCEP_TXT("AE_AML_TARGET_TYPE",
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index d8dd3bf51ca7..7b289dd00a30 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acnames.h - Global names and strings
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACNAMES_H__
#define __ACNAMES_H__
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index c2e664e74075..0a6c5bd92256 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acoutput.h -- debug output
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACOUTPUT_H__
#define __ACOUTPUT_H__
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index 0887d7cbb7e1..ccdc5981bc91 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACPI_H__
#define __ACPI_H__
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 79287629c888..ba4dd54f2c82 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -91,6 +91,9 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv);
+
#ifdef CONFIG_ACPI
#include <linux/proc_fs.h>
@@ -212,7 +215,7 @@ struct acpi_device_flags {
u32 of_compatible_ok:1;
u32 coherent_dma:1;
u32 cca_seen:1;
- u32 serial_bus_slave:1;
+ u32 enumeration_by_parent:1;
u32 reserved:19;
};
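
Illustrative sketch, not part of this diff: one way a driver might consume the new acpi_dev_get_first_match_name() helper declared above. The HID "ABCD0000" and the function name are placeholders; NULL uid and -1 hrv are passed on the assumption that they mean "match any", as with the neighbouring acpi_dev_present() helper.

#include <linux/acpi.h>
#include <linux/errno.h>

static int example_probe(void)
{
	const char *name;

	/* Placeholder _HID; NULL uid / -1 hrv assumed to mean "any" */
	name = acpi_dev_get_first_match_name("ABCD0000", NULL, -1);
	if (!name)
		return -ENODEV;

	pr_info("first matching ACPI device: %s\n", name);
	return 0;
}

The returned string is the ACPI device name of the first enumerated match, which callers typically use for lookups rather than holding a reference to the device itself.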
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d5c0f5153c4e..540d35f06ad6 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -1,47 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACPIOSXF_H__
#define __ACPIOSXF_H__
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e1dd1a8d42b6..da0215ea9f44 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -1,52 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACXFACE_H__
#define __ACXFACE_H__
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20170831
+#define ACPI_CA_VERSION 0x20180313
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -192,15 +158,18 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
/*
* Optionally support group module level code.
+ * NOTE, this is essentially obsolete and will be removed soon
+ * (01/2018).
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
/*
- * Optionally support module level code by parsing the entire table as
- * a term_list. Default is FALSE, do not execute entire table until some
- * lock order issues are fixed.
+ * Optionally support module level code by parsing an entire table as
+ * a method as it is loaded. Default is TRUE.
+ * NOTE, this is essentially obsolete and will be removed soon
+ * (01/2018).
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_parse_table_as_term_list, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_execute_tables_as_methods, TRUE);
/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
@@ -260,11 +229,21 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
- * Maximum number of While() loop iterations before forced method abort.
+ * Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
*/
-ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT);
+
+/*
+ * Optionally ignore AE_NOT_FOUND errors from named reference package elements
+ * during DSDT/SSDT table loading. This reduces error "noise" in platforms
+ * whose firmware is carrying around a bunch of unused package objects that
+ * refer to non-existent named objects. However, if the AML actually tries to
+ * use such a package, the unresolved element(s) will be replaced with NULL
+ * elements.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_package_resolution_errors, FALSE);
/*
* This mechanism is used to trace a specified AML method. The method is
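
Illustrative sketch, not part of this diff: the ACPI_INIT_GLOBAL variables above are plain externs that a host may override before ACPICA initialization. The values below are examples only; note that after this change acpi_gbl_max_loop_iterations is interpreted against a timeout default (ACPI_MAX_LOOP_TIMEOUT) rather than an iteration count.

#include <acpi/acpi.h>

static void example_tune_acpica(void)
{
	/* Suppress AE_NOT_FOUND noise from unused package references */
	acpi_gbl_ignore_package_resolution_errors = TRUE;

	/* Abort runaway While() loops sooner than the default
	 * (illustrative value; a timeout after this change, not a count) */
	acpi_gbl_max_loop_iterations = 10;
}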
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 343dbdcef20c..724ad5f29a16 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACRESTYP_H__
#define __ACRESTYP_H__
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 89509b86cb54..517addd6b11d 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACTBL_H__
#define __ACTBL_H__
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7a89e6de94da..ab424509cae9 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -1,58 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: actbl1.h - Additional ACPI table definitions
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACTBL1_H__
#define __ACTBL1_H__
/*******************************************************************************
*
- * Additional ACPI Tables (1)
+ * Additional ACPI Tables
*
* These tables are not consumed directly by the ACPICA subsystem, but are
* included here to support device drivers and the AML disassembler.
*
- * The tables in this file are fully defined within the ACPI specification.
- *
******************************************************************************/
/*
@@ -60,21 +24,42 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
+#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
#define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */
+#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */
+#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
+#define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */
+#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */
+#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
+#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
+#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */
#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
#define ACPI_SIG_EINJ "EINJ" /* Error Injection table */
#define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */
-#define ACPI_SIG_HMAT "HMAT" /* Heterogeneous Memory Attributes Table */
+#define ACPI_SIG_FPDT "FPDT" /* Firmware Performance Data Table */
+#define ACPI_SIG_GTDT "GTDT" /* Generic Timer Description Table */
#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */
-#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
-#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
-#define ACPI_SIG_PDTT "PDTT" /* Processor Debug Trigger Table */
-#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
-#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
-#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
-#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
-#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
+#define ACPI_SIG_HMAT "HMAT" /* Heterogeneous Memory Attributes Table */
+#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
+#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
+
+#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
+#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
+
+/* Reserved table signatures */
+
+#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
+#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
+
+/*
+ * These tables have been seen in the field, but no definition has been found
+ */
+#ifdef ACPI_UNDEFINED_TABLES
+#define ACPI_SIG_ATKG "ATKG"
+#define ACPI_SIG_GSCI "GSCI" /* GMCH SCI table */
+#define ACPI_SIG_IEIT "IEIT"
+#endif
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -121,6 +106,120 @@ struct acpi_whea_header {
/*******************************************************************************
*
+ * ASF - Alert Standard Format table (Signature "ASF!")
+ * Revision 0x10
+ *
+ * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003
+ *
+ ******************************************************************************/
+
+struct acpi_table_asf {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* ASF subtable header */
+
+struct acpi_asf_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_asf_type {
+ ACPI_ASF_TYPE_INFO = 0,
+ ACPI_ASF_TYPE_ALERT = 1,
+ ACPI_ASF_TYPE_CONTROL = 2,
+ ACPI_ASF_TYPE_BOOT = 3,
+ ACPI_ASF_TYPE_ADDRESS = 4,
+ ACPI_ASF_TYPE_RESERVED = 5
+};
+
+/*
+ * ASF subtables
+ */
+
+/* 0: ASF Information */
+
+struct acpi_asf_info {
+ struct acpi_asf_header header;
+ u8 min_reset_value;
+ u8 min_poll_interval;
+ u16 system_id;
+ u32 mfg_id;
+ u8 flags;
+ u8 reserved2[3];
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_ASF_SMBUS_PROTOCOLS (1)
+
+/* 1: ASF Alerts */
+
+struct acpi_asf_alert {
+ struct acpi_asf_header header;
+ u8 assert_mask;
+ u8 deassert_mask;
+ u8 alerts;
+ u8 data_length;
+};
+
+struct acpi_asf_alert_data {
+ u8 address;
+ u8 command;
+ u8 mask;
+ u8 value;
+ u8 sensor_type;
+ u8 type;
+ u8 offset;
+ u8 source_type;
+ u8 severity;
+ u8 sensor_number;
+ u8 entity;
+ u8 instance;
+};
+
+/* 2: ASF Remote Control */
+
+struct acpi_asf_remote {
+ struct acpi_asf_header header;
+ u8 controls;
+ u8 data_length;
+ u16 reserved2;
+};
+
+struct acpi_asf_control_data {
+ u8 function;
+ u8 address;
+ u8 command;
+ u8 value;
+};
+
+/* 3: ASF RMCP Boot Options */
+
+struct acpi_asf_rmcp {
+ struct acpi_asf_header header;
+ u8 capabilities[7];
+ u8 completion_code;
+ u32 enterprise_id;
+ u8 command;
+ u16 parameter;
+ u16 boot_options;
+ u16 oem_parameters;
+};
+
+/* 4: ASF Address */
+
+struct acpi_asf_address {
+ struct acpi_asf_header header;
+ u8 eprom_address;
+ u8 devices;
+};
+
+/*******************************************************************************
+ *
* BERT - Boot Error Record Table (ACPI 4.0)
* Version 1
*
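
Illustrative sketch, not part of this diff: walking the ASF! subtables defined above by their Type/Length headers. The mask on the type field assumes bit 7 is the ASF "last record" flag, which this header does not document; treat that and the function name as assumptions.

#include <linux/acpi.h>

static unsigned int example_count_asf_alerts(struct acpi_table_asf *asf)
{
	struct acpi_asf_header *hdr;
	u8 *end = (u8 *)asf + asf->header.length;
	unsigned int alerts = 0;

	/* Subtables start right after the fixed ACPI table header */
	hdr = ACPI_ADD_PTR(struct acpi_asf_header, asf, sizeof(*asf));

	while ((u8 *)hdr + sizeof(*hdr) <= end && hdr->length >= sizeof(*hdr)) {
		if ((hdr->type & 0x7f) == ACPI_ASF_TYPE_ALERT)
			alerts += ((struct acpi_asf_alert *)hdr)->alerts;

		hdr = ACPI_ADD_PTR(struct acpi_asf_header, hdr, hdr->length);
	}

	return alerts;
}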
@@ -167,6 +266,43 @@ enum acpi_bert_error_severity {
/*******************************************************************************
*
+ * BGRT - Boot Graphics Resource Table (ACPI 5.0)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_bgrt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 version;
+ u8 status;
+ u8 image_type;
+ u64 image_address;
+ u32 image_offset_x;
+ u32 image_offset_y;
+};
+
+/* Flags for Status field above */
+
+#define ACPI_BGRT_DISPLAYED (1)
+#define ACPI_BGRT_ORIENTATION_OFFSET (3 << 1)
+
+/*******************************************************************************
+ *
+ * BOOT - Simple Boot Flag Table
+ * Version 1
+ *
+ * Conforms to the "Simple Boot Flag Specification", Version 2.1
+ *
+ ******************************************************************************/
+
+struct acpi_table_boot {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 cmos_index; /* Index in CMOS RAM for the boot register */
+ u8 reserved[3];
+};
+
+/*******************************************************************************
+ *
* CPEP - Corrected Platform Error Polling table (ACPI 4.0)
* Version 1
*
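
Illustrative sketch, not part of this diff: fetching the BGRT defined above and checking whether firmware reports the boot logo as currently displayed. acpi_get_table()/acpi_put_table() are the standard ACPICA accessors; error handling is kept minimal.

#include <linux/acpi.h>

static void example_inspect_bgrt(void)
{
	struct acpi_table_bgrt *bgrt;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_BGRT, 0,
				(struct acpi_table_header **)&bgrt);
	if (ACPI_FAILURE(status))
		return;

	if (bgrt->status & ACPI_BGRT_DISPLAYED)
		pr_info("BGRT image type %u at 0x%llx (offset %u,%u)\n",
			bgrt->image_type,
			(unsigned long long)bgrt->image_address,
			bgrt->image_offset_x, bgrt->image_offset_y);

	acpi_put_table((struct acpi_table_header *)bgrt);
}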
@@ -188,6 +324,348 @@ struct acpi_cpep_polling {
/*******************************************************************************
*
+ * CSRT - Core System Resource Table
+ * Version 0
+ *
+ * Conforms to the "Core System Resource Table (CSRT)", November 14, 2011
+ *
+ ******************************************************************************/
+
+struct acpi_table_csrt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* Resource Group subtable */
+
+struct acpi_csrt_group {
+ u32 length;
+ u32 vendor_id;
+ u32 subvendor_id;
+ u16 device_id;
+ u16 subdevice_id;
+ u16 revision;
+ u16 reserved;
+ u32 shared_info_length;
+
+ /* Shared data immediately follows (Length = shared_info_length) */
+};
+
+/* Shared Info subtable */
+
+struct acpi_csrt_shared_info {
+ u16 major_version;
+ u16 minor_version;
+ u32 mmio_base_low;
+ u32 mmio_base_high;
+ u32 gsi_interrupt;
+ u8 interrupt_polarity;
+ u8 interrupt_mode;
+ u8 num_channels;
+ u8 dma_address_width;
+ u16 base_request_line;
+ u16 num_handshake_signals;
+ u32 max_block_size;
+
+ /* Resource descriptors immediately follow (Length = Group length - shared_info_length) */
+};
+
+/* Resource Descriptor subtable */
+
+struct acpi_csrt_descriptor {
+ u32 length;
+ u16 type;
+ u16 subtype;
+ u32 uid;
+
+ /* Resource-specific information immediately follows */
+};
+
+/* Resource Types */
+
+#define ACPI_CSRT_TYPE_INTERRUPT 0x0001
+#define ACPI_CSRT_TYPE_TIMER 0x0002
+#define ACPI_CSRT_TYPE_DMA 0x0003
+
+/* Resource Subtypes */
+
+#define ACPI_CSRT_XRUPT_LINE 0x0000
+#define ACPI_CSRT_XRUPT_CONTROLLER 0x0001
+#define ACPI_CSRT_TIMER 0x0000
+#define ACPI_CSRT_DMA_CHANNEL 0x0000
+#define ACPI_CSRT_DMA_CONTROLLER 0x0001
+
+/*******************************************************************************
+ *
+ * DBG2 - Debug Port Table 2
+ * Version 0 (Both main table and subtables)
+ *
+ * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_dbg2 {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 info_offset;
+ u32 info_count;
+};
+
+struct acpi_dbg2_header {
+ u32 info_offset;
+ u32 info_count;
+};
+
+/* Debug Device Information Subtable */
+
+struct acpi_dbg2_device {
+ u8 revision;
+ u16 length;
+ u8 register_count; /* Number of base_address registers */
+ u16 namepath_length;
+ u16 namepath_offset;
+ u16 oem_data_length;
+ u16 oem_data_offset;
+ u16 port_type;
+ u16 port_subtype;
+ u16 reserved;
+ u16 base_address_offset;
+ u16 address_size_offset;
+ /*
+ * Data that follows:
+ * base_address (required) - Each in 12-byte Generic Address Structure format.
+ * address_size (required) - Array of u32 sizes corresponding to each base_address register.
+ * Namepath (required) - Null terminated string. Single dot if not supported.
+ * oem_data (optional) - Length is oem_data_length.
+ */
+};
+
+/* Types for port_type field above */
+
+#define ACPI_DBG2_SERIAL_PORT 0x8000
+#define ACPI_DBG2_1394_PORT 0x8001
+#define ACPI_DBG2_USB_PORT 0x8002
+#define ACPI_DBG2_NET_PORT 0x8003
+
+/* Subtypes for port_subtype field above */
+
+#define ACPI_DBG2_16550_COMPATIBLE 0x0000
+#define ACPI_DBG2_16550_SUBSET 0x0001
+#define ACPI_DBG2_ARM_PL011 0x0003
+#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D
+#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E
+#define ACPI_DBG2_ARM_DCC 0x000F
+#define ACPI_DBG2_BCM2835 0x0010
+
+#define ACPI_DBG2_1394_STANDARD 0x0000
+
+#define ACPI_DBG2_USB_XHCI 0x0000
+#define ACPI_DBG2_USB_EHCI 0x0001
+
+/*******************************************************************************
+ *
+ * DBGP - Debug Port table
+ * Version 1
+ *
+ * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000
+ *
+ ******************************************************************************/
+
+struct acpi_table_dbgp {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address debug_port;
+};
+
+/*******************************************************************************
+ *
+ * DMAR - DMA Remapping table
+ * Version 1
+ *
+ * Conforms to "Intel Virtualization Technology for Directed I/O",
+ * Version 2.3, October 2014
+ *
+ ******************************************************************************/
+
+struct acpi_table_dmar {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 width; /* Host Address Width */
+ u8 flags;
+ u8 reserved[10];
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_DMAR_INTR_REMAP (1)
+#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1)
+#define ACPI_DMAR_X2APIC_MODE (1<<2)
+
+/* DMAR subtable header */
+
+struct acpi_dmar_header {
+ u16 type;
+ u16 length;
+};
+
+/* Values for subtable type in struct acpi_dmar_header */
+
+enum acpi_dmar_type {
+ ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
+ ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
+ ACPI_DMAR_TYPE_ROOT_ATS = 2,
+ ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3,
+ ACPI_DMAR_TYPE_NAMESPACE = 4,
+ ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+};
+
+/* DMAR Device Scope structure */
+
+struct acpi_dmar_device_scope {
+ u8 entry_type;
+ u8 length;
+ u16 reserved;
+ u8 enumeration_id;
+ u8 bus;
+};
+
+/* Values for entry_type in struct acpi_dmar_device_scope - device types */
+
+enum acpi_dmar_scope_type {
+ ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
+ ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
+ ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
+ ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
+ ACPI_DMAR_SCOPE_TYPE_HPET = 4,
+ ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5,
+ ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */
+};
+
+struct acpi_dmar_pci_path {
+ u8 device;
+ u8 function;
+};
+
+/*
+ * DMAR Subtables, correspond to Type in struct acpi_dmar_header
+ */
+
+/* 0: Hardware Unit Definition */
+
+struct acpi_dmar_hardware_unit {
+ struct acpi_dmar_header header;
+ u8 flags;
+ u8 reserved;
+ u16 segment;
+ u64 address; /* Register Base Address */
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_DMAR_INCLUDE_ALL (1)
+
+/* 1: Reserved Memory Definition */
+
+struct acpi_dmar_reserved_memory {
+ struct acpi_dmar_header header;
+ u16 reserved;
+ u16 segment;
+ u64 base_address; /* 4K aligned base address */
+ u64 end_address; /* 4K aligned limit address */
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_DMAR_ALLOW_ALL (1)
+
+/* 2: Root Port ATS Capability Reporting Structure */
+
+struct acpi_dmar_atsr {
+ struct acpi_dmar_header header;
+ u8 flags;
+ u8 reserved;
+ u16 segment;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_DMAR_ALL_PORTS (1)
+
+/* 3: Remapping Hardware Static Affinity Structure */
+
+struct acpi_dmar_rhsa {
+ struct acpi_dmar_header header;
+ u32 reserved;
+ u64 base_address;
+ u32 proximity_domain;
+};
+
+/* 4: ACPI Namespace Device Declaration Structure */
+
+struct acpi_dmar_andd {
+ struct acpi_dmar_header header;
+ u8 reserved[3];
+ u8 device_number;
+ char device_name[1];
+};
+
+/*******************************************************************************
+ *
+ * DRTM - Dynamic Root of Trust for Measurement table
+ * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0
+ * Table version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_drtm {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u64 entry_base_address;
+ u64 entry_length;
+ u32 entry_address32;
+ u64 entry_address64;
+ u64 exit_address;
+ u64 log_area_address;
+ u32 log_area_length;
+ u64 arch_dependent_address;
+ u32 flags;
+};
+
+/* Flag Definitions for above */
+
+#define ACPI_DRTM_ACCESS_ALLOWED (1)
+#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1)
+#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2)
+#define ACPI_DRTM_AUTHORITY_ORDER (1<<3)
+
+/* 1) Validated Tables List (64-bit addresses) */
+
+struct acpi_drtm_vtable_list {
+ u32 validated_table_count;
+ u64 validated_tables[1];
+};
+
+/* 2) Resources List (of Resource Descriptors) */
+
+/* Resource Descriptor */
+
+struct acpi_drtm_resource {
+ u8 size[7];
+ u8 type;
+ u64 address;
+};
+
+struct acpi_drtm_resource_list {
+ u32 resource_count;
+ struct acpi_drtm_resource resources[1];
+};
+
+/* 3) Platform-specific Identifiers List */
+
+struct acpi_drtm_dps_id {
+ u32 dps_id_length;
+ u8 dps_id[16];
+};
+
+/*******************************************************************************
+ *
* ECDT - Embedded Controller Boot Resources Table
* Version 1
*
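
Illustrative sketch, not part of this diff: walking the DBG2 device-information entries defined above. The entries are variable length, located via info_offset/info_count relative to the start of the table. ACPI_ADD_PTR is the existing ACPICA pointer-arithmetic helper; the function name is a placeholder.

#include <linux/acpi.h>

static unsigned int example_count_dbg2_uarts(struct acpi_table_dbg2 *dbg2)
{
	struct acpi_dbg2_device *dev;
	unsigned int uarts = 0;
	u32 i;

	/* info_offset is relative to the start of the table */
	dev = ACPI_ADD_PTR(struct acpi_dbg2_device, dbg2, dbg2->info_offset);

	for (i = 0; i < dbg2->info_count; i++) {
		if (dev->port_type == ACPI_DBG2_SERIAL_PORT &&
		    dev->port_subtype == ACPI_DBG2_16550_COMPATIBLE)
			uarts++;

		/* Entries are variable length; step by dev->length */
		dev = ACPI_ADD_PTR(struct acpi_dbg2_device, dev, dev->length);
	}

	return uarts;
}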
@@ -402,6 +880,197 @@ struct acpi_erst_info {
/*******************************************************************************
*
+ * FPDT - Firmware Performance Data Table (ACPI 5.0)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_fpdt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* FPDT subtable header (Performance Record Structure) */
+
+struct acpi_fpdt_header {
+ u16 type;
+ u8 length;
+ u8 revision;
+};
+
+/* Values for Type field above */
+
+enum acpi_fpdt_type {
+ ACPI_FPDT_TYPE_BOOT = 0,
+ ACPI_FPDT_TYPE_S3PERF = 1
+};
+
+/*
+ * FPDT subtables
+ */
+
+/* 0: Firmware Basic Boot Performance Record */
+
+struct acpi_fpdt_boot_pointer {
+ struct acpi_fpdt_header header;
+ u8 reserved[4];
+ u64 address;
+};
+
+/* 1: S3 Performance Table Pointer Record */
+
+struct acpi_fpdt_s3pt_pointer {
+ struct acpi_fpdt_header header;
+ u8 reserved[4];
+ u64 address;
+};
+
+/*
+ * S3PT - S3 Performance Table. This table is pointed to by the
+ * S3 Pointer Record above.
+ */
+struct acpi_table_s3pt {
+ u8 signature[4]; /* "S3PT" */
+ u32 length;
+};
+
+/*
+ * S3PT Subtables (Not part of the actual FPDT)
+ */
+
+/* Values for Type field in S3PT header */
+
+enum acpi_s3pt_type {
+ ACPI_S3PT_TYPE_RESUME = 0,
+ ACPI_S3PT_TYPE_SUSPEND = 1,
+ ACPI_FPDT_BOOT_PERFORMANCE = 2
+};
+
+struct acpi_s3pt_resume {
+ struct acpi_fpdt_header header;
+ u32 resume_count;
+ u64 full_resume;
+ u64 average_resume;
+};
+
+struct acpi_s3pt_suspend {
+ struct acpi_fpdt_header header;
+ u64 suspend_start;
+ u64 suspend_end;
+};
+
+/*
+ * FPDT Boot Performance Record (Not part of the actual FPDT)
+ */
+struct acpi_fpdt_boot {
+ struct acpi_fpdt_header header;
+ u8 reserved[4];
+ u64 reset_end;
+ u64 load_start;
+ u64 startup_start;
+ u64 exit_services_entry;
+ u64 exit_services_exit;
+};
+
+/*******************************************************************************
+ *
+ * GTDT - Generic Timer Description Table (ACPI 5.1)
+ * Version 2
+ *
+ ******************************************************************************/
+
+struct acpi_table_gtdt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u64 counter_block_addresss;
+ u32 reserved;
+ u32 secure_el1_interrupt;
+ u32 secure_el1_flags;
+ u32 non_secure_el1_interrupt;
+ u32 non_secure_el1_flags;
+ u32 virtual_timer_interrupt;
+ u32 virtual_timer_flags;
+ u32 non_secure_el2_interrupt;
+ u32 non_secure_el2_flags;
+ u64 counter_read_block_address;
+ u32 platform_timer_count;
+ u32 platform_timer_offset;
+};
+
+/* Flag Definitions: Timer Block Physical Timers and Virtual timers */
+
+#define ACPI_GTDT_INTERRUPT_MODE (1)
+#define ACPI_GTDT_INTERRUPT_POLARITY (1<<1)
+#define ACPI_GTDT_ALWAYS_ON (1<<2)
+
+/* Common GTDT subtable header */
+
+struct acpi_gtdt_header {
+ u8 type;
+ u16 length;
+};
+
+/* Values for GTDT subtable type above */
+
+enum acpi_gtdt_type {
+ ACPI_GTDT_TYPE_TIMER_BLOCK = 0,
+ ACPI_GTDT_TYPE_WATCHDOG = 1,
+ ACPI_GTDT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* GTDT Subtables, correspond to Type in struct acpi_gtdt_header */
+
+/* 0: Generic Timer Block */
+
+struct acpi_gtdt_timer_block {
+ struct acpi_gtdt_header header;
+ u8 reserved;
+ u64 block_address;
+ u32 timer_count;
+ u32 timer_offset;
+};
+
+/* Timer Sub-Structure, one per timer */
+
+struct acpi_gtdt_timer_entry {
+ u8 frame_number;
+ u8 reserved[3];
+ u64 base_address;
+ u64 el0_base_address;
+ u32 timer_interrupt;
+ u32 timer_flags;
+ u32 virtual_timer_interrupt;
+ u32 virtual_timer_flags;
+ u32 common_flags;
+};
+
+/* Flag Definitions: timer_flags and virtual_timer_flags above */
+
+#define ACPI_GTDT_GT_IRQ_MODE (1)
+#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1)
+
+/* Flag Definitions: common_flags above */
+
+#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
+#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
+
+/* 1: SBSA Generic Watchdog Structure */
+
+struct acpi_gtdt_watchdog {
+ struct acpi_gtdt_header header;
+ u8 reserved;
+ u64 refresh_frame_address;
+ u64 control_frame_address;
+ u32 timer_interrupt;
+ u32 timer_flags;
+};
+
+/* Flag Definitions: timer_flags above */
+
+#define ACPI_GTDT_WATCHDOG_IRQ_MODE (1)
+#define ACPI_GTDT_WATCHDOG_IRQ_POLARITY (1<<1)
+#define ACPI_GTDT_WATCHDOG_SECURE (1<<2)
+
+/*******************************************************************************
+ *
* HEST - Hardware Error Source Table (ACPI 4.0)
* Version 1
*
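
Illustrative sketch, not part of this diff: decoding the non-secure EL1 timer flags from the GTDT defined above. The bit meanings assumed here (mode bit set = edge triggered, polarity bit set = active low) follow the ACPI specification rather than anything stated in this header.

#include <linux/acpi.h>

static void example_el1_timer_flags(struct acpi_table_gtdt *gtdt,
				    bool *edge, bool *active_low)
{
	u32 flags = gtdt->non_secure_el1_flags;

	*edge = flags & ACPI_GTDT_INTERRUPT_MODE;		/* set: edge triggered */
	*active_low = flags & ACPI_GTDT_INTERRUPT_POLARITY;	/* set: active low */
}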
@@ -824,698 +1493,130 @@ struct acpi_hmat_cache {
/*******************************************************************************
*
- * MADT - Multiple APIC Description Table
- * Version 3
+ * HPET - High Precision Event Timer table
+ * Version 1
+ *
+ * Conforms to "IA-PC HPET (High Precision Event Timers) Specification",
+ * Version 1.0a, October 2004
*
******************************************************************************/
-struct acpi_table_madt {
+struct acpi_table_hpet {
struct acpi_table_header header; /* Common ACPI table header */
- u32 address; /* Physical address of local APIC */
- u32 flags;
-};
-
-/* Masks for Flags field above */
-
-#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */
-
-/* Values for PCATCompat flag */
-
-#define ACPI_MADT_DUAL_PIC 1
-#define ACPI_MADT_MULTIPLE_APIC 0
-
-/* Values for MADT subtable type in struct acpi_subtable_header */
-
-enum acpi_madt_type {
- ACPI_MADT_TYPE_LOCAL_APIC = 0,
- ACPI_MADT_TYPE_IO_APIC = 1,
- ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2,
- ACPI_MADT_TYPE_NMI_SOURCE = 3,
- ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4,
- ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5,
- ACPI_MADT_TYPE_IO_SAPIC = 6,
- ACPI_MADT_TYPE_LOCAL_SAPIC = 7,
- ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
- ACPI_MADT_TYPE_LOCAL_X2APIC = 9,
- ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10,
- ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11,
- ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
- ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
- ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
- ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
- ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */
-};
-
-/*
- * MADT Subtables, correspond to Type in struct acpi_subtable_header
- */
-
-/* 0: Processor Local APIC */
-
-struct acpi_madt_local_apic {
- struct acpi_subtable_header header;
- u8 processor_id; /* ACPI processor id */
- u8 id; /* Processor's local APIC id */
- u32 lapic_flags;
-};
-
-/* 1: IO APIC */
-
-struct acpi_madt_io_apic {
- struct acpi_subtable_header header;
- u8 id; /* I/O APIC ID */
- u8 reserved; /* reserved - must be zero */
- u32 address; /* APIC physical address */
- u32 global_irq_base; /* Global system interrupt where INTI lines start */
-};
-
-/* 2: Interrupt Override */
-
-struct acpi_madt_interrupt_override {
- struct acpi_subtable_header header;
- u8 bus; /* 0 - ISA */
- u8 source_irq; /* Interrupt source (IRQ) */
- u32 global_irq; /* Global system interrupt */
- u16 inti_flags;
-};
-
-/* 3: NMI Source */
-
-struct acpi_madt_nmi_source {
- struct acpi_subtable_header header;
- u16 inti_flags;
- u32 global_irq; /* Global system interrupt */
-};
-
-/* 4: Local APIC NMI */
-
-struct acpi_madt_local_apic_nmi {
- struct acpi_subtable_header header;
- u8 processor_id; /* ACPI processor id */
- u16 inti_flags;
- u8 lint; /* LINTn to which NMI is connected */
-};
-
-/* 5: Address Override */
-
-struct acpi_madt_local_apic_override {
- struct acpi_subtable_header header;
- u16 reserved; /* Reserved, must be zero */
- u64 address; /* APIC physical address */
-};
-
-/* 6: I/O Sapic */
-
-struct acpi_madt_io_sapic {
- struct acpi_subtable_header header;
- u8 id; /* I/O SAPIC ID */
- u8 reserved; /* Reserved, must be zero */
- u32 global_irq_base; /* Global interrupt for SAPIC start */
- u64 address; /* SAPIC physical address */
-};
-
-/* 7: Local Sapic */
-
-struct acpi_madt_local_sapic {
- struct acpi_subtable_header header;
- u8 processor_id; /* ACPI processor id */
- u8 id; /* SAPIC ID */
- u8 eid; /* SAPIC EID */
- u8 reserved[3]; /* Reserved, must be zero */
- u32 lapic_flags;
- u32 uid; /* Numeric UID - ACPI 3.0 */
- char uid_string[1]; /* String UID - ACPI 3.0 */
-};
-
-/* 8: Platform Interrupt Source */
-
-struct acpi_madt_interrupt_source {
- struct acpi_subtable_header header;
- u16 inti_flags;
- u8 type; /* 1=PMI, 2=INIT, 3=corrected */
- u8 id; /* Processor ID */
- u8 eid; /* Processor EID */
- u8 io_sapic_vector; /* Vector value for PMI interrupts */
- u32 global_irq; /* Global system interrupt */
- u32 flags; /* Interrupt Source Flags */
-};
-
-/* Masks for Flags field above */
-
-#define ACPI_MADT_CPEI_OVERRIDE (1)
-
-/* 9: Processor Local X2APIC (ACPI 4.0) */
-
-struct acpi_madt_local_x2apic {
- struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
- u32 local_apic_id; /* Processor x2APIC ID */
- u32 lapic_flags;
- u32 uid; /* ACPI processor UID */
-};
-
-/* 10: Local X2APIC NMI (ACPI 4.0) */
-
-struct acpi_madt_local_x2apic_nmi {
- struct acpi_subtable_header header;
- u16 inti_flags;
- u32 uid; /* ACPI processor UID */
- u8 lint; /* LINTn to which NMI is connected */
- u8 reserved[3]; /* reserved - must be zero */
-};
-
-/* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */
-
-struct acpi_madt_generic_interrupt {
- struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
- u32 cpu_interface_number;
- u32 uid;
- u32 flags;
- u32 parking_version;
- u32 performance_interrupt;
- u64 parked_address;
- u64 base_address;
- u64 gicv_base_address;
- u64 gich_base_address;
- u32 vgic_interrupt;
- u64 gicr_base_address;
- u64 arm_mpidr;
- u8 efficiency_class;
- u8 reserved2[3];
-};
-
-/* Masks for Flags field above */
-
-/* ACPI_MADT_ENABLED (1) Processor is usable if set */
-#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
-#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
-
-/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
-
-struct acpi_madt_generic_distributor {
- struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
- u32 gic_id;
- u64 base_address;
- u32 global_irq_base;
- u8 version;
- u8 reserved2[3]; /* reserved - must be zero */
-};
-
-/* Values for Version field above */
-
-enum acpi_madt_gic_version {
- ACPI_MADT_GIC_VERSION_NONE = 0,
- ACPI_MADT_GIC_VERSION_V1 = 1,
- ACPI_MADT_GIC_VERSION_V2 = 2,
- ACPI_MADT_GIC_VERSION_V3 = 3,
- ACPI_MADT_GIC_VERSION_V4 = 4,
- ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */
-};
-
-/* 13: Generic MSI Frame (ACPI 5.1) */
-
-struct acpi_madt_generic_msi_frame {
- struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
- u32 msi_frame_id;
- u64 base_address;
- u32 flags;
- u16 spi_count;
- u16 spi_base;
+ u32 id; /* Hardware ID of event timer block */
+ struct acpi_generic_address address; /* Address of event timer block */
+ u8 sequence; /* HPET sequence number */
+ u16 minimum_tick; /* Main counter min tick, periodic mode */
+ u8 flags;
};
/* Masks for Flags field above */
-#define ACPI_MADT_OVERRIDE_SPI_VALUES (1)
-
-/* 14: Generic Redistributor (ACPI 5.1) */
-
-struct acpi_madt_generic_redistributor {
- struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
- u64 base_address;
- u32 length;
-};
+#define ACPI_HPET_PAGE_PROTECT_MASK (3)
-/* 15: Generic Translator (ACPI 6.0) */
+/* Values for Page Protect flags */
-struct acpi_madt_generic_translator {
- struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
- u32 translation_id;
- u64 base_address;
- u32 reserved2;
+enum acpi_hpet_page_protect {
+ ACPI_HPET_NO_PAGE_PROTECT = 0,
+ ACPI_HPET_PAGE_PROTECT4 = 1,
+ ACPI_HPET_PAGE_PROTECT64 = 2
};
-/*
- * Common flags fields for MADT subtables
- */
-
-/* MADT Local APIC flags */
-
-#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
-
-/* MADT MPS INTI flags (inti_flags) */
-
-#define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */
-#define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */
-
-/* Values for MPS INTI flags */
-
-#define ACPI_MADT_POLARITY_CONFORMS 0
-#define ACPI_MADT_POLARITY_ACTIVE_HIGH 1
-#define ACPI_MADT_POLARITY_RESERVED 2
-#define ACPI_MADT_POLARITY_ACTIVE_LOW 3
-
-#define ACPI_MADT_TRIGGER_CONFORMS (0)
-#define ACPI_MADT_TRIGGER_EDGE (1<<2)
-#define ACPI_MADT_TRIGGER_RESERVED (2<<2)
-#define ACPI_MADT_TRIGGER_LEVEL (3<<2)
-
/*******************************************************************************
*
- * MSCT - Maximum System Characteristics Table (ACPI 4.0)
+ * IBFT - Boot Firmware Table
* Version 1
*
- ******************************************************************************/
-
-struct acpi_table_msct {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 proximity_offset; /* Location of proximity info struct(s) */
- u32 max_proximity_domains; /* Max number of proximity domains */
- u32 max_clock_domains; /* Max number of clock domains */
- u64 max_address; /* Max physical address in system */
-};
-
-/* subtable - Maximum Proximity Domain Information. Version 1 */
-
-struct acpi_msct_proximity {
- u8 revision;
- u8 length;
- u32 range_start; /* Start of domain range */
- u32 range_end; /* End of domain range */
- u32 processor_capacity;
- u64 memory_capacity; /* In bytes */
-};
-
-/*******************************************************************************
+ * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b
+ * Specification", Version 1.01, March 1, 2007
*
- * NFIT - NVDIMM Interface Table (ACPI 6.0+)
- * Version 1
+ * Note: It appears that this table is not intended to appear in the RSDT/XSDT.
+ * Therefore, it is not currently supported by the disassembler.
*
******************************************************************************/
-struct acpi_table_nfit {
+struct acpi_table_ibft {
struct acpi_table_header header; /* Common ACPI table header */
- u32 reserved; /* Reserved, must be zero */
+ u8 reserved[12];
};
-/* Subtable header for NFIT */
+/* IBFT common subtable header */
-struct acpi_nfit_header {
- u16 type;
+struct acpi_ibft_header {
+ u8 type;
+ u8 version;
u16 length;
-};
-
-/* Values for subtable type in struct acpi_nfit_header */
-
-enum acpi_nfit_type {
- ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0,
- ACPI_NFIT_TYPE_MEMORY_MAP = 1,
- ACPI_NFIT_TYPE_INTERLEAVE = 2,
- ACPI_NFIT_TYPE_SMBIOS = 3,
- ACPI_NFIT_TYPE_CONTROL_REGION = 4,
- ACPI_NFIT_TYPE_DATA_REGION = 5,
- ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
- ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
-};
-
-/*
- * NFIT Subtables
- */
-
-/* 0: System Physical Address Range Structure */
-
-struct acpi_nfit_system_address {
- struct acpi_nfit_header header;
- u16 range_index;
- u16 flags;
- u32 reserved; /* Reseved, must be zero */
- u32 proximity_domain;
- u8 range_guid[16];
- u64 address;
- u64 length;
- u64 memory_mapping;
-};
-
-/* Flags */
-
-#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */
-#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */
-
-/* Range Type GUIDs appear in the include/acuuid.h file */
-
-/* 1: Memory Device to System Address Range Map Structure */
-
-struct acpi_nfit_memory_map {
- struct acpi_nfit_header header;
- u32 device_handle;
- u16 physical_id;
- u16 region_id;
- u16 range_index;
- u16 region_index;
- u64 region_size;
- u64 region_offset;
- u64 address;
- u16 interleave_index;
- u16 interleave_ways;
- u16 flags;
- u16 reserved; /* Reserved, must be zero */
-};
-
-/* Flags */
-
-#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */
-#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */
-#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */
-#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */
-#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */
-#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */
-#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */
-
-/* 2: Interleave Structure */
-
-struct acpi_nfit_interleave {
- struct acpi_nfit_header header;
- u16 interleave_index;
- u16 reserved; /* Reserved, must be zero */
- u32 line_count;
- u32 line_size;
- u32 line_offset[1]; /* Variable length */
-};
-
-/* 3: SMBIOS Management Information Structure */
-
-struct acpi_nfit_smbios {
- struct acpi_nfit_header header;
- u32 reserved; /* Reserved, must be zero */
- u8 data[1]; /* Variable length */
-};
-
-/* 4: NVDIMM Control Region Structure */
-
-struct acpi_nfit_control_region {
- struct acpi_nfit_header header;
- u16 region_index;
- u16 vendor_id;
- u16 device_id;
- u16 revision_id;
- u16 subsystem_vendor_id;
- u16 subsystem_device_id;
- u16 subsystem_revision_id;
- u8 valid_fields;
- u8 manufacturing_location;
- u16 manufacturing_date;
- u8 reserved[2]; /* Reserved, must be zero */
- u32 serial_number;
- u16 code;
- u16 windows;
- u64 window_size;
- u64 command_offset;
- u64 command_size;
- u64 status_offset;
- u64 status_size;
- u16 flags;
- u8 reserved1[6]; /* Reserved, must be zero */
-};
-
-/* Flags */
-
-#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
-
-/* valid_fields bits */
-
-#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */
-
-/* 5: NVDIMM Block Data Window Region Structure */
-
-struct acpi_nfit_data_region {
- struct acpi_nfit_header header;
- u16 region_index;
- u16 windows;
- u64 offset;
- u64 size;
- u64 capacity;
- u64 start_address;
-};
-
-/* 6: Flush Hint Address Structure */
-
-struct acpi_nfit_flush_address {
- struct acpi_nfit_header header;
- u32 device_handle;
- u16 hint_count;
- u8 reserved[6]; /* Reserved, must be zero */
- u64 hint_address[1]; /* Variable length */
-};
-
-/*******************************************************************************
- *
- * PDTT - Processor Debug Trigger Table (ACPI 6.2)
- * Version 0
- *
- ******************************************************************************/
-
-struct acpi_table_pdtt {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 trigger_count;
- u8 reserved[3];
- u32 array_offset;
-};
-
-/*
- * PDTT Communication Channel Identifier Structure.
- * The number of these structures is defined by trigger_count above,
- * starting at array_offset.
- */
-struct acpi_pdtt_channel {
- u16 sub_channel_id;
-};
-
-/* Mask and Flags for above */
-
-#define ACPI_PDTT_SUBCHANNEL_ID_MASK 0x00FF
-#define ACPI_PDTT_RUNTIME_TRIGGER (1<<8)
-#define ACPI_PPTT_WAIT_COMPLETION (1<<9)
-
-/*******************************************************************************
- *
- * PPTT - Processor Properties Topology Table (ACPI 6.2)
- * Version 1
- *
- ******************************************************************************/
-
-struct acpi_table_pptt {
- struct acpi_table_header header; /* Common ACPI table header */
+ u8 index;
+ u8 flags;
};
/* Values for Type field above */
-enum acpi_pptt_type {
- ACPI_PPTT_TYPE_PROCESSOR = 0,
- ACPI_PPTT_TYPE_CACHE = 1,
- ACPI_PPTT_TYPE_ID = 2,
- ACPI_PPTT_TYPE_RESERVED = 3
-};
-
-/* 0: Processor Hierarchy Node Structure */
-
-struct acpi_pptt_processor {
- struct acpi_subtable_header header;
- u16 reserved;
- u32 flags;
- u32 parent;
- u32 acpi_processor_id;
- u32 number_of_priv_resources;
-};
-
-/* Flags */
-
-#define ACPI_PPTT_PHYSICAL_PACKAGE (1) /* Physical package */
-#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID (2) /* ACPI Processor ID valid */
-
-/* 1: Cache Type Structure */
-
-struct acpi_pptt_cache {
- struct acpi_subtable_header header;
- u16 reserved;
- u32 flags;
- u32 next_level_of_cache;
- u32 size;
- u32 number_of_sets;
- u8 associativity;
- u8 attributes;
- u16 line_size;
-};
-
-/* Flags */
-
-#define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Physical property valid */
-#define ACPI_PPTT_NUMBER_OF_SETS_VALID (1<<1) /* Number of sets valid */
-#define ACPI_PPTT_ASSOCIATIVITY_VALID (1<<2) /* Associativity valid */
-#define ACPI_PPTT_ALLOCATION_TYPE_VALID (1<<3) /* Allocation type valid */
-#define ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */
-#define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */
-#define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */
-
-/* Masks for Attributes */
-
-#define ACPI_PPTT_MASK_ALLOCATION_TYPE (0x03) /* Allocation type */
-#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */
-#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */
-
-/* 2: ID Structure */
-
-struct acpi_pptt_id {
- struct acpi_subtable_header header;
- u16 reserved;
- u32 vendor_id;
- u64 level1_id;
- u64 level2_id;
- u16 major_rev;
- u16 minor_rev;
- u16 spin_rev;
-};
-
-/*******************************************************************************
- *
- * SBST - Smart Battery Specification Table
- * Version 1
- *
- ******************************************************************************/
-
-struct acpi_table_sbst {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 warning_level;
- u32 low_level;
- u32 critical_level;
-};
-
-/*******************************************************************************
- *
- * SLIT - System Locality Distance Information Table
- * Version 1
- *
- ******************************************************************************/
-
-struct acpi_table_slit {
- struct acpi_table_header header; /* Common ACPI table header */
- u64 locality_count;
- u8 entry[1]; /* Real size = localities^2 */
-};
-
-/*******************************************************************************
- *
- * SRAT - System Resource Affinity Table
- * Version 3
- *
- ******************************************************************************/
-
-struct acpi_table_srat {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 table_revision; /* Must be value '1' */
- u64 reserved; /* Reserved, must be zero */
-};
-
-/* Values for subtable type in struct acpi_subtable_header */
-
-enum acpi_srat_type {
- ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
- ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
- ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
- ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
- ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */
- ACPI_SRAT_TYPE_RESERVED = 5 /* 5 and greater are reserved */
-};
-
-/*
- * SRAT Subtables, correspond to Type in struct acpi_subtable_header
- */
-
-/* 0: Processor Local APIC/SAPIC Affinity */
-
-struct acpi_srat_cpu_affinity {
- struct acpi_subtable_header header;
- u8 proximity_domain_lo;
- u8 apic_id;
- u32 flags;
- u8 local_sapic_eid;
- u8 proximity_domain_hi[3];
- u32 clock_domain;
-};
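
/*
 * Illustrative sketch, not part of the original header: the 32-bit
 * proximity domain is split across proximity_domain_lo (bits 7:0) and
 * proximity_domain_hi[3] (bits 31:8); the high bytes are only meaningful
 * for SRAT revision 2 or later. The helper name is hypothetical.
 */
static inline u32 acpi_srat_cpu_proximity(struct acpi_srat_cpu_affinity *cpu)
{
	return cpu->proximity_domain_lo |
	       ((u32)cpu->proximity_domain_hi[0] << 8) |
	       ((u32)cpu->proximity_domain_hi[1] << 16) |
	       ((u32)cpu->proximity_domain_hi[2] << 24);
}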
-
-/* Flags */
-
-#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */
-
-/* 1: Memory Affinity */
-
-struct acpi_srat_mem_affinity {
- struct acpi_subtable_header header;
- u32 proximity_domain;
- u16 reserved; /* Reserved, must be zero */
- u64 base_address;
- u64 length;
- u32 reserved1;
- u32 flags;
- u64 reserved2; /* Reserved, must be zero */
-};
-
-/* Flags */
-
-#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */
-#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */
-#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */
-
-/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */
-
-struct acpi_srat_x2apic_cpu_affinity {
- struct acpi_subtable_header header;
- u16 reserved; /* Reserved, must be zero */
- u32 proximity_domain;
- u32 apic_id;
- u32 flags;
- u32 clock_domain;
- u32 reserved2;
-};
-
-/* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */
-
-#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
-
-/* 3: GICC Affinity (ACPI 5.1) */
-
-struct acpi_srat_gicc_affinity {
- struct acpi_subtable_header header;
- u32 proximity_domain;
- u32 acpi_processor_uid;
- u32 flags;
- u32 clock_domain;
-};
-
-/* Flags for struct acpi_srat_gicc_affinity */
-
-#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
-
-/* 4: GIC ITS Affinity (ACPI 6.2) */
-
-struct acpi_srat_gic_its_affinity {
- struct acpi_subtable_header header;
- u32 proximity_domain;
- u16 reserved;
- u32 its_id;
+enum acpi_ibft_type {
+ ACPI_IBFT_TYPE_NOT_USED = 0,
+ ACPI_IBFT_TYPE_CONTROL = 1,
+ ACPI_IBFT_TYPE_INITIATOR = 2,
+ ACPI_IBFT_TYPE_NIC = 3,
+ ACPI_IBFT_TYPE_TARGET = 4,
+ ACPI_IBFT_TYPE_EXTENSIONS = 5,
+ ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */
+};
+
+/* IBFT subtables */
+
+struct acpi_ibft_control {
+ struct acpi_ibft_header header;
+ u16 extensions;
+ u16 initiator_offset;
+ u16 nic0_offset;
+ u16 target0_offset;
+ u16 nic1_offset;
+ u16 target1_offset;
+};
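
/*
 * Illustrative sketch, not part of the original header: the offsets in the
 * control structure are assumed to be byte offsets from the start of the
 * iBFT, with zero meaning the structure is absent. The helper name is
 * hypothetical; e.g. acpi_ibft_subtable_at(ibft, control->nic0_offset).
 */
static inline struct acpi_ibft_header *
acpi_ibft_subtable_at(struct acpi_table_header *ibft, u16 offset)
{
	return offset ?
	    (struct acpi_ibft_header *)((u8 *)ibft + offset) : NULL;
}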
+
+struct acpi_ibft_initiator {
+ struct acpi_ibft_header header;
+ u8 sns_server[16];
+ u8 slp_server[16];
+ u8 primary_server[16];
+ u8 secondary_server[16];
+ u16 name_length;
+ u16 name_offset;
+};
+
+struct acpi_ibft_nic {
+ struct acpi_ibft_header header;
+ u8 ip_address[16];
+ u8 subnet_mask_prefix;
+ u8 origin;
+ u8 gateway[16];
+ u8 primary_dns[16];
+ u8 secondary_dns[16];
+ u8 dhcp[16];
+ u16 vlan;
+ u8 mac_address[6];
+ u16 pci_address;
+ u16 name_length;
+ u16 name_offset;
+};
+
+struct acpi_ibft_target {
+ struct acpi_ibft_header header;
+ u8 target_ip_address[16];
+ u16 target_ip_socket;
+ u8 target_boot_lun[8];
+ u8 chap_type;
+ u8 nic_association;
+ u16 target_name_length;
+ u16 target_name_offset;
+ u16 chap_name_length;
+ u16 chap_name_offset;
+ u16 chap_secret_length;
+ u16 chap_secret_offset;
+ u16 reverse_chap_name_length;
+ u16 reverse_chap_name_offset;
+ u16 reverse_chap_secret_length;
+ u16 reverse_chap_secret_offset;
};
/* Reset to default packing */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 686b6f8c09dc..876012da8e6e 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACTBL2_H__
#define __ACTBL2_H__
@@ -51,9 +17,6 @@
* These tables are not consumed directly by the ACPICA subsystem, but are
* included here to support device drivers and the AML disassembler.
*
- * Generally, the tables in this file are defined by third-party specifications,
- * and are not defined directly by the ACPI specification itself.
- *
******************************************************************************/
/*
@@ -61,44 +24,25 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
-#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
-#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
-#define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */
-#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */
-#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
-#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
-#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
-#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
+#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
+#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
+#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */
+#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
+#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
+#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
+#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
+#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
+#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
+#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
-#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
-#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
-#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
-#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
-#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
-#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */
-#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */
-#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */
-#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
-#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */
-#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
-#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Migrations Table */
-#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */
-
-#ifdef ACPI_UNDEFINED_TABLES
-/*
- * These tables have been seen in the field, but no definition has been found
- */
-#define ACPI_SIG_ATKG "ATKG"
-#define ACPI_SIG_GSCI "GSCI" /* GMCH SCI table */
-#define ACPI_SIG_IEIT "IEIT"
-#endif
+#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -120,548 +64,6 @@
/*******************************************************************************
*
- * ASF - Alert Standard Format table (Signature "ASF!")
- * Revision 0x10
- *
- * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003
- *
- ******************************************************************************/
-
-struct acpi_table_asf {
- struct acpi_table_header header; /* Common ACPI table header */
-};
-
-/* ASF subtable header */
-
-struct acpi_asf_header {
- u8 type;
- u8 reserved;
- u16 length;
-};
-
-/* Values for Type field above */
-
-enum acpi_asf_type {
- ACPI_ASF_TYPE_INFO = 0,
- ACPI_ASF_TYPE_ALERT = 1,
- ACPI_ASF_TYPE_CONTROL = 2,
- ACPI_ASF_TYPE_BOOT = 3,
- ACPI_ASF_TYPE_ADDRESS = 4,
- ACPI_ASF_TYPE_RESERVED = 5
-};
-
-/*
- * ASF subtables
- */
-
-/* 0: ASF Information */
-
-struct acpi_asf_info {
- struct acpi_asf_header header;
- u8 min_reset_value;
- u8 min_poll_interval;
- u16 system_id;
- u32 mfg_id;
- u8 flags;
- u8 reserved2[3];
-};
-
-/* Masks for Flags field above */
-
-#define ACPI_ASF_SMBUS_PROTOCOLS (1)
-
-/* 1: ASF Alerts */
-
-struct acpi_asf_alert {
- struct acpi_asf_header header;
- u8 assert_mask;
- u8 deassert_mask;
- u8 alerts;
- u8 data_length;
-};
-
-struct acpi_asf_alert_data {
- u8 address;
- u8 command;
- u8 mask;
- u8 value;
- u8 sensor_type;
- u8 type;
- u8 offset;
- u8 source_type;
- u8 severity;
- u8 sensor_number;
- u8 entity;
- u8 instance;
-};
-
-/* 2: ASF Remote Control */
-
-struct acpi_asf_remote {
- struct acpi_asf_header header;
- u8 controls;
- u8 data_length;
- u16 reserved2;
-};
-
-struct acpi_asf_control_data {
- u8 function;
- u8 address;
- u8 command;
- u8 value;
-};
-
-/* 3: ASF RMCP Boot Options */
-
-struct acpi_asf_rmcp {
- struct acpi_asf_header header;
- u8 capabilities[7];
- u8 completion_code;
- u32 enterprise_id;
- u8 command;
- u16 parameter;
- u16 boot_options;
- u16 oem_parameters;
-};
-
-/* 4: ASF Address */
-
-struct acpi_asf_address {
- struct acpi_asf_header header;
- u8 eprom_address;
- u8 devices;
-};
-
-/*******************************************************************************
- *
- * BOOT - Simple Boot Flag Table
- * Version 1
- *
- * Conforms to the "Simple Boot Flag Specification", Version 2.1
- *
- ******************************************************************************/
-
-struct acpi_table_boot {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 cmos_index; /* Index in CMOS RAM for the boot register */
- u8 reserved[3];
-};
-
-/*******************************************************************************
- *
- * CSRT - Core System Resource Table
- * Version 0
- *
- * Conforms to the "Core System Resource Table (CSRT)", November 14, 2011
- *
- ******************************************************************************/
-
-struct acpi_table_csrt {
- struct acpi_table_header header; /* Common ACPI table header */
-};
-
-/* Resource Group subtable */
-
-struct acpi_csrt_group {
- u32 length;
- u32 vendor_id;
- u32 subvendor_id;
- u16 device_id;
- u16 subdevice_id;
- u16 revision;
- u16 reserved;
- u32 shared_info_length;
-
- /* Shared data immediately follows (Length = shared_info_length) */
-};
-
-/* Shared Info subtable */
-
-struct acpi_csrt_shared_info {
- u16 major_version;
- u16 minor_version;
- u32 mmio_base_low;
- u32 mmio_base_high;
- u32 gsi_interrupt;
- u8 interrupt_polarity;
- u8 interrupt_mode;
- u8 num_channels;
- u8 dma_address_width;
- u16 base_request_line;
- u16 num_handshake_signals;
- u32 max_block_size;
-
- /* Resource descriptors immediately follow (Length = Group length - shared_info_length) */
-};
-
-/* Resource Descriptor subtable */
-
-struct acpi_csrt_descriptor {
- u32 length;
- u16 type;
- u16 subtype;
- u32 uid;
-
- /* Resource-specific information immediately follows */
-};
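
/*
 * Illustrative sketch, not part of the original header: resource groups
 * follow struct acpi_table_csrt back to back, and each group's length
 * field covers its shared info and descriptors. Pass NULL to get the
 * first group. The helper name is hypothetical.
 */
static inline struct acpi_csrt_group *
acpi_csrt_next_group(struct acpi_table_csrt *csrt, struct acpi_csrt_group *cur)
{
	u8 *end = (u8 *)csrt + csrt->header.length;
	u8 *next = cur ? (u8 *)cur + cur->length
		       : (u8 *)csrt + sizeof(*csrt);

	return (next < end) ? (struct acpi_csrt_group *)next : NULL;
}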
-
-/* Resource Types */
-
-#define ACPI_CSRT_TYPE_INTERRUPT 0x0001
-#define ACPI_CSRT_TYPE_TIMER 0x0002
-#define ACPI_CSRT_TYPE_DMA 0x0003
-
-/* Resource Subtypes */
-
-#define ACPI_CSRT_XRUPT_LINE 0x0000
-#define ACPI_CSRT_XRUPT_CONTROLLER 0x0001
-#define ACPI_CSRT_TIMER 0x0000
-#define ACPI_CSRT_DMA_CHANNEL 0x0000
-#define ACPI_CSRT_DMA_CONTROLLER 0x0001
-
-/*******************************************************************************
- *
- * DBG2 - Debug Port Table 2
- * Version 0 (Both main table and subtables)
- *
- * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
- *
- ******************************************************************************/
-
-struct acpi_table_dbg2 {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 info_offset;
- u32 info_count;
-};
-
-struct acpi_dbg2_header {
- u32 info_offset;
- u32 info_count;
-};
-
-/* Debug Device Information Subtable */
-
-struct acpi_dbg2_device {
- u8 revision;
- u16 length;
- u8 register_count; /* Number of base_address registers */
- u16 namepath_length;
- u16 namepath_offset;
- u16 oem_data_length;
- u16 oem_data_offset;
- u16 port_type;
- u16 port_subtype;
- u16 reserved;
- u16 base_address_offset;
- u16 address_size_offset;
- /*
- * Data that follows:
- * base_address (required) - Each in 12-byte Generic Address Structure format.
- * address_size (required) - Array of u32 sizes corresponding to each base_address register.
- * Namepath (required) - Null terminated string. Single dot if not supported.
- * oem_data (optional) - Length is oem_data_length.
- */
-};
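
/*
 * Illustrative sketch, not part of the original header: base_address_offset
 * and address_size_offset are byte offsets from the start of this device
 * structure, each pointing to an array of register_count entries. The
 * helper name is hypothetical.
 */
static inline struct acpi_generic_address *
acpi_dbg2_base_address(struct acpi_dbg2_device *dev, u8 index)
{
	struct acpi_generic_address *regs = (struct acpi_generic_address *)
	    ((u8 *)dev + dev->base_address_offset);

	return (index < dev->register_count) ? &regs[index] : NULL;
}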
-
-/* Types for port_type field above */
-
-#define ACPI_DBG2_SERIAL_PORT 0x8000
-#define ACPI_DBG2_1394_PORT 0x8001
-#define ACPI_DBG2_USB_PORT 0x8002
-#define ACPI_DBG2_NET_PORT 0x8003
-
-/* Subtypes for port_subtype field above */
-
-#define ACPI_DBG2_16550_COMPATIBLE 0x0000
-#define ACPI_DBG2_16550_SUBSET 0x0001
-#define ACPI_DBG2_ARM_PL011 0x0003
-#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D
-#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E
-#define ACPI_DBG2_ARM_DCC 0x000F
-#define ACPI_DBG2_BCM2835 0x0010
-
-#define ACPI_DBG2_1394_STANDARD 0x0000
-
-#define ACPI_DBG2_USB_XHCI 0x0000
-#define ACPI_DBG2_USB_EHCI 0x0001
-
-/*******************************************************************************
- *
- * DBGP - Debug Port table
- * Version 1
- *
- * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000
- *
- ******************************************************************************/
-
-struct acpi_table_dbgp {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 type; /* 0=full 16550, 1=subset of 16550 */
- u8 reserved[3];
- struct acpi_generic_address debug_port;
-};
-
-/*******************************************************************************
- *
- * DMAR - DMA Remapping table
- * Version 1
- *
- * Conforms to "Intel Virtualization Technology for Directed I/O",
- * Version 2.3, October 2014
- *
- ******************************************************************************/
-
-struct acpi_table_dmar {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 width; /* Host Address Width */
- u8 flags;
- u8 reserved[10];
-};
-
-/* Masks for Flags field above */
-
-#define ACPI_DMAR_INTR_REMAP (1)
-#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1)
-#define ACPI_DMAR_X2APIC_MODE (1<<2)
-
-/* DMAR subtable header */
-
-struct acpi_dmar_header {
- u16 type;
- u16 length;
-};
-
-/* Values for subtable type in struct acpi_dmar_header */
-
-enum acpi_dmar_type {
- ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
- ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
- ACPI_DMAR_TYPE_ROOT_ATS = 2,
- ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3,
- ACPI_DMAR_TYPE_NAMESPACE = 4,
- ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */
-};
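
/*
 * Illustrative sketch, not part of the original header: remapping
 * structures follow the fixed-length struct acpi_table_dmar and are
 * chained by their 16-bit length fields. The callback and helper names
 * are hypothetical.
 */
static inline void
acpi_dmar_for_each(struct acpi_table_dmar *dmar,
		   void (*cb)(struct acpi_dmar_header *sub))
{
	u8 *p = (u8 *)dmar + sizeof(*dmar);
	u8 *end = (u8 *)dmar + dmar->header.length;

	while (p + sizeof(struct acpi_dmar_header) <= end) {
		struct acpi_dmar_header *sub = (struct acpi_dmar_header *)p;

		if (!sub->length)	/* Guard against a malformed table */
			break;
		cb(sub);
		p += sub->length;
	}
}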
-
-/* DMAR Device Scope structure */
-
-struct acpi_dmar_device_scope {
- u8 entry_type;
- u8 length;
- u16 reserved;
- u8 enumeration_id;
- u8 bus;
-};
-
-/* Values for entry_type in struct acpi_dmar_device_scope - device types */
-
-enum acpi_dmar_scope_type {
- ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
- ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
- ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
- ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
- ACPI_DMAR_SCOPE_TYPE_HPET = 4,
- ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5,
- ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */
-};
-
-struct acpi_dmar_pci_path {
- u8 device;
- u8 function;
-};
-
-/*
- * DMAR Subtables, correspond to Type in struct acpi_dmar_header
- */
-
-/* 0: Hardware Unit Definition */
-
-struct acpi_dmar_hardware_unit {
- struct acpi_dmar_header header;
- u8 flags;
- u8 reserved;
- u16 segment;
- u64 address; /* Register Base Address */
-};
-
-/* Masks for Flags field above */
-
-#define ACPI_DMAR_INCLUDE_ALL (1)
-
-/* 1: Reserved Memory Definition */
-
-struct acpi_dmar_reserved_memory {
- struct acpi_dmar_header header;
- u16 reserved;
- u16 segment;
- u64 base_address; /* 4K aligned base address */
- u64 end_address; /* 4K aligned limit address */
-};
-
-/* Masks for Flags field above */
-
-#define ACPI_DMAR_ALLOW_ALL (1)
-
-/* 2: Root Port ATS Capability Reporting Structure */
-
-struct acpi_dmar_atsr {
- struct acpi_dmar_header header;
- u8 flags;
- u8 reserved;
- u16 segment;
-};
-
-/* Masks for Flags field above */
-
-#define ACPI_DMAR_ALL_PORTS (1)
-
-/* 3: Remapping Hardware Static Affinity Structure */
-
-struct acpi_dmar_rhsa {
- struct acpi_dmar_header header;
- u32 reserved;
- u64 base_address;
- u32 proximity_domain;
-};
-
-/* 4: ACPI Namespace Device Declaration Structure */
-
-struct acpi_dmar_andd {
- struct acpi_dmar_header header;
- u8 reserved[3];
- u8 device_number;
- char device_name[1];
-};
-
-/*******************************************************************************
- *
- * HPET - High Precision Event Timer table
- * Version 1
- *
- * Conforms to "IA-PC HPET (High Precision Event Timers) Specification",
- * Version 1.0a, October 2004
- *
- ******************************************************************************/
-
-struct acpi_table_hpet {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 id; /* Hardware ID of event timer block */
- struct acpi_generic_address address; /* Address of event timer block */
- u8 sequence; /* HPET sequence number */
- u16 minimum_tick; /* Main counter min tick, periodic mode */
- u8 flags;
-};
-
-/* Masks for Flags field above */
-
-#define ACPI_HPET_PAGE_PROTECT_MASK (3)
-
-/* Values for Page Protect flags */
-
-enum acpi_hpet_page_protect {
- ACPI_HPET_NO_PAGE_PROTECT = 0,
- ACPI_HPET_PAGE_PROTECT4 = 1,
- ACPI_HPET_PAGE_PROTECT64 = 2
-};
-
-/*******************************************************************************
- *
- * IBFT - iSCSI Boot Firmware Table
- * Version 1
- *
- * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b
- * Specification", Version 1.01, March 1, 2007
- *
- * Note: It appears that this table is not intended to appear in the RSDT/XSDT.
- * Therefore, it is not currently supported by the disassembler.
- *
- ******************************************************************************/
-
-struct acpi_table_ibft {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 reserved[12];
-};
-
-/* IBFT common subtable header */
-
-struct acpi_ibft_header {
- u8 type;
- u8 version;
- u16 length;
- u8 index;
- u8 flags;
-};
-
-/* Values for Type field above */
-
-enum acpi_ibft_type {
- ACPI_IBFT_TYPE_NOT_USED = 0,
- ACPI_IBFT_TYPE_CONTROL = 1,
- ACPI_IBFT_TYPE_INITIATOR = 2,
- ACPI_IBFT_TYPE_NIC = 3,
- ACPI_IBFT_TYPE_TARGET = 4,
- ACPI_IBFT_TYPE_EXTENSIONS = 5,
- ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */
-};
-
-/* IBFT subtables */
-
-struct acpi_ibft_control {
- struct acpi_ibft_header header;
- u16 extensions;
- u16 initiator_offset;
- u16 nic0_offset;
- u16 target0_offset;
- u16 nic1_offset;
- u16 target1_offset;
-};
-
-struct acpi_ibft_initiator {
- struct acpi_ibft_header header;
- u8 sns_server[16];
- u8 slp_server[16];
- u8 primary_server[16];
- u8 secondary_server[16];
- u16 name_length;
- u16 name_offset;
-};
-
-struct acpi_ibft_nic {
- struct acpi_ibft_header header;
- u8 ip_address[16];
- u8 subnet_mask_prefix;
- u8 origin;
- u8 gateway[16];
- u8 primary_dns[16];
- u8 secondary_dns[16];
- u8 dhcp[16];
- u16 vlan;
- u8 mac_address[6];
- u16 pci_address;
- u16 name_length;
- u16 name_offset;
-};
-
-struct acpi_ibft_target {
- struct acpi_ibft_header header;
- u8 target_ip_address[16];
- u16 target_ip_socket;
- u8 target_boot_lun[8];
- u8 chap_type;
- u8 nic_association;
- u16 target_name_length;
- u16 target_name_offset;
- u16 chap_name_length;
- u16 chap_name_offset;
- u16 chap_secret_length;
- u16 chap_secret_offset;
- u16 reverse_chap_name_length;
- u16 reverse_chap_name_offset;
- u16 reverse_chap_secret_length;
- u16 reverse_chap_secret_offset;
-};
-
-/*******************************************************************************
- *
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
@@ -810,6 +212,7 @@ struct acpi_iort_smmu_v3 {
u8 pxm;
u8 reserved1;
u16 reserved2;
+ u32 id_mapping_index;
};
/* Values for Model field above */
@@ -1047,6 +450,278 @@ struct acpi_lpit_native {
/*******************************************************************************
*
+ * MADT - Multiple APIC Description Table
+ * Version 3
+ *
+ ******************************************************************************/
+
+struct acpi_table_madt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 address; /* Physical address of local APIC */
+ u32 flags;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */
+
+/* Values for PCATCompat flag */
+
+#define ACPI_MADT_DUAL_PIC 1
+#define ACPI_MADT_MULTIPLE_APIC 0
+
+/* Values for MADT subtable type in struct acpi_subtable_header */
+
+enum acpi_madt_type {
+ ACPI_MADT_TYPE_LOCAL_APIC = 0,
+ ACPI_MADT_TYPE_IO_APIC = 1,
+ ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2,
+ ACPI_MADT_TYPE_NMI_SOURCE = 3,
+ ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4,
+ ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5,
+ ACPI_MADT_TYPE_IO_SAPIC = 6,
+ ACPI_MADT_TYPE_LOCAL_SAPIC = 7,
+ ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
+ ACPI_MADT_TYPE_LOCAL_X2APIC = 9,
+ ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10,
+ ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11,
+ ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
+ ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
+ ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
+ ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
+ ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */
+};
+
+/*
+ * MADT Subtables, correspond to Type in struct acpi_subtable_header
+ */
+
+/* 0: Processor Local APIC */
+
+struct acpi_madt_local_apic {
+ struct acpi_subtable_header header;
+ u8 processor_id; /* ACPI processor id */
+ u8 id; /* Processor's local APIC id */
+ u32 lapic_flags;
+};
+
+/* 1: IO APIC */
+
+struct acpi_madt_io_apic {
+ struct acpi_subtable_header header;
+ u8 id; /* I/O APIC ID */
+ u8 reserved; /* reserved - must be zero */
+ u32 address; /* APIC physical address */
+ u32 global_irq_base; /* Global system interrupt where INTI lines start */
+};
+
+/* 2: Interrupt Override */
+
+struct acpi_madt_interrupt_override {
+ struct acpi_subtable_header header;
+ u8 bus; /* 0 - ISA */
+ u8 source_irq; /* Interrupt source (IRQ) */
+ u32 global_irq; /* Global system interrupt */
+ u16 inti_flags;
+};
+
+/* 3: NMI Source */
+
+struct acpi_madt_nmi_source {
+ struct acpi_subtable_header header;
+ u16 inti_flags;
+ u32 global_irq; /* Global system interrupt */
+};
+
+/* 4: Local APIC NMI */
+
+struct acpi_madt_local_apic_nmi {
+ struct acpi_subtable_header header;
+ u8 processor_id; /* ACPI processor id */
+ u16 inti_flags;
+ u8 lint; /* LINTn to which NMI is connected */
+};
+
+/* 5: Address Override */
+
+struct acpi_madt_local_apic_override {
+ struct acpi_subtable_header header;
+ u16 reserved; /* Reserved, must be zero */
+ u64 address; /* APIC physical address */
+};
+
+/* 6: I/O Sapic */
+
+struct acpi_madt_io_sapic {
+ struct acpi_subtable_header header;
+ u8 id; /* I/O SAPIC ID */
+ u8 reserved; /* Reserved, must be zero */
+ u32 global_irq_base; /* Global interrupt for SAPIC start */
+ u64 address; /* SAPIC physical address */
+};
+
+/* 7: Local Sapic */
+
+struct acpi_madt_local_sapic {
+ struct acpi_subtable_header header;
+ u8 processor_id; /* ACPI processor id */
+ u8 id; /* SAPIC ID */
+ u8 eid; /* SAPIC EID */
+ u8 reserved[3]; /* Reserved, must be zero */
+ u32 lapic_flags;
+ u32 uid; /* Numeric UID - ACPI 3.0 */
+ char uid_string[1]; /* String UID - ACPI 3.0 */
+};
+
+/* 8: Platform Interrupt Source */
+
+struct acpi_madt_interrupt_source {
+ struct acpi_subtable_header header;
+ u16 inti_flags;
+ u8 type; /* 1=PMI, 2=INIT, 3=corrected */
+ u8 id; /* Processor ID */
+ u8 eid; /* Processor EID */
+ u8 io_sapic_vector; /* Vector value for PMI interrupts */
+ u32 global_irq; /* Global system interrupt */
+ u32 flags; /* Interrupt Source Flags */
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_MADT_CPEI_OVERRIDE (1)
+
+/* 9: Processor Local X2APIC (ACPI 4.0) */
+
+struct acpi_madt_local_x2apic {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 local_apic_id; /* Processor x2APIC ID */
+ u32 lapic_flags;
+ u32 uid; /* ACPI processor UID */
+};
+
+/* 10: Local X2APIC NMI (ACPI 4.0) */
+
+struct acpi_madt_local_x2apic_nmi {
+ struct acpi_subtable_header header;
+ u16 inti_flags;
+ u32 uid; /* ACPI processor UID */
+ u8 lint; /* LINTn to which NMI is connected */
+ u8 reserved[3]; /* reserved - must be zero */
+};
+
+/* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */
+
+struct acpi_madt_generic_interrupt {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 cpu_interface_number;
+ u32 uid;
+ u32 flags;
+ u32 parking_version;
+ u32 performance_interrupt;
+ u64 parked_address;
+ u64 base_address;
+ u64 gicv_base_address;
+ u64 gich_base_address;
+ u32 vgic_interrupt;
+ u64 gicr_base_address;
+ u64 arm_mpidr;
+ u8 efficiency_class;
+ u8 reserved2[3];
+};
+
+/* Masks for Flags field above */
+
+/* ACPI_MADT_ENABLED (1) Processor is usable if set */
+#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
+#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
+
+/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
+
+struct acpi_madt_generic_distributor {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 gic_id;
+ u64 base_address;
+ u32 global_irq_base;
+ u8 version;
+ u8 reserved2[3]; /* reserved - must be zero */
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_gic_version {
+ ACPI_MADT_GIC_VERSION_NONE = 0,
+ ACPI_MADT_GIC_VERSION_V1 = 1,
+ ACPI_MADT_GIC_VERSION_V2 = 2,
+ ACPI_MADT_GIC_VERSION_V3 = 3,
+ ACPI_MADT_GIC_VERSION_V4 = 4,
+ ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */
+};
+
+/* 13: Generic MSI Frame (ACPI 5.1) */
+
+struct acpi_madt_generic_msi_frame {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 msi_frame_id;
+ u64 base_address;
+ u32 flags;
+ u16 spi_count;
+ u16 spi_base;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_MADT_OVERRIDE_SPI_VALUES (1)
+
+/* 14: Generic Redistributor (ACPI 5.1) */
+
+struct acpi_madt_generic_redistributor {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u64 base_address;
+ u32 length;
+};
+
+/* 15: Generic Translator (ACPI 6.0) */
+
+struct acpi_madt_generic_translator {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 translation_id;
+ u64 base_address;
+ u32 reserved2;
+};
+
+/*
+ * Common flags fields for MADT subtables
+ */
+
+/* MADT Local APIC flags */
+
+#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
+
+/* MADT MPS INTI flags (inti_flags) */
+
+#define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */
+#define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */
+
+/* Values for MPS INTI flags */
+
+#define ACPI_MADT_POLARITY_CONFORMS 0
+#define ACPI_MADT_POLARITY_ACTIVE_HIGH 1
+#define ACPI_MADT_POLARITY_RESERVED 2
+#define ACPI_MADT_POLARITY_ACTIVE_LOW 3
+
+#define ACPI_MADT_TRIGGER_CONFORMS (0)
+#define ACPI_MADT_TRIGGER_EDGE (1<<2)
+#define ACPI_MADT_TRIGGER_RESERVED (2<<2)
+#define ACPI_MADT_TRIGGER_LEVEL (3<<2)
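
/*
 * Illustrative sketch, not part of the original header: polarity and
 * trigger mode are packed into the inti_flags field and extracted with
 * the masks above. The helper names are hypothetical.
 */
static inline u16 acpi_madt_inti_polarity(u16 inti_flags)
{
	return inti_flags & ACPI_MADT_POLARITY_MASK; /* e.g. ACPI_MADT_POLARITY_ACTIVE_LOW */
}

static inline u16 acpi_madt_inti_trigger(u16 inti_flags)
{
	return inti_flags & ACPI_MADT_TRIGGER_MASK; /* e.g. ACPI_MADT_TRIGGER_LEVEL */
}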
+
+/*******************************************************************************
+ *
* MCFG - PCI Memory Mapped Configuration table and subtable
* Version 1
*
@@ -1097,6 +772,127 @@ struct acpi_table_mchi {
/*******************************************************************************
*
+ * MPST - Memory Power State Table (ACPI 5.0)
+ * Version 1
+ *
+ ******************************************************************************/
+
+#define ACPI_MPST_CHANNEL_INFO \
+ u8 channel_id; \
+ u8 reserved1[3]; \
+ u16 power_node_count; \
+ u16 reserved2;
+
+/* Main table */
+
+struct acpi_table_mpst {
+ struct acpi_table_header header; /* Common ACPI table header */
+ ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */
+};
+
+/* Memory Platform Communication Channel Info */
+
+struct acpi_mpst_channel {
+ ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */
+};
+
+/* Memory Power Node Structure */
+
+struct acpi_mpst_power_node {
+ u8 flags;
+ u8 reserved1;
+ u16 node_id;
+ u32 length;
+ u64 range_address;
+ u64 range_length;
+ u32 num_power_states;
+ u32 num_physical_components;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_MPST_ENABLED 1
+#define ACPI_MPST_POWER_MANAGED 2
+#define ACPI_MPST_HOT_PLUG_CAPABLE 4
+
+/* Memory Power State Structure (follows POWER_NODE above) */
+
+struct acpi_mpst_power_state {
+ u8 power_state;
+ u8 info_index;
+};
+
+/* Physical Component ID Structure (follows POWER_STATE above) */
+
+struct acpi_mpst_component {
+ u16 component_id;
+};
+
+/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */
+
+struct acpi_mpst_data_hdr {
+ u16 characteristics_count;
+ u16 reserved;
+};
+
+struct acpi_mpst_power_data {
+ u8 structure_id;
+ u8 flags;
+ u16 reserved1;
+ u32 average_power;
+ u32 power_saving;
+ u64 exit_latency;
+ u64 reserved2;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_MPST_PRESERVE 1
+#define ACPI_MPST_AUTOENTRY 2
+#define ACPI_MPST_AUTOEXIT 4
+
+/* Shared Memory Region (not part of an ACPI table) */
+
+struct acpi_mpst_shared {
+ u32 signature;
+ u16 pcc_command;
+ u16 pcc_status;
+ u32 command_register;
+ u32 status_register;
+ u32 power_state_id;
+ u32 power_node_id;
+ u64 energy_consumed;
+ u64 average_power;
+};
+
+/*******************************************************************************
+ *
+ * MSCT - Maximum System Characteristics Table (ACPI 4.0)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_msct {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 proximity_offset; /* Location of proximity info struct(s) */
+ u32 max_proximity_domains; /* Max number of proximity domains */
+ u32 max_clock_domains; /* Max number of clock domains */
+ u64 max_address; /* Max physical address in system */
+};
+
+/* subtable - Maximum Proximity Domain Information. Version 1 */
+
+struct acpi_msct_proximity {
+ u8 revision;
+ u8 length;
+ u32 range_start; /* Start of domain range */
+ u32 range_end; /* End of domain range */
+ u32 processor_capacity;
+ u64 memory_capacity; /* In bytes */
+};
+
+/*******************************************************************************
+ *
* MSDM - Microsoft Data Management table
*
* Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
@@ -1135,447 +931,778 @@ struct acpi_mtmr_entry {
/*******************************************************************************
*
- * SDEI - Software Delegated Exception Interface Descriptor Table
- *
- * Conforms to "Software Delegated Exception Interface (SDEI)" ARM DEN0054A,
- * May 8th, 2017. Copyright 2017 ARM Ltd.
+ * NFIT - NVDIMM Firmware Interface Table (ACPI 6.0+)
+ * Version 1
*
******************************************************************************/
-struct acpi_table_sdei {
+struct acpi_table_nfit {
struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved; /* Reserved, must be zero */
};
-/*******************************************************************************
- *
- * SLIC - Software Licensing Description Table
- *
- * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
- * November 29, 2011. Copyright 2011 Microsoft
- *
- ******************************************************************************/
+/* Subtable header for NFIT */
+
+struct acpi_nfit_header {
+ u16 type;
+ u16 length;
+};
-/* Basic SLIC table is only the common ACPI header */
+/* Values for subtable type in struct acpi_nfit_header */
-struct acpi_table_slic {
- struct acpi_table_header header; /* Common ACPI table header */
+enum acpi_nfit_type {
+ ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0,
+ ACPI_NFIT_TYPE_MEMORY_MAP = 1,
+ ACPI_NFIT_TYPE_INTERLEAVE = 2,
+ ACPI_NFIT_TYPE_SMBIOS = 3,
+ ACPI_NFIT_TYPE_CONTROL_REGION = 4,
+ ACPI_NFIT_TYPE_DATA_REGION = 5,
+ ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
+ ACPI_NFIT_TYPE_CAPABILITIES = 7,
+ ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */
};
-/*******************************************************************************
- *
- * SPCR - Serial Port Console Redirection table
- * Version 2
- *
- * Conforms to "Serial Port Console Redirection Table",
- * Version 1.03, August 10, 2015
- *
- ******************************************************************************/
+/*
+ * NFIT Subtables
+ */
-struct acpi_table_spcr {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
- u8 reserved[3];
- struct acpi_generic_address serial_port;
- u8 interrupt_type;
- u8 pc_interrupt;
- u32 interrupt;
- u8 baud_rate;
- u8 parity;
- u8 stop_bits;
- u8 flow_control;
- u8 terminal_type;
- u8 reserved1;
- u16 pci_device_id;
- u16 pci_vendor_id;
- u8 pci_bus;
- u8 pci_device;
- u8 pci_function;
- u32 pci_flags;
- u8 pci_segment;
- u32 reserved2;
+/* 0: System Physical Address Range Structure */
+
+struct acpi_nfit_system_address {
+ struct acpi_nfit_header header;
+ u16 range_index;
+ u16 flags;
+ u32 reserved; /* Reserved, must be zero */
+ u32 proximity_domain;
+ u8 range_guid[16];
+ u64 address;
+ u64 length;
+ u64 memory_mapping;
};
-/* Masks for pci_flags field above */
+/* Flags */
-#define ACPI_SPCR_DO_NOT_DISABLE (1)
+#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */
+#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */
-/* Values for Interface Type: See the definition of the DBG2 table */
+/* Range Type GUIDs appear in the include/acuuid.h file */
-/*******************************************************************************
- *
- * SPMI - Server Platform Management Interface table
- * Version 5
- *
- * Conforms to "Intelligent Platform Management Interface Specification
- * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with
- * June 12, 2009 markup.
- *
- ******************************************************************************/
+/* 1: Memory Device to System Address Range Map Structure */
-struct acpi_table_spmi {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 interface_type;
- u8 reserved; /* Must be 1 */
- u16 spec_revision; /* Version of IPMI */
- u8 interrupt_type;
- u8 gpe_number; /* GPE assigned */
- u8 reserved1;
- u8 pci_device_flag;
- u32 interrupt;
- struct acpi_generic_address ipmi_register;
- u8 pci_segment;
- u8 pci_bus;
- u8 pci_device;
- u8 pci_function;
- u8 reserved2;
+struct acpi_nfit_memory_map {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 physical_id;
+ u16 region_id;
+ u16 range_index;
+ u16 region_index;
+ u64 region_size;
+ u64 region_offset;
+ u64 address;
+ u16 interleave_index;
+ u16 interleave_ways;
+ u16 flags;
+ u16 reserved; /* Reserved, must be zero */
};
-/* Values for interface_type above */
+/* Flags */
+
+#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */
+#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */
+#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */
+#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */
+#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */
+#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */
+#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */
-enum acpi_spmi_interface_types {
- ACPI_SPMI_NOT_USED = 0,
- ACPI_SPMI_KEYBOARD = 1,
- ACPI_SPMI_SMI = 2,
- ACPI_SPMI_BLOCK_TRANSFER = 3,
- ACPI_SPMI_SMBUS = 4,
- ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */
+/* 2: Interleave Structure */
+
+struct acpi_nfit_interleave {
+ struct acpi_nfit_header header;
+ u16 interleave_index;
+ u16 reserved; /* Reserved, must be zero */
+ u32 line_count;
+ u32 line_size;
+ u32 line_offset[1]; /* Variable length */
};
-/*******************************************************************************
- *
- * TCPA - Trusted Computing Platform Alliance table
- * Version 2
- *
- * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
- * Version 1.2, Revision 8
- * February 27, 2017
- *
- * NOTE: There are two versions of the table with the same signature --
- * the client version and the server version. The common platform_class
- * field is used to differentiate the two types of tables.
- *
- ******************************************************************************/
+/* 3: SMBIOS Management Information Structure */
-struct acpi_table_tcpa_hdr {
- struct acpi_table_header header; /* Common ACPI table header */
- u16 platform_class;
+struct acpi_nfit_smbios {
+ struct acpi_nfit_header header;
+ u32 reserved; /* Reserved, must be zero */
+ u8 data[1]; /* Variable length */
};
-/*
- * Values for platform_class above.
- * This is how the client and server subtables are differentiated
- */
-#define ACPI_TCPA_CLIENT_TABLE 0
-#define ACPI_TCPA_SERVER_TABLE 1
+/* 4: NVDIMM Control Region Structure */
-struct acpi_table_tcpa_client {
- u32 minimum_log_length; /* Minimum length for the event log area */
- u64 log_address; /* Address of the event log area */
+struct acpi_nfit_control_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_revision_id;
+ u8 valid_fields;
+ u8 manufacturing_location;
+ u16 manufacturing_date;
+ u8 reserved[2]; /* Reserved, must be zero */
+ u32 serial_number;
+ u16 code;
+ u16 windows;
+ u64 window_size;
+ u64 command_offset;
+ u64 command_size;
+ u64 status_offset;
+ u64 status_size;
+ u16 flags;
+ u8 reserved1[6]; /* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
+
+/* valid_fields bits */
+
+#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */
+
+/* 5: NVDIMM Block Data Window Region Structure */
+
+struct acpi_nfit_data_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 windows;
+ u64 offset;
+ u64 size;
+ u64 capacity;
+ u64 start_address;
};
-struct acpi_table_tcpa_server {
- u16 reserved;
- u64 minimum_log_length; /* Minimum length for the event log area */
- u64 log_address; /* Address of the event log area */
- u16 spec_revision;
- u8 device_flags;
- u8 interrupt_flags;
- u8 gpe_number;
- u8 reserved2[3];
- u32 global_interrupt;
- struct acpi_generic_address address;
- u32 reserved3;
- struct acpi_generic_address config_address;
- u8 group;
- u8 bus; /* PCI Bus/Segment/Function numbers */
- u8 device;
- u8 function;
+/* 6: Flush Hint Address Structure */
+
+struct acpi_nfit_flush_address {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 hint_count;
+ u8 reserved[6]; /* Reserved, must be zero */
+ u64 hint_address[1]; /* Variable length */
+};
+
+/* 7: Platform Capabilities Structure */
+
+struct acpi_nfit_capabilities {
+ struct acpi_nfit_header header;
+ u8 highest_capability;
+ u8 reserved[3]; /* Reserved, must be zero */
+ u32 capabilities;
+ u32 reserved2;
+};
+
+/* Capabilities Flags */
+
+#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */
+
+/*
+ * NFIT/NVDIMM device handle support - used as the _ADR for each NVDIMM
+ */
+struct nfit_device_handle {
+ u32 handle;
};
-/* Values for device_flags above */
+/* Device handle construction and extraction macros */
+
+#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F
+#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0
+#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00
+#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000
+#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000
+
+#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0
+#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4
+#define ACPI_NFIT_MEMORY_ID_OFFSET 8
+#define ACPI_NFIT_SOCKET_ID_OFFSET 12
+#define ACPI_NFIT_NODE_ID_OFFSET 16
+
+/* Macro to construct a NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
+ ((dimm) | \
+ ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \
+ ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \
+ ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \
+ ((node) << ACPI_NFIT_NODE_ID_OFFSET))
-#define ACPI_TCPA_PCI_DEVICE (1)
-#define ACPI_TCPA_BUS_PNP (1<<1)
-#define ACPI_TCPA_ADDRESS_VALID (1<<2)
+/* Macros to extract individual fields from a NFIT/NVDIMM device handle */
-/* Values for interrupt_flags above */
+#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \
+ ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK)
-#define ACPI_TCPA_INTERRUPT_MODE (1)
-#define ACPI_TCPA_INTERRUPT_POLARITY (1<<1)
-#define ACPI_TCPA_SCI_VIA_GPE (1<<2)
-#define ACPI_TCPA_GLOBAL_INTERRUPT (1<<3)
+#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \
+ (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET)
+
+#define ACPI_NFIT_GET_MEMORY_ID(handle) \
+ (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET)
+
+#define ACPI_NFIT_GET_SOCKET_ID(handle) \
+ (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET)
+
+#define ACPI_NFIT_GET_NODE_ID(handle) \
+ (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET)
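
/*
 * Illustrative sketch, not part of the original header: build a handle
 * for DIMM 2 on channel 1, memory controller 0, socket 3, node 0, then
 * extract the socket ID again. The function name is hypothetical.
 */
static inline u32 acpi_nfit_handle_example(void)
{
	u32 handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(2, 1, 0, 3, 0);

	return ACPI_NFIT_GET_SOCKET_ID(handle);	/* Evaluates to 3 */
}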
/*******************************************************************************
*
- * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
- * Version 4
- *
- * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
- * Version 1.2, Revision 8
- * February 27, 2017
+ * PCCT - Platform Communications Channel Table (ACPI 5.0)
+ * Version 2 (ACPI 6.2)
*
******************************************************************************/
-struct acpi_table_tpm2 {
+struct acpi_table_pcct {
struct acpi_table_header header; /* Common ACPI table header */
- u16 platform_class;
- u16 reserved;
- u64 control_address;
- u32 start_method;
-
- /* Platform-specific data follows */
+ u32 flags;
+ u64 reserved;
};
-/* Values for start_method above */
+/* Values for Flags field above */
-#define ACPI_TPM2_NOT_ALLOWED 0
-#define ACPI_TPM2_START_METHOD 2
-#define ACPI_TPM2_MEMORY_MAPPED 6
-#define ACPI_TPM2_COMMAND_BUFFER 7
-#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
-#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
+#define ACPI_PCCT_DOORBELL 1
-/* Trailer appears after any start_method subtables */
+/* Values for subtable type in struct acpi_subtable_header */
-struct acpi_tpm2_trailer {
- u32 minimum_log_length; /* Minimum length for the event log area */
- u64 log_address; /* Address of the event log area */
+enum acpi_pcct_type {
+ ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0,
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1,
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */
+ ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */
+ ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */
+ ACPI_PCCT_TYPE_RESERVED = 5 /* 5 and greater are reserved */
};
/*
- * Subtables (start_method-specific)
+ * PCCT Subtables, correspond to Type in struct acpi_subtable_header
*/
-/* 11: Start Method for ARM SMC (V1.2 Rev 8) */
+/* 0: Generic Communications Subspace */
-struct acpi_tpm2_arm_smc {
- u32 global_interrupt;
- u8 interrupt_flags;
- u8 operation_flags;
- u16 reserved;
- u32 function_id;
+struct acpi_pcct_subspace {
+ struct acpi_subtable_header header;
+ u8 reserved[6];
+ u64 base_address;
+ u64 length;
+ struct acpi_generic_address doorbell_register;
+ u64 preserve_mask;
+ u64 write_mask;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+};
+
+/* 1: HW-reduced Communications Subspace (ACPI 5.1) */
+
+struct acpi_pcct_hw_reduced {
+ struct acpi_subtable_header header;
+ u32 platform_interrupt;
+ u8 flags;
+ u8 reserved;
+ u64 base_address;
+ u64 length;
+ struct acpi_generic_address doorbell_register;
+ u64 preserve_mask;
+ u64 write_mask;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+};
+
+/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */
+
+struct acpi_pcct_hw_reduced_type2 {
+ struct acpi_subtable_header header;
+ u32 platform_interrupt;
+ u8 flags;
+ u8 reserved;
+ u64 base_address;
+ u64 length;
+ struct acpi_generic_address doorbell_register;
+ u64 preserve_mask;
+ u64 write_mask;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+ struct acpi_generic_address platform_ack_register;
+ u64 ack_preserve_mask;
+ u64 ack_write_mask;
};
-/* Values for interrupt_flags above */
+/* 3: Extended PCC Master Subspace Type 3 (ACPI 6.2) */
+
+struct acpi_pcct_ext_pcc_master {
+ struct acpi_subtable_header header;
+ u32 platform_interrupt;
+ u8 flags;
+ u8 reserved1;
+ u64 base_address;
+ u32 length;
+ struct acpi_generic_address doorbell_register;
+ u64 preserve_mask;
+ u64 write_mask;
+ u32 latency;
+ u32 max_access_rate;
+ u32 min_turnaround_time;
+ struct acpi_generic_address platform_ack_register;
+ u64 ack_preserve_mask;
+ u64 ack_set_mask;
+ u64 reserved2;
+ struct acpi_generic_address cmd_complete_register;
+ u64 cmd_complete_mask;
+ struct acpi_generic_address cmd_update_register;
+ u64 cmd_update_preserve_mask;
+ u64 cmd_update_set_mask;
+ struct acpi_generic_address error_status_register;
+ u64 error_status_mask;
+};
+
+/* 4: Extended PCC Slave Subspace Type 4 (ACPI 6.2) */
+
+struct acpi_pcct_ext_pcc_slave {
+ struct acpi_subtable_header header;
+ u32 platform_interrupt;
+ u8 flags;
+ u8 reserved1;
+ u64 base_address;
+ u32 length;
+ struct acpi_generic_address doorbell_register;
+ u64 preserve_mask;
+ u64 write_mask;
+ u32 latency;
+ u32 max_access_rate;
+ u32 min_turnaround_time;
+ struct acpi_generic_address platform_ack_register;
+ u64 ack_preserve_mask;
+ u64 ack_set_mask;
+ u64 reserved2;
+ struct acpi_generic_address cmd_complete_register;
+ u64 cmd_complete_mask;
+ struct acpi_generic_address cmd_update_register;
+ u64 cmd_update_preserve_mask;
+ u64 cmd_update_set_mask;
+ struct acpi_generic_address error_status_register;
+ u64 error_status_mask;
+};
+
+/* Values for doorbell flags above */
+
+#define ACPI_PCCT_INTERRUPT_POLARITY (1)
+#define ACPI_PCCT_INTERRUPT_MODE (1<<1)
+
+/*
+ * PCC memory structures (not part of the ACPI table)
+ */
-#define ACPI_TPM2_INTERRUPT_SUPPORT (1)
+/* Shared Memory Region */
-/* Values for operation_flags above */
+struct acpi_pcct_shared_memory {
+ u32 signature;
+ u16 command;
+ u16 status;
+};
-#define ACPI_TPM2_IDLE_SUPPORT (1)
+/* Extended PCC Subspace Shared Memory Region (ACPI 6.2) */
+
+struct acpi_pcct_ext_pcc_shared_memory {
+ u32 signature;
+ u32 flags;
+ u32 length;
+ u32 command;
+};
/*******************************************************************************
*
- * UEFI - UEFI Boot optimization Table
- * Version 1
- *
- * Conforms to "Unified Extensible Firmware Interface Specification",
- * Version 2.3, May 8, 2009
+ * PDTT - Platform Debug Trigger Table (ACPI 6.2)
+ * Version 0
*
******************************************************************************/
-struct acpi_table_uefi {
+struct acpi_table_pdtt {
struct acpi_table_header header; /* Common ACPI table header */
- u8 identifier[16]; /* UUID identifier */
- u16 data_offset; /* Offset of remaining data in table */
+ u8 trigger_count;
+ u8 reserved[3];
+ u32 array_offset;
+};
+
+/*
+ * PDTT Communication Channel Identifier Structure.
+ * The number of these structures is defined by trigger_count above,
+ * starting at array_offset.
+ */
+struct acpi_pdtt_channel {
+ u8 subchannel_id;
+ u8 flags;
};
+/* Flags for above */
+
+#define ACPI_PDTT_RUNTIME_TRIGGER (1)
+#define ACPI_PDTT_WAIT_COMPLETION (1<<1)
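
/*
 * Illustrative sketch, not part of the original header: the identifier
 * array starts array_offset bytes from the beginning of the table and
 * holds trigger_count entries. The helper name is hypothetical.
 */
static inline struct acpi_pdtt_channel *
acpi_pdtt_channel_at(struct acpi_table_pdtt *pdtt, u8 index)
{
	struct acpi_pdtt_channel *array = (struct acpi_pdtt_channel *)
	    ((u8 *)pdtt + pdtt->array_offset);

	return (index < pdtt->trigger_count) ? &array[index] : NULL;
}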
+
/*******************************************************************************
*
- * VRTC - Virtual Real Time Clock Table
+ * PMTT - Platform Memory Topology Table (ACPI 5.0)
* Version 1
*
- * Conforms to "Simple Firmware Interface Specification",
- * Draft 0.8.2, Oct 19, 2010
- * NOTE: The ACPI VRTC is equivalent to the SFI MRTC table.
- *
******************************************************************************/
-struct acpi_table_vrtc {
+struct acpi_table_pmtt {
struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
};
-/* VRTC entry */
+/* Common header for PMTT subtables that follow main table */
-struct acpi_vrtc_entry {
- struct acpi_generic_address physical_address;
- u32 irq;
+struct acpi_pmtt_header {
+ u8 type;
+ u8 reserved1;
+ u16 length;
+ u16 flags;
+ u16 reserved2;
+};
+
+/* Values for Type field above */
+
+#define ACPI_PMTT_TYPE_SOCKET 0
+#define ACPI_PMTT_TYPE_CONTROLLER 1
+#define ACPI_PMTT_TYPE_DIMM 2
+#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */
+
+/* Values for Flags field above */
+
+#define ACPI_PMTT_TOP_LEVEL 0x0001
+#define ACPI_PMTT_PHYSICAL 0x0002
+#define ACPI_PMTT_MEMORY_TYPE 0x000C
+
+/*
+ * PMTT subtables, correspond to Type in struct acpi_pmtt_header
+ */
+
+/* 0: Socket Structure */
+
+struct acpi_pmtt_socket {
+ struct acpi_pmtt_header header;
+ u16 socket_id;
+ u16 reserved;
+};
+
+/* 1: Memory Controller subtable */
+
+struct acpi_pmtt_controller {
+ struct acpi_pmtt_header header;
+ u32 read_latency;
+ u32 write_latency;
+ u32 read_bandwidth;
+ u32 write_bandwidth;
+ u16 access_width;
+ u16 alignment;
+ u16 reserved;
+ u16 domain_count;
+};
+
+/* 1a: Proximity Domain substructure */
+
+struct acpi_pmtt_domain {
+ u32 proximity_domain;
+};
+
+/* 2: Physical Component Identifier (DIMM) */
+
+struct acpi_pmtt_physical_component {
+ struct acpi_pmtt_header header;
+ u16 component_id;
+ u16 reserved;
+ u32 memory_size;
+ u32 bios_handle;
};
/*******************************************************************************
*
- * WAET - Windows ACPI Emulated devices Table
+ * PPTT - Processor Properties Topology Table (ACPI 6.2)
* Version 1
*
- * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 2009
- *
******************************************************************************/
-struct acpi_table_waet {
+struct acpi_table_pptt {
struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* Values for Type field above */
+
+enum acpi_pptt_type {
+ ACPI_PPTT_TYPE_PROCESSOR = 0,
+ ACPI_PPTT_TYPE_CACHE = 1,
+ ACPI_PPTT_TYPE_ID = 2,
+ ACPI_PPTT_TYPE_RESERVED = 3
+};
+
+/* 0: Processor Hierarchy Node Structure */
+
+struct acpi_pptt_processor {
+ struct acpi_subtable_header header;
+ u16 reserved;
u32 flags;
+ u32 parent;
+ u32 acpi_processor_id;
+ u32 number_of_priv_resources;
};
-/* Masks for Flags field above */
+/* Flags */
-#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */
-#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */
+#define ACPI_PPTT_PHYSICAL_PACKAGE (1) /* Physical package */
+#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID (2) /* ACPI Processor ID valid */
-/*******************************************************************************
- *
- * WDAT - Watchdog Action Table
- * Version 1
- *
- * Conforms to "Hardware Watchdog Timers Design Specification",
- * Copyright 2006 Microsoft Corporation.
- *
- ******************************************************************************/
+/* 1: Cache Type Structure */
-struct acpi_table_wdat {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 header_length; /* Watchdog Header Length */
- u16 pci_segment; /* PCI Segment number */
- u8 pci_bus; /* PCI Bus number */
- u8 pci_device; /* PCI Device number */
- u8 pci_function; /* PCI Function number */
- u8 reserved[3];
- u32 timer_period; /* Period of one timer count (msec) */
- u32 max_count; /* Maximum counter value supported */
- u32 min_count; /* Minimum counter value */
- u8 flags;
- u8 reserved2[3];
- u32 entries; /* Number of watchdog entries that follow */
+struct acpi_pptt_cache {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 flags;
+ u32 next_level_of_cache;
+ u32 size;
+ u32 number_of_sets;
+ u8 associativity;
+ u8 attributes;
+ u16 line_size;
};
-/* Masks for Flags field above */
+/* Flags */
+
+#define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Size property valid */
+#define ACPI_PPTT_NUMBER_OF_SETS_VALID (1<<1) /* Number of sets valid */
+#define ACPI_PPTT_ASSOCIATIVITY_VALID (1<<2) /* Associativity valid */
+#define ACPI_PPTT_ALLOCATION_TYPE_VALID (1<<3) /* Allocation type valid */
+#define ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */
+#define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */
+#define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */
-#define ACPI_WDAT_ENABLED (1)
-#define ACPI_WDAT_STOPPED 0x80
+/* Masks for Attributes */
-/* WDAT Instruction Entries (actions) */
+#define ACPI_PPTT_MASK_ALLOCATION_TYPE (0x03) /* Allocation type */
+#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */
+#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */
-struct acpi_wdat_entry {
- u8 action;
- u8 instruction;
+/* Attributes describing cache */
+#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */
+#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */
+#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */
+#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */
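
/*
 * Illustrative sketch, not part of the original header: the attributes
 * byte of struct acpi_pptt_cache is decoded with the masks above and is
 * only meaningful when the corresponding *_VALID flag is set. The helper
 * name is hypothetical.
 */
static inline u8 acpi_pptt_cache_is_unified(struct acpi_pptt_cache *cache)
{
	u8 type = cache->attributes & ACPI_PPTT_MASK_CACHE_TYPE;

	return (cache->flags & ACPI_PPTT_CACHE_TYPE_VALID) &&
	       (type == ACPI_PPTT_CACHE_TYPE_UNIFIED ||
		type == ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT);
}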
+
+/* 2: ID Structure */
+
+struct acpi_pptt_id {
+ struct acpi_subtable_header header;
u16 reserved;
- struct acpi_generic_address register_region;
- u32 value; /* Value used with Read/Write register */
- u32 mask; /* Bitmask required for this register instruction */
-};
-
-/* Values for Action field above */
-
-enum acpi_wdat_actions {
- ACPI_WDAT_RESET = 1,
- ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4,
- ACPI_WDAT_GET_COUNTDOWN = 5,
- ACPI_WDAT_SET_COUNTDOWN = 6,
- ACPI_WDAT_GET_RUNNING_STATE = 8,
- ACPI_WDAT_SET_RUNNING_STATE = 9,
- ACPI_WDAT_GET_STOPPED_STATE = 10,
- ACPI_WDAT_SET_STOPPED_STATE = 11,
- ACPI_WDAT_GET_REBOOT = 16,
- ACPI_WDAT_SET_REBOOT = 17,
- ACPI_WDAT_GET_SHUTDOWN = 18,
- ACPI_WDAT_SET_SHUTDOWN = 19,
- ACPI_WDAT_GET_STATUS = 32,
- ACPI_WDAT_SET_STATUS = 33,
- ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */
-};
-
-/* Values for Instruction field above */
-
-enum acpi_wdat_instructions {
- ACPI_WDAT_READ_VALUE = 0,
- ACPI_WDAT_READ_COUNTDOWN = 1,
- ACPI_WDAT_WRITE_VALUE = 2,
- ACPI_WDAT_WRITE_COUNTDOWN = 3,
- ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */
- ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */
+ u32 vendor_id;
+ u64 level1_id;
+ u64 level2_id;
+ u16 major_rev;
+ u16 minor_rev;
+ u16 spin_rev;
};
/*******************************************************************************
*
- * WDDT - Watchdog Descriptor Table
+ * RASF - RAS Feature Table (ACPI 5.0)
* Version 1
*
- * Conforms to "Using the Intel ICH Family Watchdog Timer (WDT)",
- * Version 001, September 2002
- *
******************************************************************************/
-struct acpi_table_wddt {
+struct acpi_table_rasf {
struct acpi_table_header header; /* Common ACPI table header */
- u16 spec_version;
- u16 table_version;
- u16 pci_vendor_id;
- struct acpi_generic_address address;
- u16 max_count; /* Maximum counter value supported */
- u16 min_count; /* Minimum counter value supported */
- u16 period;
+ u8 channel_id[12];
+};
+
+/* RASF Platform Communication Channel Shared Memory Region */
+
+struct acpi_rasf_shared_memory {
+ u32 signature;
+ u16 command;
u16 status;
- u16 capability;
+ u16 version;
+ u8 capabilities[16];
+ u8 set_capabilities[16];
+ u16 num_parameter_blocks;
+ u32 set_capabilities_status;
+};
+
+/* RASF Parameter Block Structure Header */
+
+struct acpi_rasf_parameter_block {
+ u16 type;
+ u16 version;
+ u16 length;
+};
+
+/* RASF Parameter Block Structure for PATROL_SCRUB */
+
+struct acpi_rasf_patrol_scrub_parameter {
+ struct acpi_rasf_parameter_block header;
+ u16 patrol_scrub_command;
+ u64 requested_address_range[2];
+ u64 actual_address_range[2];
+ u16 flags;
+ u8 requested_speed;
+};
+
+/* Masks for Flags and Speed fields above */
+
+#define ACPI_RASF_SCRUBBER_RUNNING 1
+#define ACPI_RASF_SPEED (7<<1)
+#define ACPI_RASF_SPEED_SLOW (0<<1)
+#define ACPI_RASF_SPEED_MEDIUM (4<<1)
+#define ACPI_RASF_SPEED_FAST (7<<1)
+
+/* Channel Commands */
+
+enum acpi_rasf_commands {
+ ACPI_RASF_EXECUTE_RASF_COMMAND = 1
+};
+
+/* Platform RAS Capabilities */
+
+enum acpi_rasf_capabiliities {
+ ACPI_HW_PATROL_SCRUB_SUPPORTED = 0,
+ ACPI_SW_PATROL_SCRUB_EXPOSED = 1
+};
+
+/* Patrol Scrub Commands */
+
+enum acpi_rasf_patrol_scrub_commands {
+ ACPI_RASF_GET_PATROL_PARAMETERS = 1,
+ ACPI_RASF_START_PATROL_SCRUBBER = 2,
+ ACPI_RASF_STOP_PATROL_SCRUBBER = 3
};
-/* Flags for Status field above */
+/* Channel Command flags */
+
+#define ACPI_RASF_GENERATE_SCI (1<<15)
-#define ACPI_WDDT_AVAILABLE (1)
-#define ACPI_WDDT_ACTIVE (1<<1)
-#define ACPI_WDDT_TCO_OS_OWNED (1<<2)
-#define ACPI_WDDT_USER_RESET (1<<11)
-#define ACPI_WDDT_WDT_RESET (1<<12)
-#define ACPI_WDDT_POWER_FAIL (1<<13)
-#define ACPI_WDDT_UNKNOWN_RESET (1<<14)
+/* Status values */
-/* Flags for Capability field above */
+enum acpi_rasf_status {
+ ACPI_RASF_SUCCESS = 0,
+ ACPI_RASF_NOT_VALID = 1,
+ ACPI_RASF_NOT_SUPPORTED = 2,
+ ACPI_RASF_BUSY = 3,
+ ACPI_RASF_FAILED = 4,
+ ACPI_RASF_ABORTED = 5,
+ ACPI_RASF_INVALID_DATA = 6
+};
+
+/* Status flags */
-#define ACPI_WDDT_AUTO_RESET (1)
-#define ACPI_WDDT_ALERT_SUPPORT (1<<1)
+#define ACPI_RASF_COMMAND_COMPLETE (1)
+#define ACPI_RASF_SCI_DOORBELL (1<<1)
+#define ACPI_RASF_ERROR (1<<2)
+#define ACPI_RASF_STATUS (0x1F<<3)
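Judging by the field names, a platform agent requests a patrol scrub by filling the parameter block above and issuing ACPI_RASF_EXECUTE_RASF_COMMAND through the shared memory region. A rough sketch, not part of the patch and based only on the structure layout shown here (range semantics should be checked against the ACPI RASF definition):

static void rasf_request_scrub(struct acpi_rasf_patrol_scrub_parameter *param,
			       u64 range[2])
{
	param->patrol_scrub_command = ACPI_RASF_START_PATROL_SCRUBBER;
	param->requested_address_range[0] = range[0];
	param->requested_address_range[1] = range[1];
	/* Speed is encoded with the ACPI_RASF_SPEED_* values above */
	param->requested_speed = ACPI_RASF_SPEED_MEDIUM;
}
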
/*******************************************************************************
*
- * WDRT - Watchdog Resource Table
+ * SBST - Smart Battery Specification Table
* Version 1
*
- * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003",
- * Version 1.01, August 28, 2006
+ ******************************************************************************/
+
+struct acpi_table_sbst {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 warning_level;
+ u32 low_level;
+ u32 critical_level;
+};
+
+/*******************************************************************************
+ *
+ * SDEI - Software Delegated Exception Interface Descriptor Table
+ *
+ * Conforms to "Software Delegated Exception Interface (SDEI)" ARM DEN0054A,
+ * May 8th, 2017. Copyright 2017 ARM Ltd.
*
******************************************************************************/
-struct acpi_table_wdrt {
+struct acpi_table_sdei {
struct acpi_table_header header; /* Common ACPI table header */
- struct acpi_generic_address control_register;
- struct acpi_generic_address count_register;
- u16 pci_device_id;
- u16 pci_vendor_id;
- u8 pci_bus; /* PCI Bus number */
- u8 pci_device; /* PCI Device number */
- u8 pci_function; /* PCI Function number */
- u8 pci_segment; /* PCI Segment number */
- u16 max_count; /* Maximum counter value supported */
- u8 units;
};
/*******************************************************************************
*
- * WSMT - Windows SMM Security Migrations Table
+ * SDEV - Secure Devices Table (ACPI 6.2)
* Version 1
*
- * Conforms to "Windows SMM Security Migrations Table",
- * Version 1.0, April 18, 2016
- *
******************************************************************************/
-struct acpi_table_wsmt {
+struct acpi_table_sdev {
struct acpi_table_header header; /* Common ACPI table header */
- u32 protection_flags;
};
-/* Flags for protection_flags field above */
+struct acpi_sdev_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+};
+
+/* Values for subtable type above */
+
+enum acpi_sdev_type {
+ ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0,
+ ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1,
+ ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* Values for flags above */
+
+#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1)
+
+/*
+ * SDEV subtables
+ */
+
+/* 0: Namespace Device Based Secure Device Structure */
-#define ACPI_WSMT_FIXED_COMM_BUFFERS (1)
-#define ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION (2)
-#define ACPI_WSMT_SYSTEM_RESOURCE_PROTECTION (4)
+struct acpi_sdev_namespace {
+ struct acpi_sdev_header header;
+ u16 device_id_offset;
+ u16 device_id_length;
+ u16 vendor_data_offset;
+ u16 vendor_data_length;
+};
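The namespace subtable carries its device identifier and vendor data as offset/length pairs. Assuming the offsets are measured from the start of the subtable, as in the ACPI 6.2 SDEV definition, a consumer could locate the identifier string like this (sketch only, helper name hypothetical):

static const char *sdev_namespace_device_id(struct acpi_sdev_namespace *ns)
{
	/* Assumes device_id_offset is relative to the start of this subtable */
	return (const char *)ns + ns->device_id_offset;
}
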
+
+/* 1: PCIe Endpoint Device Based Device Structure */
+
+struct acpi_sdev_pcie {
+ struct acpi_sdev_header header;
+ u16 segment;
+ u16 start_bus;
+ u16 path_offset;
+ u16 path_length;
+ u16 vendor_data_offset;
+ u16 vendor_data_length;
+};
+
+/* 1a: PCIe Endpoint path entry */
+
+struct acpi_sdev_pcie_path {
+ u8 device;
+ u8 function;
+};
/* Reset to default packing */
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 5bde2e700530..501f341d1d92 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -1,59 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: actbl3.h - ACPI Table Definitions
*
+ * Copyright (C) 2000 - 2018, Intel Corp.
+ *
*****************************************************************************/
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
#ifndef __ACTBL3_H__
#define __ACTBL3_H__
/*******************************************************************************
*
- * Additional ACPI Tables (3)
+ * Additional ACPI Tables
*
* These tables are not consumed directly by the ACPICA subsystem, but are
* included here to support device drivers and the AML disassembler.
*
- * In general, the tables in this file are fully defined within the ACPI
- * specification.
- *
******************************************************************************/
/*
@@ -61,25 +24,24 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
-#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */
-#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */
-#define ACPI_SIG_FPDT "FPDT" /* Firmware Performance Data Table */
-#define ACPI_SIG_GTDT "GTDT" /* Generic Timer Description Table */
-#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
-#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
-#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
-#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
+#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
+#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
+#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
+#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
+#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
#define ACPI_SIG_STAO "STAO" /* Status Override table */
+#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
+#define ACPI_SIG_UEFI "UEFI" /* UEFI Boot Optimization Table */

+#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */
+#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */
+#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
+#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */
+#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
+#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Migrations Table */
#define ACPI_SIG_XENV "XENV" /* Xen Environment table */
-
-#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
-#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
-
-/* Reserved table signatures */
-
-#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
-#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
+#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -101,721 +63,554 @@
/*******************************************************************************
*
- * BGRT - Boot Graphics Resource Table (ACPI 5.0)
- * Version 1
+ * SLIC - Software Licensing Description Table
+ *
+ * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
+ * November 29, 2011. Copyright 2011 Microsoft
*
******************************************************************************/
-struct acpi_table_bgrt {
+/* Basic SLIC table is only the common ACPI header */
+
+struct acpi_table_slic {
struct acpi_table_header header; /* Common ACPI table header */
- u16 version;
- u8 status;
- u8 image_type;
- u64 image_address;
- u32 image_offset_x;
- u32 image_offset_y;
};
-/* Flags for Status field above */
-
-#define ACPI_BGRT_DISPLAYED (1)
-#define ACPI_BGRT_ORIENTATION_OFFSET (3 << 1)
-
/*******************************************************************************
*
- * DRTM - Dynamic Root of Trust for Measurement table
- * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0
- * Table version 1
+ * SLIT - System Locality Distance Information Table
+ * Version 1
*
******************************************************************************/
-struct acpi_table_drtm {
+struct acpi_table_slit {
struct acpi_table_header header; /* Common ACPI table header */
- u64 entry_base_address;
- u64 entry_length;
- u32 entry_address32;
- u64 entry_address64;
- u64 exit_address;
- u64 log_area_address;
- u32 log_area_length;
- u64 arch_dependent_address;
- u32 flags;
+ u64 locality_count;
+ u8 entry[1]; /* Real size = localities^2 */
};
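The SLIT matrix is stored flat: entry[] holds locality_count * locality_count distance bytes in row-major order. A minimal lookup sketch, not part of the patch (helper name is hypothetical):

static u8 slit_distance(struct acpi_table_slit *slit, u64 from, u64 to)
{
	/* Row-major: distance from locality 'from' to locality 'to' */
	return slit->entry[from * slit->locality_count + to];
}
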
-/* Flag Definitions for above */
-
-#define ACPI_DRTM_ACCESS_ALLOWED (1)
-#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1)
-#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2)
-#define ACPI_DRTM_AUTHORITY_ORDER (1<<3)
-
-/* 1) Validated Tables List (64-bit addresses) */
-
-struct acpi_drtm_vtable_list {
- u32 validated_table_count;
- u64 validated_tables[1];
-};
-
-/* 2) Resources List (of Resource Descriptors) */
-
-/* Resource Descriptor */
+/*******************************************************************************
+ *
+ * SPCR - Serial Port Console Redirection table
+ * Version 2
+ *
+ * Conforms to "Serial Port Console Redirection Table",
+ * Version 1.03, August 10, 2015
+ *
+ ******************************************************************************/
-struct acpi_drtm_resource {
- u8 size[7];
- u8 type;
- u64 address;
+struct acpi_table_spcr {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address serial_port;
+ u8 interrupt_type;
+ u8 pc_interrupt;
+ u32 interrupt;
+ u8 baud_rate;
+ u8 parity;
+ u8 stop_bits;
+ u8 flow_control;
+ u8 terminal_type;
+ u8 reserved1;
+ u16 pci_device_id;
+ u16 pci_vendor_id;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+ u32 pci_flags;
+ u8 pci_segment;
+ u32 reserved2;
};
-struct acpi_drtm_resource_list {
- u32 resource_count;
- struct acpi_drtm_resource resources[1];
-};
+/* Masks for pci_flags field above */
-/* 3) Platform-specific Identifiers List */
+#define ACPI_SPCR_DO_NOT_DISABLE (1)
-struct acpi_drtm_dps_id {
- u32 dps_id_length;
- u8 dps_id[16];
-};
+/* Values for Interface Type: See the definition of the DBG2 table */
/*******************************************************************************
*
- * FPDT - Firmware Performance Data Table (ACPI 5.0)
- * Version 1
+ * SPMI - Server Platform Management Interface table
+ * Version 5
+ *
+ * Conforms to "Intelligent Platform Management Interface Specification
+ * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with
+ * June 12, 2009 markup.
*
******************************************************************************/
-struct acpi_table_fpdt {
+struct acpi_table_spmi {
struct acpi_table_header header; /* Common ACPI table header */
+ u8 interface_type;
+ u8 reserved; /* Must be 1 */
+ u16 spec_revision; /* Version of IPMI */
+ u8 interrupt_type;
+ u8 gpe_number; /* GPE assigned */
+ u8 reserved1;
+ u8 pci_device_flag;
+ u32 interrupt;
+ struct acpi_generic_address ipmi_register;
+ u8 pci_segment;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+ u8 reserved2;
};
-/* FPDT subtable header (Performance Record Structure) */
-
-struct acpi_fpdt_header {
- u16 type;
- u8 length;
- u8 revision;
-};
-
-/* Values for Type field above */
+/* Values for interface_type above */
-enum acpi_fpdt_type {
- ACPI_FPDT_TYPE_BOOT = 0,
- ACPI_FPDT_TYPE_S3PERF = 1
+enum acpi_spmi_interface_types {
+ ACPI_SPMI_NOT_USED = 0,
+ ACPI_SPMI_KEYBOARD = 1,
+ ACPI_SPMI_SMI = 2,
+ ACPI_SPMI_BLOCK_TRANSFER = 3,
+ ACPI_SPMI_SMBUS = 4,
+ ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */
};
-/*
- * FPDT subtables
- */
-
-/* 0: Firmware Basic Boot Performance Record */
+/*******************************************************************************
+ *
+ * SRAT - System Resource Affinity Table
+ * Version 3
+ *
+ ******************************************************************************/
-struct acpi_fpdt_boot_pointer {
- struct acpi_fpdt_header header;
- u8 reserved[4];
- u64 address;
+struct acpi_table_srat {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 table_revision; /* Must be value '1' */
+ u64 reserved; /* Reserved, must be zero */
};
-/* 1: S3 Performance Table Pointer Record */
-
-struct acpi_fpdt_s3pt_pointer {
- struct acpi_fpdt_header header;
- u8 reserved[4];
- u64 address;
-};
+/* Values for subtable type in struct acpi_subtable_header */
-/*
- * S3PT - S3 Performance Table. This table is pointed to by the
- * S3 Pointer Record above.
- */
-struct acpi_table_s3pt {
- u8 signature[4]; /* "S3PT" */
- u32 length;
+enum acpi_srat_type {
+ ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
+ ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
+ ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
+ ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
+ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */
+ ACPI_SRAT_TYPE_RESERVED = 5 /* 5 and greater are reserved */
};
/*
- * S3PT Subtables (Not part of the actual FPDT)
+ * SRAT Subtables, correspond to Type in struct acpi_subtable_header
*/
-/* Values for Type field in S3PT header */
-
-enum acpi_s3pt_type {
- ACPI_S3PT_TYPE_RESUME = 0,
- ACPI_S3PT_TYPE_SUSPEND = 1,
- ACPI_FPDT_BOOT_PERFORMANCE = 2
-};
+/* 0: Processor Local APIC/SAPIC Affinity */
-struct acpi_s3pt_resume {
- struct acpi_fpdt_header header;
- u32 resume_count;
- u64 full_resume;
- u64 average_resume;
+struct acpi_srat_cpu_affinity {
+ struct acpi_subtable_header header;
+ u8 proximity_domain_lo;
+ u8 apic_id;
+ u32 flags;
+ u8 local_sapic_eid;
+ u8 proximity_domain_hi[3];
+ u32 clock_domain;
};
-struct acpi_s3pt_suspend {
- struct acpi_fpdt_header header;
- u64 suspend_start;
- u64 suspend_end;
-};
+/* Flags */
-/*
- * FPDT Boot Performance Record (Not part of the actual FPDT)
- */
-struct acpi_fpdt_boot {
- struct acpi_fpdt_header header;
- u8 reserved[4];
- u64 reset_end;
- u64 load_start;
- u64 startup_start;
- u64 exit_services_entry;
- u64 exit_services_exit;
-};
+#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */
-/*******************************************************************************
- *
- * GTDT - Generic Timer Description Table (ACPI 5.1)
- * Version 2
- *
- ******************************************************************************/
+/* 1: Memory Affinity */
-struct acpi_table_gtdt {
- struct acpi_table_header header; /* Common ACPI table header */
- u64 counter_block_addresss;
- u32 reserved;
- u32 secure_el1_interrupt;
- u32 secure_el1_flags;
- u32 non_secure_el1_interrupt;
- u32 non_secure_el1_flags;
- u32 virtual_timer_interrupt;
- u32 virtual_timer_flags;
- u32 non_secure_el2_interrupt;
- u32 non_secure_el2_flags;
- u64 counter_read_block_address;
- u32 platform_timer_count;
- u32 platform_timer_offset;
-};
-
-/* Flag Definitions: Timer Block Physical Timers and Virtual timers */
-
-#define ACPI_GTDT_INTERRUPT_MODE (1)
-#define ACPI_GTDT_INTERRUPT_POLARITY (1<<1)
-#define ACPI_GTDT_ALWAYS_ON (1<<2)
-
-/* Common GTDT subtable header */
-
-struct acpi_gtdt_header {
- u8 type;
- u16 length;
+struct acpi_srat_mem_affinity {
+ struct acpi_subtable_header header;
+ u32 proximity_domain;
+ u16 reserved; /* Reserved, must be zero */
+ u64 base_address;
+ u64 length;
+ u32 reserved1;
+ u32 flags;
+ u64 reserved2; /* Reserved, must be zero */
};
-/* Values for GTDT subtable type above */
-
-enum acpi_gtdt_type {
- ACPI_GTDT_TYPE_TIMER_BLOCK = 0,
- ACPI_GTDT_TYPE_WATCHDOG = 1,
- ACPI_GTDT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
-};
+/* Flags */
-/* GTDT Subtables, correspond to Type in struct acpi_gtdt_header */
+#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */
+#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */
+#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */
-/* 0: Generic Timer Block */
+/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */
-struct acpi_gtdt_timer_block {
- struct acpi_gtdt_header header;
- u8 reserved;
- u64 block_address;
- u32 timer_count;
- u32 timer_offset;
+struct acpi_srat_x2apic_cpu_affinity {
+ struct acpi_subtable_header header;
+ u16 reserved; /* Reserved, must be zero */
+ u32 proximity_domain;
+ u32 apic_id;
+ u32 flags;
+ u32 clock_domain;
+ u32 reserved2;
};
-/* Timer Sub-Structure, one per timer */
+/* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */
-struct acpi_gtdt_timer_entry {
- u8 frame_number;
- u8 reserved[3];
- u64 base_address;
- u64 el0_base_address;
- u32 timer_interrupt;
- u32 timer_flags;
- u32 virtual_timer_interrupt;
- u32 virtual_timer_flags;
- u32 common_flags;
-};
+#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
-/* Flag Definitions: timer_flags and virtual_timer_flags above */
+/* 3: GICC Affinity (ACPI 5.1) */
-#define ACPI_GTDT_GT_IRQ_MODE (1)
-#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1)
+struct acpi_srat_gicc_affinity {
+ struct acpi_subtable_header header;
+ u32 proximity_domain;
+ u32 acpi_processor_uid;
+ u32 flags;
+ u32 clock_domain;
+};
-/* Flag Definitions: common_flags above */
+/* Flags for struct acpi_srat_gicc_affinity */
-#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
-#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
+#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
-/* 1: SBSA Generic Watchdog Structure */
+/* 4: GIC ITS Affinity (ACPI 6.2) */
-struct acpi_gtdt_watchdog {
- struct acpi_gtdt_header header;
- u8 reserved;
- u64 refresh_frame_address;
- u64 control_frame_address;
- u32 timer_interrupt;
- u32 timer_flags;
+struct acpi_srat_gic_its_affinity {
+ struct acpi_subtable_header header;
+ u32 proximity_domain;
+ u16 reserved;
+ u32 its_id;
};
-/* Flag Definitions: timer_flags above */
-
-#define ACPI_GTDT_WATCHDOG_IRQ_MODE (1)
-#define ACPI_GTDT_WATCHDOG_IRQ_POLARITY (1<<1)
-#define ACPI_GTDT_WATCHDOG_SECURE (1<<2)
-
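SRAT subtables follow the fixed header back to back, each starting with struct acpi_subtable_header, so consumers walk them by type and length. A minimal iteration sketch, not part of the patch (function name hypothetical):

static u32 srat_count_memory_ranges(struct acpi_table_srat *srat)
{
	u8 *p = (u8 *)srat + sizeof(*srat);
	u8 *end = (u8 *)srat + srat->header.length;
	u32 count = 0;

	while (p + sizeof(struct acpi_subtable_header) <= end) {
		struct acpi_subtable_header *sub = (void *)p;

		if (sub->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) {
			struct acpi_srat_mem_affinity *mem = (void *)sub;

			if (mem->flags & ACPI_SRAT_MEM_ENABLED)
				count++;
		}
		if (!sub->length)
			break;	/* malformed table, avoid an endless loop */
		p += sub->length;
	}
	return count;
}
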
/*******************************************************************************
*
- * MPST - Memory Power State Table (ACPI 5.0)
+ * STAO - Status Override Table (_STA override) - ACPI 6.0
* Version 1
*
+ * Conforms to "ACPI Specification for Status Override Table"
+ * 6 January 2015
+ *
******************************************************************************/
-#define ACPI_MPST_CHANNEL_INFO \
- u8 channel_id; \
- u8 reserved1[3]; \
- u16 power_node_count; \
- u16 reserved2;
-
-/* Main table */
-
-struct acpi_table_mpst {
+struct acpi_table_stao {
struct acpi_table_header header; /* Common ACPI table header */
- ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */
-};
-
-/* Memory Platform Communication Channel Info */
-
-struct acpi_mpst_channel {
- ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */
-};
-
-/* Memory Power Node Structure */
-
-struct acpi_mpst_power_node {
- u8 flags;
- u8 reserved1;
- u16 node_id;
- u32 length;
- u64 range_address;
- u64 range_length;
- u32 num_power_states;
- u32 num_physical_components;
+ u8 ignore_uart;
};
-/* Values for Flags field above */
-
-#define ACPI_MPST_ENABLED 1
-#define ACPI_MPST_POWER_MANAGED 2
-#define ACPI_MPST_HOT_PLUG_CAPABLE 4
-
-/* Memory Power State Structure (follows POWER_NODE above) */
+/*******************************************************************************
+ *
+ * TCPA - Trusted Computing Platform Alliance table
+ * Version 2
+ *
+ * TCG Hardware Interface Table for TPM 1.2 Clients and Servers
+ *
+ * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
+ * Version 1.2, Revision 8
+ * February 27, 2017
+ *
+ * NOTE: There are two versions of the table with the same signature --
+ * the client version and the server version. The common platform_class
+ * field is used to differentiate the two types of tables.
+ *
+ ******************************************************************************/
-struct acpi_mpst_power_state {
- u8 power_state;
- u8 info_index;
+struct acpi_table_tcpa_hdr {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 platform_class;
};
-/* Physical Component ID Structure (follows POWER_STATE above) */
+/*
+ * Values for platform_class above.
+ * This is how the client and server subtables are differentiated
+ */
+#define ACPI_TCPA_CLIENT_TABLE 0
+#define ACPI_TCPA_SERVER_TABLE 1
-struct acpi_mpst_component {
- u16 component_id;
+struct acpi_table_tcpa_client {
+ u32 minimum_log_length; /* Minimum length for the event log area */
+ u64 log_address; /* Address of the event log area */
};
-/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */
-
-struct acpi_mpst_data_hdr {
- u16 characteristics_count;
+struct acpi_table_tcpa_server {
u16 reserved;
-};
-
-struct acpi_mpst_power_data {
- u8 structure_id;
- u8 flags;
- u16 reserved1;
- u32 average_power;
- u32 power_saving;
- u64 exit_latency;
- u64 reserved2;
-};
-
-/* Values for Flags field above */
-
-#define ACPI_MPST_PRESERVE 1
-#define ACPI_MPST_AUTOENTRY 2
-#define ACPI_MPST_AUTOEXIT 4
-
-/* Shared Memory Region (not part of an ACPI table) */
-
-struct acpi_mpst_shared {
- u32 signature;
- u16 pcc_command;
- u16 pcc_status;
- u32 command_register;
- u32 status_register;
- u32 power_state_id;
- u32 power_node_id;
- u64 energy_consumed;
- u64 average_power;
-};
+ u64 minimum_log_length; /* Minimum length for the event log area */
+ u64 log_address; /* Address of the event log area */
+ u16 spec_revision;
+ u8 device_flags;
+ u8 interrupt_flags;
+ u8 gpe_number;
+ u8 reserved2[3];
+ u32 global_interrupt;
+ struct acpi_generic_address address;
+ u32 reserved3;
+ struct acpi_generic_address config_address;
+ u8 group;
+ u8 bus; /* PCI Bus/Segment/Function numbers */
+ u8 device;
+ u8 function;
+};
+
+/* Values for device_flags above */
+
+#define ACPI_TCPA_PCI_DEVICE (1)
+#define ACPI_TCPA_BUS_PNP (1<<1)
+#define ACPI_TCPA_ADDRESS_VALID (1<<2)
+
+/* Values for interrupt_flags above */
+
+#define ACPI_TCPA_INTERRUPT_MODE (1)
+#define ACPI_TCPA_INTERRUPT_POLARITY (1<<1)
+#define ACPI_TCPA_SCI_VIA_GPE (1<<2)
+#define ACPI_TCPA_GLOBAL_INTERRUPT (1<<3)
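Because both TCPA layouts share one signature, a parser has to branch on platform_class before interpreting the bytes that follow the common header. A minimal sketch, not part of the patch (function name hypothetical, and it assumes the class-specific fields immediately follow struct acpi_table_tcpa_hdr):

static u64 tcpa_log_address(struct acpi_table_tcpa_hdr *hdr)
{
	void *body = (u8 *)hdr + sizeof(*hdr);

	if (hdr->platform_class == ACPI_TCPA_CLIENT_TABLE)
		return ((struct acpi_table_tcpa_client *)body)->log_address;
	if (hdr->platform_class == ACPI_TCPA_SERVER_TABLE)
		return ((struct acpi_table_tcpa_server *)body)->log_address;

	return 0;	/* unknown platform class */
}
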
/*******************************************************************************
*
- * PCCT - Platform Communications Channel Table (ACPI 5.0)
- * Version 2 (ACPI 6.2)
+ * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
+ * Version 4
+ *
+ * TCG Hardware Interface Table for TPM 2.0 Clients and Servers
+ *
+ * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
+ * Version 1.2, Revision 8
+ * February 27, 2017
*
******************************************************************************/
-struct acpi_table_pcct {
+struct acpi_table_tpm2 {
struct acpi_table_header header; /* Common ACPI table header */
- u32 flags;
- u64 reserved;
+ u16 platform_class;
+ u16 reserved;
+ u64 control_address;
+ u32 start_method;
+
+ /* Platform-specific data follows */
};
-/* Values for Flags field above */
+/* Values for start_method above */
-#define ACPI_PCCT_DOORBELL 1
+#define ACPI_TPM2_NOT_ALLOWED 0
+#define ACPI_TPM2_RESERVED1 1
+#define ACPI_TPM2_START_METHOD 2
+#define ACPI_TPM2_RESERVED3 3
+#define ACPI_TPM2_RESERVED4 4
+#define ACPI_TPM2_RESERVED5 5
+#define ACPI_TPM2_MEMORY_MAPPED 6
+#define ACPI_TPM2_COMMAND_BUFFER 7
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
+#define ACPI_TPM2_RESERVED9 9
+#define ACPI_TPM2_RESERVED10 10
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
+#define ACPI_TPM2_RESERVED 12
-/* Values for subtable type in struct acpi_subtable_header */
+/* Optional trailer appears after any start_method subtables */
-enum acpi_pcct_type {
- ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0,
- ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1,
- ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */
- ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */
- ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */
- ACPI_PCCT_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+struct acpi_tpm2_trailer {
+ u8 method_parameters[12];
+ u32 minimum_log_length; /* Minimum length for the event log area */
+ u64 log_address; /* Address of the event log area */
};
/*
- * PCCT Subtables, correspond to Type in struct acpi_subtable_header
+ * Subtables (start_method-specific)
*/
-/* 0: Generic Communications Subspace */
-
-struct acpi_pcct_subspace {
- struct acpi_subtable_header header;
- u8 reserved[6];
- u64 base_address;
- u64 length;
- struct acpi_generic_address doorbell_register;
- u64 preserve_mask;
- u64 write_mask;
- u32 latency;
- u32 max_access_rate;
- u16 min_turnaround_time;
-};
+/* 11: Start Method for ARM SMC (V1.2 Rev 8) */
-/* 1: HW-reduced Communications Subspace (ACPI 5.1) */
-
-struct acpi_pcct_hw_reduced {
- struct acpi_subtable_header header;
- u32 platform_interrupt;
- u8 flags;
- u8 reserved;
- u64 base_address;
- u64 length;
- struct acpi_generic_address doorbell_register;
- u64 preserve_mask;
- u64 write_mask;
- u32 latency;
- u32 max_access_rate;
- u16 min_turnaround_time;
-};
-
-/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */
-
-struct acpi_pcct_hw_reduced_type2 {
- struct acpi_subtable_header header;
- u32 platform_interrupt;
- u8 flags;
- u8 reserved;
- u64 base_address;
- u64 length;
- struct acpi_generic_address doorbell_register;
- u64 preserve_mask;
- u64 write_mask;
- u32 latency;
- u32 max_access_rate;
- u16 min_turnaround_time;
- struct acpi_generic_address platform_ack_register;
- u64 ack_preserve_mask;
- u64 ack_write_mask;
+struct acpi_tpm2_arm_smc {
+ u32 global_interrupt;
+ u8 interrupt_flags;
+ u8 operation_flags;
+ u16 reserved;
+ u32 function_id;
};
-/* 3: Extended PCC Master Subspace Type 3 (ACPI 6.2) */
-
-struct acpi_pcct_ext_pcc_master {
- struct acpi_subtable_header header;
- u32 platform_interrupt;
- u8 flags;
- u8 reserved1;
- u64 base_address;
- u32 length;
- struct acpi_generic_address doorbell_register;
- u64 preserve_mask;
- u64 write_mask;
- u32 latency;
- u32 max_access_rate;
- u32 min_turnaround_time;
- struct acpi_generic_address platform_ack_register;
- u64 ack_preserve_mask;
- u64 ack_set_mask;
- u64 reserved2;
- struct acpi_generic_address cmd_complete_register;
- u64 cmd_complete_mask;
- struct acpi_generic_address cmd_update_register;
- u64 cmd_update_preserve_mask;
- u64 cmd_update_set_mask;
- struct acpi_generic_address error_status_register;
- u64 error_status_mask;
-};
-
-/* 4: Extended PCC Slave Subspace Type 4 (ACPI 6.2) */
-
-struct acpi_pcct_ext_pcc_slave {
- struct acpi_subtable_header header;
- u32 platform_interrupt;
- u8 flags;
- u8 reserved1;
- u64 base_address;
- u32 length;
- struct acpi_generic_address doorbell_register;
- u64 preserve_mask;
- u64 write_mask;
- u32 latency;
- u32 max_access_rate;
- u32 min_turnaround_time;
- struct acpi_generic_address platform_ack_register;
- u64 ack_preserve_mask;
- u64 ack_set_mask;
- u64 reserved2;
- struct acpi_generic_address cmd_complete_register;
- u64 cmd_complete_mask;
- struct acpi_generic_address cmd_update_register;
- u64 cmd_update_preserve_mask;
- u64 cmd_update_set_mask;
- struct acpi_generic_address error_status_register;
- u64 error_status_mask;
-};
-
-/* Values for doorbell flags above */
-
-#define ACPI_PCCT_INTERRUPT_POLARITY (1)
-#define ACPI_PCCT_INTERRUPT_MODE (1<<1)
+/* Values for interrupt_flags above */
-/*
- * PCC memory structures (not part of the ACPI table)
- */
+#define ACPI_TPM2_INTERRUPT_SUPPORT (1)
-/* Shared Memory Region */
+/* Values for operation_flags above */
-struct acpi_pcct_shared_memory {
- u32 signature;
- u16 command;
- u16 status;
-};
+#define ACPI_TPM2_IDLE_SUPPORT (1)
-/* Extended PCC Subspace Shared Memory Region (ACPI 6.2) */
+/*******************************************************************************
+ *
+ * UEFI - UEFI Boot optimization Table
+ * Version 1
+ *
+ * Conforms to "Unified Extensible Firmware Interface Specification",
+ * Version 2.3, May 8, 2009
+ *
+ ******************************************************************************/
-struct acpi_pcct_ext_pcc_shared_memory {
- u32 signature;
- u32 flags;
- u32 length;
- u32 command;
+struct acpi_table_uefi {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 identifier[16]; /* UUID identifier */
+ u16 data_offset; /* Offset of remaining data in table */
};
/*******************************************************************************
*
- * PMTT - Platform Memory Topology Table (ACPI 5.0)
+ * VRTC - Virtual Real Time Clock Table
* Version 1
*
+ * Conforms to "Simple Firmware Interface Specification",
+ * Draft 0.8.2, Oct 19, 2010
+ * NOTE: The ACPI VRTC is equivalent to the SFI MRTC table.
+ *
******************************************************************************/
-struct acpi_table_pmtt {
+struct acpi_table_vrtc {
struct acpi_table_header header; /* Common ACPI table header */
- u32 reserved;
};
-/* Common header for PMTT subtables that follow main table */
+/* VRTC entry */
-struct acpi_pmtt_header {
- u8 type;
- u8 reserved1;
- u16 length;
- u16 flags;
- u16 reserved2;
+struct acpi_vrtc_entry {
+ struct acpi_generic_address physical_address;
+ u32 irq;
};
-/* Values for Type field above */
-
-#define ACPI_PMTT_TYPE_SOCKET 0
-#define ACPI_PMTT_TYPE_CONTROLLER 1
-#define ACPI_PMTT_TYPE_DIMM 2
-#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */
-
-/* Values for Flags field above */
-
-#define ACPI_PMTT_TOP_LEVEL 0x0001
-#define ACPI_PMTT_PHYSICAL 0x0002
-#define ACPI_PMTT_MEMORY_TYPE 0x000C
+/*******************************************************************************
+ *
+ * WAET - Windows ACPI Emulated devices Table
+ * Version 1
+ *
+ * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 2009
+ *
+ ******************************************************************************/
-/*
- * PMTT subtables, correspond to Type in struct acpi_pmtt_header
- */
+struct acpi_table_waet {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 flags;
+};
-/* 0: Socket Structure */
+/* Masks for Flags field above */
-struct acpi_pmtt_socket {
- struct acpi_pmtt_header header;
- u16 socket_id;
- u16 reserved;
-};
+#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */
+#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */
-/* 1: Memory Controller subtable */
+/*******************************************************************************
+ *
+ * WDAT - Watchdog Action Table
+ * Version 1
+ *
+ * Conforms to "Hardware Watchdog Timers Design Specification",
+ * Copyright 2006 Microsoft Corporation.
+ *
+ ******************************************************************************/
-struct acpi_pmtt_controller {
- struct acpi_pmtt_header header;
- u32 read_latency;
- u32 write_latency;
- u32 read_bandwidth;
- u32 write_bandwidth;
- u16 access_width;
- u16 alignment;
- u16 reserved;
- u16 domain_count;
+struct acpi_table_wdat {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 header_length; /* Watchdog Header Length */
+ u16 pci_segment; /* PCI Segment number */
+ u8 pci_bus; /* PCI Bus number */
+ u8 pci_device; /* PCI Device number */
+ u8 pci_function; /* PCI Function number */
+ u8 reserved[3];
+ u32 timer_period; /* Period of one timer count (msec) */
+ u32 max_count; /* Maximum counter value supported */
+ u32 min_count; /* Minimum counter value */
+ u8 flags;
+ u8 reserved2[3];
+ u32 entries; /* Number of watchdog entries that follow */
};
-/* 1a: Proximity Domain substructure */
+/* Masks for Flags field above */
-struct acpi_pmtt_domain {
- u32 proximity_domain;
-};
+#define ACPI_WDAT_ENABLED (1)
+#define ACPI_WDAT_STOPPED 0x80
-/* 2: Physical Component Identifier (DIMM) */
+/* WDAT Instruction Entries (actions) */
-struct acpi_pmtt_physical_component {
- struct acpi_pmtt_header header;
- u16 component_id;
+struct acpi_wdat_entry {
+ u8 action;
+ u8 instruction;
u16 reserved;
- u32 memory_size;
- u32 bios_handle;
+ struct acpi_generic_address register_region;
+ u32 value; /* Value used with Read/Write register */
+ u32 mask; /* Bitmask required for this register instruction */
+};
+
+/* Values for Action field above */
+
+enum acpi_wdat_actions {
+ ACPI_WDAT_RESET = 1,
+ ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4,
+ ACPI_WDAT_GET_COUNTDOWN = 5,
+ ACPI_WDAT_SET_COUNTDOWN = 6,
+ ACPI_WDAT_GET_RUNNING_STATE = 8,
+ ACPI_WDAT_SET_RUNNING_STATE = 9,
+ ACPI_WDAT_GET_STOPPED_STATE = 10,
+ ACPI_WDAT_SET_STOPPED_STATE = 11,
+ ACPI_WDAT_GET_REBOOT = 16,
+ ACPI_WDAT_SET_REBOOT = 17,
+ ACPI_WDAT_GET_SHUTDOWN = 18,
+ ACPI_WDAT_SET_SHUTDOWN = 19,
+ ACPI_WDAT_GET_STATUS = 32,
+ ACPI_WDAT_SET_STATUS = 33,
+ ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */
+};
+
+/* Values for Instruction field above */
+
+enum acpi_wdat_instructions {
+ ACPI_WDAT_READ_VALUE = 0,
+ ACPI_WDAT_READ_COUNTDOWN = 1,
+ ACPI_WDAT_WRITE_VALUE = 2,
+ ACPI_WDAT_WRITE_COUNTDOWN = 3,
+ ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */
+ ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */
};
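The WDAT header is followed by 'entries' fixed-size action entries, so a watchdog driver walks them sequentially and dispatches on the Action field. A minimal sketch, not part of the patch (function name hypothetical, assuming the entries begin directly after the fixed header):

static struct acpi_wdat_entry *wdat_find_action(struct acpi_table_wdat *wdat,
						u8 action)
{
	struct acpi_wdat_entry *entry = (struct acpi_wdat_entry *)(wdat + 1);
	u32 i;

	for (i = 0; i < wdat->entries; i++, entry++) {
		if (entry->action == action)	/* e.g. ACPI_WDAT_RESET */
			return entry;
	}
	return NULL;
}
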
/*******************************************************************************
*
- * RASF - RAS Feature Table (ACPI 5.0)
+ * WDDT - Watchdog Descriptor Table
* Version 1
*
+ * Conforms to "Using the Intel ICH Family Watchdog Timer (WDT)",
+ * Version 001, September 2002
+ *
******************************************************************************/
-struct acpi_table_rasf {
+struct acpi_table_wddt {
struct acpi_table_header header; /* Common ACPI table header */
- u8 channel_id[12];
-};
-
-/* RASF Platform Communication Channel Shared Memory Region */
-
-struct acpi_rasf_shared_memory {
- u32 signature;
- u16 command;
+ u16 spec_version;
+ u16 table_version;
+ u16 pci_vendor_id;
+ struct acpi_generic_address address;
+ u16 max_count; /* Maximum counter value supported */
+ u16 min_count; /* Minimum counter value supported */
+ u16 period;
u16 status;
- u16 version;
- u8 capabilities[16];
- u8 set_capabilities[16];
- u16 num_parameter_blocks;
- u32 set_capabilities_status;
+ u16 capability;
};
-/* RASF Parameter Block Structure Header */
-
-struct acpi_rasf_parameter_block {
- u16 type;
- u16 version;
- u16 length;
-};
-
-/* RASF Parameter Block Structure for PATROL_SCRUB */
-
-struct acpi_rasf_patrol_scrub_parameter {
- struct acpi_rasf_parameter_block header;
- u16 patrol_scrub_command;
- u64 requested_address_range[2];
- u64 actual_address_range[2];
- u16 flags;
- u8 requested_speed;
-};
-
-/* Masks for Flags and Speed fields above */
-
-#define ACPI_RASF_SCRUBBER_RUNNING 1
-#define ACPI_RASF_SPEED (7<<1)
-#define ACPI_RASF_SPEED_SLOW (0<<1)
-#define ACPI_RASF_SPEED_MEDIUM (4<<1)
-#define ACPI_RASF_SPEED_FAST (7<<1)
-
-/* Channel Commands */
-
-enum acpi_rasf_commands {
- ACPI_RASF_EXECUTE_RASF_COMMAND = 1
-};
-
-/* Platform RAS Capabilities */
-
-enum acpi_rasf_capabiliities {
- ACPI_HW_PATROL_SCRUB_SUPPORTED = 0,
- ACPI_SW_PATROL_SCRUB_EXPOSED = 1
-};
-
-/* Patrol Scrub Commands */
-
-enum acpi_rasf_patrol_scrub_commands {
- ACPI_RASF_GET_PATROL_PARAMETERS = 1,
- ACPI_RASF_START_PATROL_SCRUBBER = 2,
- ACPI_RASF_STOP_PATROL_SCRUBBER = 3
-};
-
-/* Channel Command flags */
-
-#define ACPI_RASF_GENERATE_SCI (1<<15)
+/* Flags for Status field above */
-/* Status values */
+#define ACPI_WDDT_AVAILABLE (1)
+#define ACPI_WDDT_ACTIVE (1<<1)
+#define ACPI_WDDT_TCO_OS_OWNED (1<<2)
+#define ACPI_WDDT_USER_RESET (1<<11)
+#define ACPI_WDDT_WDT_RESET (1<<12)
+#define ACPI_WDDT_POWER_FAIL (1<<13)
+#define ACPI_WDDT_UNKNOWN_RESET (1<<14)
-enum acpi_rasf_status {
- ACPI_RASF_SUCCESS = 0,
- ACPI_RASF_NOT_VALID = 1,
- ACPI_RASF_NOT_SUPPORTED = 2,
- ACPI_RASF_BUSY = 3,
- ACPI_RASF_FAILED = 4,
- ACPI_RASF_ABORTED = 5,
- ACPI_RASF_INVALID_DATA = 6
-};
+/* Flags for Capability field above */
-/* Status flags */
-
-#define ACPI_RASF_COMMAND_COMPLETE (1)
-#define ACPI_RASF_SCI_DOORBELL (1<<1)
-#define ACPI_RASF_ERROR (1<<2)
-#define ACPI_RASF_STATUS (0x1F<<3)
+#define ACPI_WDDT_AUTO_RESET (1)
+#define ACPI_WDDT_ALERT_SUPPORT (1<<1)
/*******************************************************************************
*
- * STAO - Status Override Table (_STA override) - ACPI 6.0
+ * WDRT - Watchdog Resource Table
* Version 1
*
- * Conforms to "ACPI Specification for Status Override Table"
- * 6 January 2015
+ * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003",
+ * Version 1.01, August 28, 2006
*
******************************************************************************/
-struct acpi_table_stao {
+struct acpi_table_wdrt {
struct acpi_table_header header; /* Common ACPI table header */
- u8 ignore_uart;
+ struct acpi_generic_address control_register;
+ struct acpi_generic_address count_register;
+ u16 pci_device_id;
+ u16 pci_vendor_id;
+ u8 pci_bus; /* PCI Bus number */
+ u8 pci_device; /* PCI Device number */
+ u8 pci_function; /* PCI Function number */
+ u8 pci_segment; /* PCI Segment number */
+ u16 max_count; /* Maximum counter value supported */
+ u8 units;
};
/*******************************************************************************
@@ -838,6 +633,27 @@ struct acpi_table_wpbt {
/*******************************************************************************
*
+ * WSMT - Windows SMM Security Migrations Table
+ * Version 1
+ *
+ * Conforms to "Windows SMM Security Migrations Table",
+ * Version 1.0, April 18, 2016
+ *
+ ******************************************************************************/
+
+struct acpi_table_wsmt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 protection_flags;
+};
+
+/* Flags for protection_flags field above */
+
+#define ACPI_WSMT_FIXED_COMM_BUFFERS (1)
+#define ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION (2)
+#define ACPI_WSMT_SYSTEM_RESOURCE_PROTECTION (4)
+
+/*******************************************************************************
+ *
* XENV - Xen Environment Table (ACPI 6.0)
* Version 1
*
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4f077edb9b81..1c530f95dc34 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACTYPES_H__
#define __ACTYPES_H__
@@ -468,6 +434,8 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */
#define ACPI_NSEC_PER_MSEC 1000000L
#define ACPI_NSEC_PER_SEC 1000000000L
+#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0)
+
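ACPI_TIME_AFTER() does a signed-difference comparison, so it stays correct across counter wraparound, in the spirit of the kernel's time_after() helpers. A small usage sketch, not part of the patch (names hypothetical):

static bool example_deadline_passed(u64 now_ns, u64 deadline_ns)
{
	return ACPI_TIME_AFTER(now_ns, deadline_ns);
}
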
/* Owner IDs are used to track namespace nodes for selective deletion */
typedef u8 acpi_owner_id;
@@ -483,7 +451,7 @@ typedef u8 acpi_owner_id;
/*
* Constants with special meanings
*/
-#define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR)
+#define ACPI_ROOT_OBJECT ((acpi_handle) ACPI_TO_POINTER (ACPI_MAX_PTR))
#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */
#define ACPI_DO_NOT_WAIT 0
@@ -530,13 +498,13 @@ typedef u64 acpi_integer;
#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
#define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b)))
-#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
+#define ACPI_PTR_DIFF(a, b) ((acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))))
/* Pointer/Integer type conversions */
-#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i)
-#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL)
-#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) NULL)
+#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) 0, (acpi_size) (i))
+#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0)
+#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
@@ -569,17 +537,17 @@ typedef u64 acpi_integer;
******************************************************************************/
/*
- * Initialization sequence
+ * Initialization sequence options
*/
-#define ACPI_FULL_INITIALIZATION 0x00
-#define ACPI_NO_ADDRESS_SPACE_INIT 0x01
-#define ACPI_NO_HARDWARE_INIT 0x02
-#define ACPI_NO_EVENT_INIT 0x04
-#define ACPI_NO_HANDLER_INIT 0x08
-#define ACPI_NO_ACPI_ENABLE 0x10
-#define ACPI_NO_DEVICE_INIT 0x20
-#define ACPI_NO_OBJECT_INIT 0x40
-#define ACPI_NO_FACS_INIT 0x80
+#define ACPI_FULL_INITIALIZATION 0x0000
+#define ACPI_NO_FACS_INIT 0x0001
+#define ACPI_NO_ACPI_ENABLE 0x0002
+#define ACPI_NO_HARDWARE_INIT 0x0004
+#define ACPI_NO_EVENT_INIT 0x0008
+#define ACPI_NO_HANDLER_INIT 0x0010
+#define ACPI_NO_OBJECT_INIT 0x0020
+#define ACPI_NO_DEVICE_INIT 0x0040
+#define ACPI_NO_ADDRESS_SPACE_INIT 0x0080
/*
* Initialization state
@@ -800,6 +768,7 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_CAN_WAKE (u8) 0x10
#define ACPI_GPE_AUTO_ENABLED (u8) 0x20
+#define ACPI_GPE_INITIALIZED (u8) 0x40
/*
* Flags for GPE and Lock interfaces
@@ -1191,7 +1160,6 @@ struct acpi_device_info {
u8 flags; /* Miscellaneous info */
u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */
- u32 current_status; /* _STA value */
u64 address; /* _ADR value */
struct acpi_pnp_device_id hardware_id; /* _HID value */
struct acpi_pnp_device_id unique_id; /* _UID value */
@@ -1205,7 +1173,6 @@ struct acpi_device_info {
/* Flags for Valid field above (acpi_get_object_info) */
-#define ACPI_VALID_STA 0x0001
#define ACPI_VALID_ADR 0x0002
#define ACPI_VALID_HID 0x0004
#define ACPI_VALID_UID 0x0008
@@ -1299,6 +1266,8 @@ typedef enum {
#define ACPI_OSI_WIN_7 0x0B
#define ACPI_OSI_WIN_8 0x0C
#define ACPI_OSI_WIN_10 0x0D
+#define ACPI_OSI_WIN_10_RS1 0x0E
+#define ACPI_OSI_WIN_10_RS2 0x0F
/* Definitions of getopt */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index b1a0a8a64c3d..e63f214531ad 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACUUID_H__
#define __ACUUID_H__
diff --git a/include/acpi/battery.h b/include/acpi/battery.h
new file mode 100644
index 000000000000..5d8f5d910c82
--- /dev/null
+++ b/include/acpi/battery.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ACPI_BATTERY_H
+#define __ACPI_BATTERY_H
+
+#define ACPI_BATTERY_CLASS "battery"
+
+#define ACPI_BATTERY_NOTIFY_STATUS 0x80
+#define ACPI_BATTERY_NOTIFY_INFO 0x81
+#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
+
+struct acpi_battery_hook {
+ const char *name;
+ int (*add_battery)(struct power_supply *battery);
+ int (*remove_battery)(struct power_supply *battery);
+ struct list_head list;
+};
+
+void battery_hook_register(struct acpi_battery_hook *hook);
+void battery_hook_unregister(struct acpi_battery_hook *hook);
+
+#endif
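
The hook structure above lets a secondary driver attach to every ACPI battery as it appears and detaches. A usage sketch, not part of the patch; all example_* names are hypothetical and the file is assumed to include linux/power_supply.h and acpi/battery.h:

static int example_add_battery(struct power_supply *battery)
{
	/* Extend the battery device here, e.g. add extra sysfs attributes */
	return 0;
}

static int example_remove_battery(struct power_supply *battery)
{
	return 0;
}

static struct acpi_battery_hook example_hook = {
	.name		= "example extension",
	.add_battery	= example_add_battery,
	.remove_battery	= example_remove_battery,
};

static int __init example_init(void)
{
	battery_hook_register(&example_hook);
	return 0;
}

static void __exit example_exit(void)
{
	battery_hook_unregister(&example_hook);
}
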
diff --git a/include/acpi/nfit.h b/include/acpi/nfit.h
new file mode 100644
index 000000000000..86ed07c1200d
--- /dev/null
+++ b/include/acpi/nfit.h
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __ACPI_NFIT_H
+#define __ACPI_NFIT_H
+
+#if IS_ENABLED(CONFIG_ACPI_NFIT)
+int nfit_get_smbios_id(u32 device_handle, u16 *flags);
+#else
+static inline int nfit_get_smbios_id(u32 device_handle, u16 *flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* __ACPI_NFIT_H */
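
The inline stub keeps callers buildable when CONFIG_ACPI_NFIT is disabled, so they only need to handle a negative return. A calling sketch, not part of the patch (function name and handle value are hypothetical):

static int example_lookup(u32 device_handle)
{
	u16 flags;
	int rc;

	rc = nfit_get_smbios_id(device_handle, &flags);
	if (rc < 0)
		return rc;	/* -EOPNOTSUPP when CONFIG_ACPI_NFIT is off */

	/* On success, use the returned id and flags as reported by NFIT */
	return 0;
}
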
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 043fd559de6e..f444e5b0fdaa 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acenv.h - Host and compiler configuration
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACENV_H__
#define __ACENV_H__
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 127c848a1ba7..47d690eafe4c 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acenvex.h - Extra host and compiler configuration
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACENVEX_H__
#define __ACENVEX_H__
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 9c8f8b79644e..085db95a3dae 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acgcc.h - GCC specific defines, etc.
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACGCC_H__
#define __ACGCC_H__
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 4f701b288cec..5d2b667af829 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACGCCEX_H__
#define __ACGCCEX_H__
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index bdb6858e2458..626265833a54 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: acintel.h - VC specific defines, etc.
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACINTEL_H__
#define __ACINTEL_H__
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 1b473efd9eb6..a0b232703302 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACLINUX_H__
#define __ACLINUX_H__
@@ -58,10 +24,12 @@
#define ACPI_USE_SYSTEM_CLIBRARY
#define ACPI_USE_DO_WHILE_0
+#define ACPI_IGNORE_PACKAGE_RESOLUTION_ERRORS
#ifdef __KERNEL__
#define ACPI_USE_SYSTEM_INTTYPES
+#define ACPI_USE_GPE_POLLING
/* Kernel specific ACPICA configuration */
@@ -206,7 +174,7 @@
#define ACPI_FLUSH_CPU_CACHE()
#define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
-#if defined(__ia64__) || defined(__x86_64__) ||\
+#if defined(__ia64__) || (defined(__x86_64__) && !defined(__ILP32__)) ||\
defined(__aarch64__) || defined(__PPC64__) ||\
defined(__s390x__)
#define ACPI_MACHINE_WIDTH 64
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index efdff527f8fc..7e81475fe034 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -1,45 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2017, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * Copyright (C) 2000 - 2018, Intel Corp.
*
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
+ *****************************************************************************/
#ifndef __ACLINUXEX_H__
#define __ACLINUXEX_H__
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index dfbd9d990637..9c2e0708eb82 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -8,6 +8,7 @@
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
#define P4D_MASK PGDIR_MASK
+#define MAX_PTRS_PER_P4D 1
#define PTRS_PER_P4D 1
#define p4d_t pgd_t
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
new file mode 100644
index 000000000000..ec07f23678ea
--- /dev/null
+++ b/include/asm-generic/atomic-instrumented.h
@@ -0,0 +1,476 @@
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality, an arch's atomic.h file needs to define all
+ * atomic operations with the arch_ prefix (e.g. arch_atomic_read()) and
+ * include this file at the end. This file then provides atomic_read(),
+ * which forwards to arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
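+/*
+ * A minimal sketch of how an architecture hooks in, assuming it already has
+ * arch_-prefixed primitives (the arch name and path below are hypothetical):
+ *
+ *    // arch/foo/include/asm/atomic.h
+ *    static __always_inline int arch_atomic_read(const atomic_t *v)
+ *    {
+ *            return READ_ONCE(v->counter);
+ *    }
+ *    // ... define the remaining arch_atomic*()/arch_cmpxchg*() helpers ...
+ *    #include <asm-generic/atomic-instrumented.h>
+ */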
+
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/kasan-checks.h>
+
+static __always_inline int atomic_read(const atomic_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic_read(v);
+}
+
+static __always_inline s64 atomic64_read(const atomic64_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic64_read(v);
+}
+
+static __always_inline void atomic_set(atomic_t *v, int i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_set(v, i);
+}
+
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_set(v, i);
+}
+
+static __always_inline int atomic_xchg(atomic_t *v, int i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_xchg(v, i);
+}
+
+static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+
+#ifdef arch_atomic_try_cmpxchg
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_read(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg(v, old, new);
+}
+#endif
+
+#ifdef arch_atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_read(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg(v, old, new);
+}
+#endif
+
+static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return __arch_atomic_add_unless(v, a, u);
+}
+
+
+static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline void atomic_inc(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_inc(v);
+}
+
+static __always_inline void atomic64_inc(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_inc(v);
+}
+
+static __always_inline void atomic_dec(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_dec(v);
+}
+
+static __always_inline void atomic64_dec(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_dec(v);
+}
+
+static __always_inline void atomic_add(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_add(i, v);
+}
+
+static __always_inline void atomic64_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_add(i, v);
+}
+
+static __always_inline void atomic_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_sub(i, v);
+}
+
+static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_sub(i, v);
+}
+
+static __always_inline void atomic_and(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_and(i, v);
+}
+
+static __always_inline void atomic64_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_and(i, v);
+}
+
+static __always_inline void atomic_or(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_or(i, v);
+}
+
+static __always_inline void atomic64_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_or(i, v);
+}
+
+static __always_inline void atomic_xor(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_xor(i, v);
+}
+
+static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_xor(i, v);
+}
+
+static __always_inline int atomic_inc_return(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_return(v);
+}
+
+static __always_inline s64 atomic64_inc_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_return(v);
+}
+
+static __always_inline int atomic_dec_return(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_return(v);
+}
+
+static __always_inline s64 atomic64_dec_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_return(v);
+}
+
+static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_if_positive(v);
+}
+
+static __always_inline bool atomic_dec_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool atomic_inc_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline int atomic_add_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_return(i, v);
+}
+
+static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline int atomic_sub_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline int atomic_fetch_and(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline int atomic_fetch_or(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline unsigned long
+cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define cmpxchg(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
+ (unsigned long)(new), sizeof(*(ptr)))); \
+})
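+/*
+ * Illustrative use only (the variable names are invented): callers keep the
+ * familiar cmpxchg() shape and result type, while the access itself goes
+ * through the KASAN-checked size dispatcher above:
+ *
+ *    u32 seen = cmpxchg(&dev_state, OLD_STATE, NEW_STATE);
+ *
+ *    if (seen == OLD_STATE)
+ *            pr_debug("state updated\n");
+ */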
+
+static __always_inline unsigned long
+sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define sync_cmpxchg(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
+ (unsigned long)(old), (unsigned long)(new), \
+ sizeof(*(ptr)))); \
+})
+
+static __always_inline unsigned long
+cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define cmpxchg_local(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
+ (unsigned long)(old), (unsigned long)(new), \
+ sizeof(*(ptr)))); \
+})
+
+static __always_inline u64
+cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
+{
+ kasan_check_write(ptr, sizeof(*ptr));
+ return arch_cmpxchg64(ptr, old, new);
+}
+
+#define cmpxchg64(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
+ (u64)(new))); \
+})
+
+static __always_inline u64
+cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
+{
+ kasan_check_write(ptr, sizeof(*ptr));
+ return arch_cmpxchg64_local(ptr, old, new);
+}
+
+#define cmpxchg64_local(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
+ (u64)(new))); \
+})
+
+/*
+ * Originally we had the following code here:
+ * __typeof__(p1) ____p1 = (p1);
+ * kasan_check_write(____p1, 2 * sizeof(*____p1));
+ * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
+ * But it leads to compilation failures (see gcc issue 72873).
+ * So for now it's left non-instrumented.
+ * There are few callers of cmpxchg_double(), so it's not critical.
+ */
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+({ \
+ arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 3f38eb03649c..abe6dd9ca2a8 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -2,8 +2,6 @@
* Generic C implementation of atomic counter operations. Usable on
* UP systems only. Do not include in machine independent code.
*
- * Originally implemented for MN10300.
- *
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
index da09fb986459..dd5a9dd7a102 100644
--- a/include/asm-generic/audit_dir_write.h
+++ b/include/asm-generic/audit_dir_write.h
@@ -27,7 +27,9 @@ __NR_mknod,
__NR_mkdirat,
__NR_mknodat,
__NR_unlinkat,
+#ifdef __NR_renameat
__NR_renameat,
+#endif
__NR_linkat,
__NR_symlinkat,
#endif
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index fe297b599b0a..29458bbb2fa0 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -1,5 +1,5 @@
/*
- * Generic barrier definitions, originally based on MN10300 definitions.
+ * Generic barrier definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 1ba611e16fa0..8a1ee10014de 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
#endif
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset);
+#endif
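+/*
+ * A minimal usage sketch (mask_a, mask_b, nbits and handle_bit() are made-up
+ * names): walk the bits set in both bitmaps without and-ing them into a
+ * scratch copy first:
+ *
+ *    unsigned long bit = find_next_and_bit(mask_a, mask_b, nbits, 0);
+ *
+ *    while (bit < nbits) {
+ *            handle_bit(bit);
+ *            bit = find_next_and_bit(mask_a, mask_b, nbits, bit + 1);
+ *    }
+ */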
+
#ifndef find_next_zero_bit
/**
* find_next_zero_bit - find the next cleared bit in a memory region
@@ -55,8 +71,12 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size);
#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
+#ifndef find_first_bit
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#endif
+#ifndef find_first_zero_bit
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+#endif
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index bc397573c43a..67ab280ad134 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,8 @@
* @nr: Bit to set
* @addr: Address to count from
*
- * This operation is atomic and provides acquire barrier semantics.
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
* It can be used to implement bit locks.
*/
#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 963b755d19b0..a7613e1b0c87 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -52,6 +52,7 @@ struct bug_entry {
#ifndef HAVE_ARCH_BUG
#define BUG() do { \
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ barrier_before_unreachable(); \
panic("BUG!"); \
} while (0)
#endif
diff --git a/include/asm-generic/clkdev.h b/include/asm-generic/clkdev.h
deleted file mode 100644
index 4ff334749ed5..000000000000
--- a/include/asm-generic/clkdev.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * include/asm-generic/clkdev.h
- *
- * Based on the ARM clkdev.h:
- * Copyright (C) 2008 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Helper for the clk API to assist looking up a struct clk.
- */
-#ifndef __ASM_CLKDEV_H
-#define __ASM_CLKDEV_H
-
-#include <linux/slab.h>
-
-#ifndef CONFIG_COMMON_CLK
-struct clk;
-
-static inline int __clk_get(struct clk *clk) { return 1; }
-static inline void __clk_put(struct clk *clk) { }
-#endif
-
-static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
-{
- return kzalloc(size, GFP_KERNEL);
-}
-
-#endif
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 000000000000..880a292d792f
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return &dma_direct_ops;
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
new file mode 100644
index 000000000000..296c65442f00
--- /dev/null
+++ b/include/asm-generic/error-injection.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ERROR_INJECTION_H
+#define _ASM_GENERIC_ERROR_INJECTION_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+enum {
+ EI_ETYPE_NONE, /* Dummy value for undefined case */
+ EI_ETYPE_NULL, /* Return NULL on failure */
+ EI_ETYPE_ERRNO, /* Return -ERRNO on failure */
+ EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL on failure */
+};
+
+struct error_injection_entry {
+ unsigned long addr;
+ int etype;
+};
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Whitelist generating macro. Specify functions into which errors can be
+ * injected by marking them with this macro.
+ */
+#define ALLOW_ERROR_INJECTION(fname, _etype) \
+static struct error_injection_entry __used \
+ __attribute__((__section__("_error_injection_whitelist"))) \
+ _eil_addr_##fname = { \
+ .addr = (unsigned long)fname, \
+ .etype = EI_ETYPE_##_etype, \
+ };
+#else
+#define ALLOW_ERROR_INJECTION(fname, _etype)
+#endif
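+/*
+ * Usage sketch with a made-up function name: a function opts in by placing
+ * the whitelist entry right after its definition, naming the kind of value
+ * a forced failure should produce:
+ *
+ *    static int foo_setup(void)
+ *    {
+ *            ...
+ *            return 0;
+ *    }
+ *    ALLOW_ERROR_INJECTION(foo_setup, ERRNO);
+ */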
+#endif
+
+#endif /* _ASM_GENERIC_ERROR_INJECTION_H */
diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h
index 567766b0074a..32c0a216f576 100644
--- a/include/asm-generic/exec.h
+++ b/include/asm-generic/exec.h
@@ -1,4 +1,4 @@
-/* Generic process execution definitions, based on MN10300 definitions.
+/* Generic process execution definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index b4531e3b2120..66d1d45fa2e1 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1,4 +1,4 @@
-/* Generic I/O port emulation, based on MN10300 code
+/* Generic I/O port emulation.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -25,6 +25,50 @@
#define mmiowb() do {} while (0)
#endif
+#ifndef __io_br
+#define __io_br() barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar() rmb()
+#else
+#define __io_ar() barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw() barrier()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw() __io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw() __io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr() __io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par() __io_ar()
+#endif
+
+
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
*
@@ -110,7 +154,12 @@ static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar();
+ return val;
}
#endif
@@ -118,7 +167,12 @@ static inline u8 readb(const volatile void __iomem *addr)
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ __io_br();
+ val = __le16_to_cpu(__raw_readw(addr));
+ __io_ar();
+ return val;
}
#endif
@@ -126,7 +180,12 @@ static inline u16 readw(const volatile void __iomem *addr)
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ __io_br();
+ val = __le32_to_cpu(__raw_readl(addr));
+ __io_ar();
+ return val;
}
#endif
@@ -135,7 +194,12 @@ static inline u32 readl(const volatile void __iomem *addr)
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ __io_br();
+ val = __le64_to_cpu(__raw_readq(addr));
+ __io_ar();
+ return val;
}
#endif
#endif /* CONFIG_64BIT */
@@ -144,7 +208,9 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writeb(value, addr);
+ __io_aw();
}
#endif
@@ -152,7 +218,9 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writew(cpu_to_le16(value), addr);
+ __io_aw();
}
#endif
@@ -160,7 +228,9 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writel(__cpu_to_le32(value), addr);
+ __io_aw();
}
#endif
@@ -169,7 +239,9 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writeq(__cpu_to_le64(value), addr);
+ __io_aw();
}
#endif
#endif /* CONFIG_64BIT */
@@ -180,35 +252,67 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
* accesses.
*/
#ifndef readb_relaxed
-#define readb_relaxed readb
+#define readb_relaxed readb_relaxed
+static inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
#endif
#ifndef readw_relaxed
-#define readw_relaxed readw
+#define readw_relaxed readw_relaxed
+static inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ return __le16_to_cpu(__raw_readw(addr));
+}
#endif
#ifndef readl_relaxed
-#define readl_relaxed readl
+#define readl_relaxed readl_relaxed
+static inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ return __le32_to_cpu(__raw_readl(addr));
+}
#endif
#if defined(readq) && !defined(readq_relaxed)
-#define readq_relaxed readq
+#define readq_relaxed readq_relaxed
+static inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ return __le64_to_cpu(__raw_readq(addr));
+}
#endif
#ifndef writeb_relaxed
-#define writeb_relaxed writeb
+#define writeb_relaxed writeb_relaxed
+static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ __raw_writeb(value, addr);
+}
#endif
#ifndef writew_relaxed
-#define writew_relaxed writew
+#define writew_relaxed writew_relaxed
+static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ __raw_writew(cpu_to_le16(value), addr);
+}
#endif
#ifndef writel_relaxed
-#define writel_relaxed writel
+#define writel_relaxed writel_relaxed
+static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ __raw_writel(__cpu_to_le32(value), addr);
+}
#endif
#if defined(writeq) && !defined(writeq_relaxed)
-#define writeq_relaxed writeq
+#define writeq_relaxed writeq_relaxed
+static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ __raw_writeq(__cpu_to_le64(value), addr);
+}
#endif
/*
@@ -351,6 +455,8 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define IO_SPACE_LIMIT 0xffff
#endif
+#include <linux/logic_pio.h>
+
/*
* {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
* implemented on hardware that needs an additional delay for I/O accesses to
@@ -361,7 +467,12 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define inb inb
static inline u8 inb(unsigned long addr)
{
- return readb(PCI_IOBASE + addr);
+ u8 val;
+
+ __io_pbr();
+ val = __raw_readb(PCI_IOBASE + addr);
+ __io_par();
+ return val;
}
#endif
@@ -369,7 +480,12 @@ static inline u8 inb(unsigned long addr)
#define inw inw
static inline u16 inw(unsigned long addr)
{
- return readw(PCI_IOBASE + addr);
+ u16 val;
+
+ __io_pbr();
+ val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+ __io_par();
+ return val;
}
#endif
@@ -377,7 +493,12 @@ static inline u16 inw(unsigned long addr)
#define inl inl
static inline u32 inl(unsigned long addr)
{
- return readl(PCI_IOBASE + addr);
+ u32 val;
+
+ __io_pbr();
+ val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+ __io_par();
+ return val;
}
#endif
@@ -385,7 +506,9 @@ static inline u32 inl(unsigned long addr)
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
- writeb(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writeb(value, PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -393,7 +516,9 @@ static inline void outb(u8 value, unsigned long addr)
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
- writew(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -401,7 +526,9 @@ static inline void outw(u16 value, unsigned long addr)
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
- writel(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -852,7 +979,16 @@ static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
}
#endif
+#ifndef iounmap
+#define iounmap iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+}
+#endif
+#endif /* CONFIG_MMU */
#ifndef ioremap_nocache
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
@@ -884,22 +1020,13 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
}
#endif
-#ifndef iounmap
-#define iounmap iounmap
-
-static inline void iounmap(void __iomem *addr)
-{
-}
-#endif
-#endif /* CONFIG_MMU */
-
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return PCI_IOBASE + (port & IO_SPACE_LIMIT);
+ return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
}
#endif
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 18c6abe81fbd..728e5c5706c4 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -19,6 +19,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
static inline bool kvm_para_available(void)
{
return false;
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index b1e17fcee2d0..d4f16dcc2ed7 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -1,12 +1,8 @@
-/* Generic I/O port emulation, based on MN10300 code
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Generic I/O port emulation.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef __ASM_GENERIC_PCI_IOMAP_H
#define __ASM_GENERIC_PCI_IOMAP_H
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 8f22f55de17a..1a29b2a0282b 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -8,10 +8,11 @@
typedef struct { pgd_t pgd; } p4d_t;
-#define P4D_SHIFT PGDIR_SHIFT
-#define PTRS_PER_P4D 1
-#define P4D_SIZE (1UL << P4D_SHIFT)
-#define P4D_MASK (~(P4D_SIZE-1))
+#define P4D_SHIFT PGDIR_SHIFT
+#define MAX_PTRS_PER_P4D 1
+#define PTRS_PER_P4D 1
+#define P4D_SIZE (1UL << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE-1))
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 868e68561f91..f59639afaa39 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -309,19 +309,26 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
-#ifndef __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
-static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with the CPU setting these bits, so a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
-
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ return old_pmd;
}
#endif
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+#endif
+
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
@@ -393,6 +400,42 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_do_swap_page() can restore this
+ * metadata when a page is swapped back in.
+ */
+static inline void arch_do_swap_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte)
+{
+
+}
+#endif
+
+#ifndef __HAVE_ARCH_UNMAP_ONE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag as per-page
+ * metadata. arch_unmap_one() can save this metadata when a page is
+ * swapped out.
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t orig_pte)
+{
+ return 0;
+}
+#endif
+
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif
@@ -976,6 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud);
+int pmd_free_pte_page(pmd_t *pmd);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -1001,6 +1046,14 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
+static inline int pud_free_pmd_page(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd)
+{
+ return 0;
+}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index 137ecdd16daa..c36f1d5a2572 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -3,6 +3,7 @@
#define __ASM_GENERIC_QRWLOCK_TYPES_H
#include <linux/types.h>
+#include <asm/byteorder.h>
#include <asm/spinlock_types.h>
/*
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 03cc5f9bba71..849cd8eb5ca0 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -30,6 +30,7 @@
* __ctors_start, __ctors_end
* __irqentry_text_start, __irqentry_text_end
* __softirqentry_text_start, __softirqentry_text_end
+ * __start_opd, __end_opd
*/
extern char _text[], _stext[], _etext[];
extern char _data[], _sdata[], _edata[];
@@ -49,12 +50,15 @@ extern char __start_once[], __end_once[];
/* Start and end of .ctors section - used for constructor calls. */
extern char __ctors_start[], __ctors_end[];
+/* Start and end of .opd section - used for function descriptors. */
+extern char __start_opd[], __end_opd[];
+
extern __visible const void __nosave_begin, __nosave_end;
-/* function descriptor handling (if any). Override
- * in asm/sections.h */
+/* Function descriptor handling (if any). Override in asm/sections.h */
#ifndef dereference_function_descriptor
#define dereference_function_descriptor(p) (p)
+#define dereference_kernel_function_descriptor(p) (p)
#endif
/* random extra sections (if any). Override
diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h
index 052c4ac04fd5..986acc9d34bb 100644
--- a/include/asm-generic/switch_to.h
+++ b/include/asm-generic/switch_to.h
@@ -1,4 +1,4 @@
-/* Generic task switch macro wrapper, based on MN10300 definitions.
+/* Generic task switch macro wrapper.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ee8b707d9fa9..278841c75b97 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -136,6 +136,15 @@
#define KPROBE_BLACKLIST()
#endif
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\
+ KEEP(*(_error_injection_whitelist)) \
+ VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
+#else
+#define ERROR_INJECT_WHITELIST()
+#endif
+
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
@@ -169,6 +178,15 @@
#define TRACE_SYSCALLS()
#endif
+#ifdef CONFIG_BPF_EVENTS
+#define BPF_RAW_TP() STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__start__bpf_raw_tp) = .; \
+ KEEP(*(__bpf_raw_tp_map)) \
+ VMLINUX_SYMBOL(__stop__bpf_raw_tp) = .;
+#else
+#define BPF_RAW_TP()
+#endif
+
#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__earlycon_table) = .; \
@@ -240,6 +258,7 @@
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
+ BPF_RAW_TP() \
TRACEPOINT_STR()
/*
@@ -268,7 +287,11 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
VMLINUX_SYMBOL(__start_init_task) = .; \
+ VMLINUX_SYMBOL(init_thread_union) = .; \
+ VMLINUX_SYMBOL(init_stack) = .; \
*(.data..init_task) \
+ *(.data..init_thread_info) \
+ . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \
VMLINUX_SYMBOL(__end_init_task) = .;
/*
@@ -564,6 +587,7 @@
FTRACE_EVENTS() \
TRACE_SYSCALLS() \
KPROBE_BLACKLIST() \
+ ERROR_INJECT_WHITELIST() \
MEM_DISCARD(init.rodata) \
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
@@ -575,7 +599,6 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \
- ACPI_PROBE_TABLE(iort) \
EARLYCON_TABLE()
#define INIT_TEXT \
diff --git a/include/clocksource/metag_generic.h b/include/clocksource/metag_generic.h
deleted file mode 100644
index ac17e7d06cfb..000000000000
--- a/include/clocksource/metag_generic.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2013 Imaginaton Technologies Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __CLKSOURCE_METAG_GENERIC_H
-#define __CLKSOURCE_METAG_GENERIC_H
-
-extern int metag_generic_timer_init(void);
-
-#endif /* __CLKSOURCE_METAG_GENERIC_H */
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
new file mode 100644
index 000000000000..7d9598dc578d
--- /dev/null
+++ b/include/clocksource/timer-ti-dm.h
@@ -0,0 +1,394 @@
+/*
+ * OMAP Dual-Mode Timers
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Platform device conversion and hwmod support.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+ * PWM and clock framework support by Timo Teras.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#ifndef __CLOCKSOURCE_DMTIMER_H
+#define __CLOCKSOURCE_DMTIMER_H
+
+/* clock sources */
+#define OMAP_TIMER_SRC_SYS_CLK 0x00
+#define OMAP_TIMER_SRC_32_KHZ 0x01
+#define OMAP_TIMER_SRC_EXT_CLK 0x02
+
+/* timer interrupt enable bits */
+#define OMAP_TIMER_INT_CAPTURE (1 << 2)
+#define OMAP_TIMER_INT_OVERFLOW (1 << 1)
+#define OMAP_TIMER_INT_MATCH (1 << 0)
+
+/* trigger types */
+#define OMAP_TIMER_TRIGGER_NONE 0x00
+#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01
+#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
+
+/* posted mode types */
+#define OMAP_TIMER_NONPOSTED 0x00
+#define OMAP_TIMER_POSTED 0x01
+
+/* timer capabilities used in hwmod database */
+#define OMAP_TIMER_SECURE 0x80000000
+#define OMAP_TIMER_ALWON 0x40000000
+#define OMAP_TIMER_HAS_PWM 0x20000000
+#define OMAP_TIMER_NEEDS_RESET 0x10000000
+#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000
+
+/*
+ * timer errata flags
+ *
+ * Errata i103/i767 impact all OMAP3/4/5 devices, including AM33xx. These
+ * errata prevent us from using posted mode on these devices unless the
+ * timer counter register is never read. For more details, please refer to
+ * the OMAP3/4/5 errata documents.
+ */
+#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
+
+struct timer_regs {
+ u32 tidr;
+ u32 tier;
+ u32 twer;
+ u32 tclr;
+ u32 tcrr;
+ u32 tldr;
+ u32 ttrg;
+ u32 twps;
+ u32 tmar;
+ u32 tcar1;
+ u32 tsicr;
+ u32 tcar2;
+ u32 tpir;
+ u32 tnir;
+ u32 tcvr;
+ u32 tocr;
+ u32 towr;
+};
+
+struct omap_dm_timer {
+ int id;
+ int irq;
+ struct clk *fclk;
+
+ void __iomem *io_base;
+ void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */
+ void __iomem *irq_ena; /* irq enable */
+ void __iomem *irq_dis; /* irq disable, only on v2 ip */
+ void __iomem *pend; /* write pending */
+ void __iomem *func_base; /* function register base */
+
+ unsigned long rate;
+ unsigned reserved:1;
+ unsigned posted:1;
+ struct timer_regs context;
+ int (*get_context_loss_count)(struct device *);
+ int ctx_loss_count;
+ int revision;
+ u32 capability;
+ u32 errata;
+ struct platform_device *pdev;
+ struct list_head node;
+};
+
+int omap_dm_timer_reserve_systimer(int id);
+struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap);
+
+int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
+
+u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
+
+int omap_dm_timer_trigger(struct omap_dm_timer *timer);
+
+int omap_dm_timers_active(void);
+
+/*
+ * Do not use the defines below; they are not needed elsewhere. They should
+ * only be used by dmtimer.c and sys_timer-related code.
+ */
+
+/*
+ * The interrupt registers are different between v1 and v2 ip.
+ * These registers are offsets from timer->io_base.
+ */
+#define OMAP_TIMER_ID_OFFSET 0x00
+#define OMAP_TIMER_OCP_CFG_OFFSET 0x10
+
+#define OMAP_TIMER_V1_SYS_STAT_OFFSET 0x14
+#define OMAP_TIMER_V1_STAT_OFFSET 0x18
+#define OMAP_TIMER_V1_INT_EN_OFFSET 0x1c
+
+#define OMAP_TIMER_V2_IRQSTATUS_RAW 0x24
+#define OMAP_TIMER_V2_IRQSTATUS 0x28
+#define OMAP_TIMER_V2_IRQENABLE_SET 0x2c
+#define OMAP_TIMER_V2_IRQENABLE_CLR 0x30
+
+/*
+ * The functional registers have a different base on v1 and v2 ip.
+ * These registers are offsets from timer->func_base. The func_base
+ * is the same as io_base for v1 and io_base + 0x14 for v2 ip.
+ */
+#define OMAP_TIMER_V2_FUNC_OFFSET 0x14
+
+#define _OMAP_TIMER_WAKEUP_EN_OFFSET 0x20
+#define _OMAP_TIMER_CTRL_OFFSET 0x24
+#define OMAP_TIMER_CTRL_GPOCFG (1 << 14)
+#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13)
+#define OMAP_TIMER_CTRL_PT (1 << 12)
+#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8)
+#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8)
+#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8)
+#define OMAP_TIMER_CTRL_SCPWM (1 << 7)
+#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */
+#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */
+#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* prescaler value shift */
+#define OMAP_TIMER_CTRL_POSTED (1 << 2)
+#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */
+#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */
+#define _OMAP_TIMER_COUNTER_OFFSET 0x28
+#define _OMAP_TIMER_LOAD_OFFSET 0x2c
+#define _OMAP_TIMER_TRIGGER_OFFSET 0x30
+#define _OMAP_TIMER_WRITE_PEND_OFFSET 0x34
+#define WP_NONE 0 /* no write pending bit */
+#define WP_TCLR (1 << 0)
+#define WP_TCRR (1 << 1)
+#define WP_TLDR (1 << 2)
+#define WP_TTGR (1 << 3)
+#define WP_TMAR (1 << 4)
+#define WP_TPIR (1 << 5)
+#define WP_TNIR (1 << 6)
+#define WP_TCVR (1 << 7)
+#define WP_TOCR (1 << 8)
+#define WP_TOWR (1 << 9)
+#define _OMAP_TIMER_MATCH_OFFSET 0x38
+#define _OMAP_TIMER_CAPTURE_OFFSET 0x3c
+#define _OMAP_TIMER_IF_CTRL_OFFSET 0x40
+#define _OMAP_TIMER_CAPTURE2_OFFSET 0x44 /* TCAR2, 34xx only */
+#define _OMAP_TIMER_TICK_POS_OFFSET 0x48 /* TPIR, 34xx only */
+#define _OMAP_TIMER_TICK_NEG_OFFSET 0x4c /* TNIR, 34xx only */
+#define _OMAP_TIMER_TICK_COUNT_OFFSET 0x50 /* TCVR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */
+
+/* register offsets with the write pending bit encoded */
+#define WPSHIFT 16
+
+#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
+ | (WP_TCLR << WPSHIFT))
+
+#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
+ | (WP_TCRR << WPSHIFT))
+
+#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
+ | (WP_TLDR << WPSHIFT))
+
+#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
+ | (WP_TTGR << WPSHIFT))
+
+#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
+ | (WP_TMAR << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
+ | (WP_TPIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
+ | (WP_TNIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
+ | (WP_TCVR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
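Each *_REG constant above packs a functional register offset into the low byte and the matching write-pending bit above WPSHIFT; the inline accessors further down unpack it exactly like this:

        u32 reg = OMAP_TIMER_CTRL_REG;          /* 0x24 | (WP_TCLR << 16) */
        u32 offset = reg & 0xff;                /* 0x24, added to timer->func_base */
        u32 wp_mask = reg >> WPSHIFT;           /* WP_TCLR, polled in the write-pending register */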
+
+/*
+ * The helpers below are inlined to optimize code size for system timers.
+ * Other code should not need these at all; see
+ * include/linux/platform_data/pwm_omap_dmtimer.h
+ */
+#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS)
+static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
+ int posted)
+{
+ if (posted)
+ while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ cpu_relax();
+
+ return readl_relaxed(timer->func_base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
+ u32 reg, u32 val, int posted)
+{
+ if (posted)
+ while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ cpu_relax();
+
+ writel_relaxed(val, timer->func_base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
+{
+ u32 tidr;
+
+ /* Assume v1 ip if bits [31:16] are zero */
+ tidr = readl_relaxed(timer->io_base);
+ if (!(tidr >> 16)) {
+ timer->revision = 1;
+ timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
+ timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
+ timer->func_base = timer->io_base;
+ } else {
+ timer->revision = 2;
+ timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
+ timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
+ timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
+ timer->pend = timer->io_base +
+ _OMAP_TIMER_WRITE_PEND_OFFSET +
+ OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
+ }
+}
+
+/*
+ * __omap_dm_timer_enable_posted - enables write posted mode
+ * @timer: pointer to timer instance handle
+ *
+ * Enables write posted mode for the timer. When posted mode is enabled,
+ * writes to certain timer registers are immediately acknowledged by the
+ * internal bus, which prevents the CPU from stalling while it waits for
+ * the write to complete. Enabling this feature can therefore improve the
+ * performance of timer register writes.
+ */
+static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
+{
+ if (timer->posted)
+ return;
+
+ if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
+ timer->posted = OMAP_TIMER_NONPOSTED;
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
+ return;
+ }
+
+ __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
+ OMAP_TIMER_CTRL_POSTED, 0);
+ timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
+ timer->posted = OMAP_TIMER_POSTED;
+}
+
+/**
+ * __omap_dm_timer_override_errata - override errata flags for a timer
+ * @timer: pointer to timer handle
+ * @errata: errata flags to be ignored
+ *
+ * For a given timer, override its errata by clearing the flags specified
+ * in the errata argument. A specific erratum should only be overridden for
+ * a timer if the timer is used in such a way that the erratum has no
+ * impact.
+ */
+static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer,
+ u32 errata)
+{
+ timer->errata &= ~errata;
+}
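As a hedged example of the intended use: a caller that never reads the counter register back could clear the i103/i767 flag so that posted mode can still be enabled (whether that is actually safe is the caller's responsibility):

        /* illustration only: valid only if TCRR is never read on this timer */
        __omap_dm_timer_override_errata(timer, OMAP_TIMER_ERRATA_I103_I767);
        __omap_dm_timer_enable_posted(timer);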
+
+static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
+ int posted, unsigned long rate)
+{
+ u32 l;
+
+ l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ l &= ~0x1;
+ __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ /* Readback to make sure write has completed */
+ __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ /*
+ * Wait for functional clock period x 3.5 to make sure that
+ * timer is stopped
+ */
+ udelay(3500000 / rate + 1);
+#endif
+ }
+
+ /* Ack possibly pending interrupt */
+ writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
+}
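For reference on the udelay() above: 3500000 / rate is 3.5 functional-clock periods expressed in microseconds, so with the 32 kHz source (rate = 32768) the wait is 3500000 / 32768 + 1 = 107 us, while with a multi-MHz system clock the integer division drops to zero and only the +1 us remains.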
+
+static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer,
+ u32 ctrl, unsigned int load,
+ int posted)
+{
+ __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted);
+ __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted);
+}
+
+static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ writel_relaxed(value, timer->irq_ena);
+ __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+}
+
+static inline unsigned int
+__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
+{
+ return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
+}
+
+static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ writel_relaxed(value, timer->irq_stat);
+}
+#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */
+#endif /* __CLOCKSOURCE_DMTIMER_H */
diff --git a/include/crypto/ablk_helper.h b/include/crypto/ablk_helper.h
deleted file mode 100644
index 4e655c2a4e15..000000000000
--- a/include/crypto/ablk_helper.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Shared async block cipher helpers
- */
-
-#ifndef _CRYPTO_ABLK_HELPER_H
-#define _CRYPTO_ABLK_HELPER_H
-
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <crypto/cryptd.h>
-
-struct async_helper_ctx {
- struct cryptd_ablkcipher *cryptd_tfm;
-};
-
-extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int key_len);
-
-extern int __ablk_encrypt(struct ablkcipher_request *req);
-
-extern int ablk_encrypt(struct ablkcipher_request *req);
-
-extern int ablk_decrypt(struct ablkcipher_request *req);
-
-extern void ablk_exit(struct crypto_tfm *tfm);
-
-extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
-
-extern int ablk_init(struct crypto_tfm *tfm);
-
-#endif /* _CRYPTO_ABLK_HELPER_H */
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 03b97629442c..1e26f790b03f 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -327,7 +327,12 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
*/
static inline int crypto_aead_encrypt(struct aead_request *req)
{
- return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_aead_alg(aead)->encrypt(req);
}
/**
@@ -356,6 +361,9 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (req->cryptlen < crypto_aead_authsize(aead))
return -EINVAL;
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index e3cebf640c00..1aba888241dd 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -30,7 +30,6 @@ struct crypto_type {
int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
- struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
void (*free)(struct crypto_instance *inst);
unsigned int type;
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index caaa470389e0..b83d66073db0 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,12 +13,13 @@
#define CHACHA20_IV_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
+#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
struct chacha20_ctx {
u32 key[8];
};
-void chacha20_block(u32 *state, void *stream);
+void chacha20_block(u32 *state, u32 *stream);
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize);
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index dd04c1699b51..1cbec29af3d6 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -17,7 +17,10 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/akcipher.h>
#include <crypto/hash.h>
+#include <crypto/skcipher.h>
#define ENGINE_NAME_LEN 30
/*
@@ -37,12 +40,6 @@
* @unprepare_crypt_hardware: there are currently no more requests on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
- * @prepare_cipher_request: do some prepare if need before handle the current request
- * @unprepare_cipher_request: undo any work done by prepare_cipher_request()
- * @cipher_one_request: do encryption for current request
- * @prepare_hash_request: do some prepare if need before handle the current request
- * @unprepare_hash_request: undo any work done by prepare_hash_request()
- * @hash_one_request: do hash for current request
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -65,19 +62,6 @@ struct crypto_engine {
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*prepare_cipher_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
- int (*unprepare_cipher_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
- int (*prepare_hash_request)(struct crypto_engine *engine,
- struct ahash_request *req);
- int (*unprepare_hash_request)(struct crypto_engine *engine,
- struct ahash_request *req);
- int (*cipher_one_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
- int (*hash_one_request)(struct crypto_engine *engine,
- struct ahash_request *req);
-
struct kthread_worker *kworker;
struct kthread_work pump_requests;
@@ -85,19 +69,45 @@ struct crypto_engine {
struct crypto_async_request *cur_req;
};
-int crypto_transfer_cipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req,
- bool need_pump);
-int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req);
-int crypto_transfer_hash_request(struct crypto_engine *engine,
- struct ahash_request *req, bool need_pump);
+/*
+ * struct crypto_engine_op - crypto hardware engine operations
+ * @prepare_request: do any preparation, if needed, before handling the current request
+ * @unprepare_request: undo any work done by prepare_request()
+ * @do_one_request: process (e.g. encrypt) the current request
+ */
+struct crypto_engine_op {
+ int (*prepare_request)(struct crypto_engine *engine,
+ void *areq);
+ int (*unprepare_request)(struct crypto_engine *engine,
+ void *areq);
+ int (*do_one_request)(struct crypto_engine *engine,
+ void *areq);
+};
+
+struct crypto_engine_ctx {
+ struct crypto_engine_op op;
+};
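A minimal sketch of how a driver might use the two structs above, assuming (this header does not say so explicitly) that the engine expects struct crypto_engine_ctx at the start of the tfm context; all names are illustrative:

struct my_skcipher_ctx {
        struct crypto_engine_ctx enginectx;     /* assumed to come first */
        /* driver-private state follows */
};

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
        struct skcipher_request *req =
                container_of(areq, struct skcipher_request, base);

        /*
         * Program the hardware here; the completion path is then expected
         * to call crypto_finalize_skcipher_request(engine, req, err).
         */
        return 0;
}

/* in the tfm init path: ctx->enginectx.op.do_one_request = my_do_one_request; */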
+
+int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
+ struct ablkcipher_request *req);
+int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
+ struct aead_request *req);
+int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
+ struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
- struct ahash_request *req);
-void crypto_finalize_cipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err);
+ struct ahash_request *req);
+int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
+ struct skcipher_request *req);
+void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err);
+void crypto_finalize_aead_request(struct crypto_engine *engine,
+ struct aead_request *req, int err);
+void crypto_finalize_akcipher_request(struct crypto_engine *engine,
+ struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err);
+void crypto_finalize_skcipher_request(struct crypto_engine *engine,
+ struct skcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0ed31fd80242..76e432cab75d 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -71,12 +71,12 @@ struct ahash_request {
/**
* struct ahash_alg - asynchronous message digest definition
- * @init: Initialize the transformation context. Intended only to initialize the
+ * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
* state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
- * transformation. No data processing happens at this point.
- * Note: mandatory.
- * @update: Push a chunk of data into the driver for transformation. This
+ * transformation. No data processing happens at this point. The driver
+ * implementation must not use req->result.
+ * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
* function actually pushes blocks of data from upper layers into the
* driver, which then passes those to the hardware as seen fit. This
* function must not finalize the HASH transformation by calculating the
@@ -84,21 +84,19 @@ struct ahash_request {
* transformation. This function shall not modify the transformation
* context, as this function may be called in parallel with the same
* transformation object. Data processing can happen synchronously
- * [SHASH] or asynchronously [AHASH] at this point.
- * Note: mandatory.
- * @final: Retrieve result from the driver. This function finalizes the
+ * [SHASH] or asynchronously [AHASH] at this point. Driver must not use
+ * req->result.
+ * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
* point unless hardware requires it to finish the transformation
* (then the data buffered by the device driver is processed).
- * Note: mandatory.
- * @finup: Combination of @update and @final. This function is effectively a
+ * @finup: **[optional]** Combination of @update and @final. This function is effectively a
* combination of @update and @final calls issued in sequence. As some
* hardware cannot do @update and @final separately, this callback was
* added to allow such hardware to be used at least by IPsec. Data
* processing can happen synchronously [SHASH] or asynchronously [AHASH]
* at this point.
- * Note: optional.
* @digest: Combination of @init and @update and @final. This function
* effectively behaves as the entire chain of operations, @init,
* @update and @final issued in sequence. Just like @finup, this was
@@ -124,11 +122,12 @@ struct ahash_request {
* you want to save partial result of the transformation after
* processing certain amount of data and reload this partial result
* multiple times later on for multiple re-use. No data processing
- * happens at this point.
+ * happens at this point. Driver must not use req->result.
* @import: Import partial state of the transformation. This function loads the
* entire state of the ongoing transformation from a provided block of
* data so the transformation can continue from this point onward. No
- * data processing happens at this point.
+ * data processing happens at this point. Driver must not use
+ * req->result.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
@@ -210,7 +209,6 @@ struct crypto_ahash {
unsigned int keylen);
unsigned int reqsize;
- bool has_setkey;
struct crypto_tfm base;
};
@@ -410,11 +408,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
-static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
-{
- return tfm->has_setkey;
-}
-
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -487,7 +480,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
*/
static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{
- return crypto_ahash_reqtfm(req)->import(req, in);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->import(req, in);
}
/**
@@ -503,7 +501,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
*/
static inline int crypto_ahash_init(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->init(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->init(req);
}
/**
@@ -855,7 +858,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
*/
static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
{
- return crypto_shash_alg(desc->tfm)->import(desc, in);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->import(desc, in);
}
/**
@@ -871,7 +879,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
*/
static inline int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->init(desc);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->init(desc);
}
/**
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index f38227a78eae..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,7 +245,7 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
-unsigned int af_alg_poll(struct file *file, struct socket *sock,
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index c2bae8da642c..a0b0ad9d585e 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
struct hash_alg_common *alg,
struct crypto_instance *inst);
@@ -124,11 +126,6 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
-int ahash_mcryptd_update(struct ahash_request *desc);
-int ahash_mcryptd_final(struct ahash_request *desc);
-int ahash_mcryptd_finup(struct ahash_request *desc);
-int ahash_mcryptd_digest(struct ahash_request *desc);
-
int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index ccad9b2c9bd6..0f6ddac1acfc 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -28,17 +28,6 @@ struct crypto_scomp {
* @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @init: Initialize the cryptographic transformation object.
- * This function is used to initialize the cryptographic
- * transformation object. This function is called only once at
- * the instantiation time, right after the transformation context
- * was allocated. In case the cryptographic hardware has some
- * special requirements which need to be handled by software, this
- * function shall check for the precise requirement of the
- * transformation and put any software fallbacks in place.
- * @exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @init, used to remove various changes set in
- * @init.
* @base: Common crypto API algorithm data structure
*/
struct scomp_alg {
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index 32ceb6929885..f18344518e32 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -7,6 +7,7 @@
#define _CRYPTO_INTERNAL_SIMD_H
struct simd_skcipher_alg;
+struct skcipher_alg;
struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
const char *drvname,
@@ -15,4 +16,10 @@ struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
const char *basename);
void simd_skcipher_free(struct simd_skcipher_alg *alg);
+int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs);
+
+void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs);
+
#endif /* _CRYPTO_INTERNAL_SIMD_H */
diff --git a/include/crypto/lrw.h b/include/crypto/lrw.h
deleted file mode 100644
index a9d44c06d081..000000000000
--- a/include/crypto/lrw.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CRYPTO_LRW_H
-#define _CRYPTO_LRW_H
-
-#include <crypto/b128ops.h>
-
-struct scatterlist;
-struct gf128mul_64k;
-struct blkcipher_desc;
-
-#define LRW_BLOCK_SIZE 16
-
-struct lrw_table_ctx {
- /* optimizes multiplying a random (non incrementing, as at the
- * start of a new sector) value with key2, we could also have
- * used 4k optimization tables or no optimization at all. In the
- * latter case we would have to store key2 here */
- struct gf128mul_64k *table;
- /* stores:
- * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
- * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
- * key2*{ 0,0,...1,1,1,1,1 }, etc
- * needed for optimized multiplication of incrementing values
- * with key2 */
- be128 mulinc[128];
-};
-
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak);
-void lrw_free_table(struct lrw_table_ctx *ctx);
-
-struct lrw_crypt_req {
- be128 *tbuf;
- unsigned int tbuflen;
-
- struct lrw_table_ctx *table_ctx;
- void *crypt_ctx;
- void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
-};
-
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- struct lrw_crypt_req *req);
-
-#endif /* _CRYPTO_LRW_H */
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 5757c0a4b321..15aeef6e30ef 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -12,14 +12,4 @@
struct crypto_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
-static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void)
-{
- return crypto_get_default_null_skcipher();
-}
-
-static inline void crypto_put_default_null_skcipher2(void)
-{
- crypto_put_default_null_skcipher();
-}
-
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index c65567d01e8e..f718a19da82f 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -31,8 +31,6 @@ struct poly1305_desc_ctx {
};
int crypto_poly1305_init(struct shash_desc *desc);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen);
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen);
int crypto_poly1305_update(struct shash_desc *desc,
diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h
new file mode 100644
index 000000000000..19ed48aefc86
--- /dev/null
+++ b/include/crypto/salsa20.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Salsa20 algorithm
+ */
+
+#ifndef _CRYPTO_SALSA20_H
+#define _CRYPTO_SALSA20_H
+
+#include <linux/types.h>
+
+#define SALSA20_IV_SIZE 8
+#define SALSA20_MIN_KEY_SIZE 16
+#define SALSA20_MAX_KEY_SIZE 32
+#define SALSA20_BLOCK_SIZE 64
+
+struct crypto_skcipher;
+
+struct salsa20_ctx {
+ u32 initial_state[16];
+};
+
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+ const u8 *iv);
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+
+#endif /* _CRYPTO_SALSA20_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index b9d9bd553b48..080f60c2e6b1 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -19,7 +19,6 @@
struct sha3_state {
u64 st[25];
- unsigned int md_len;
unsigned int rsiz;
unsigned int rsizw;
@@ -27,4 +26,9 @@ struct sha3_state {
u8 buf[SHA3_224_BLOCK_SIZE];
};
+int crypto_sha3_init(struct shash_desc *desc);
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+int crypto_sha3_final(struct shash_desc *desc, u8 *out);
+
#endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 562001cb412b..2f327f090c3e 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -401,11 +401,6 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
-static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
-{
- return tfm->keysize;
-}
-
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
@@ -442,6 +437,9 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
return tfm->encrypt(req);
}
@@ -460,6 +458,9 @@ static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
return tfm->decrypt(req);
}
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
new file mode 100644
index 000000000000..b64e64d20b28
--- /dev/null
+++ b/include/crypto/sm4.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Common values for the SM4 algorithm
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ */
+
+#ifndef _CRYPTO_SM4_H
+#define _CRYPTO_SM4_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define SM4_KEY_SIZE 16
+#define SM4_BLOCK_SIZE 16
+#define SM4_RKEY_WORDS 32
+
+struct crypto_sm4_ctx {
+ u32 rkey_enc[SM4_RKEY_WORDS];
+ u32 rkey_dec[SM4_RKEY_WORDS];
+};
+
+int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
+
+#endif
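A small sketch of direct use, e.g. from arch-specific code that only wants the generic key schedule (key bytes elided):

        struct crypto_sm4_ctx ctx;
        u8 key[SM4_KEY_SIZE];
        int err;

        err = crypto_sm4_expand_key(&ctx, key, sizeof(key));
        if (!err) {
                /* ctx.rkey_enc[] and ctx.rkey_dec[] now hold the round keys */
        }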
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
new file mode 100644
index 000000000000..73cfc952d405
--- /dev/null
+++ b/include/crypto/speck.h
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common values for the Speck algorithm
+ */
+
+#ifndef _CRYPTO_SPECK_H
+#define _CRYPTO_SPECK_H
+
+#include <linux/types.h>
+
+/* Speck128 */
+
+#define SPECK128_BLOCK_SIZE 16
+
+#define SPECK128_128_KEY_SIZE 16
+#define SPECK128_128_NROUNDS 32
+
+#define SPECK128_192_KEY_SIZE 24
+#define SPECK128_192_NROUNDS 33
+
+#define SPECK128_256_KEY_SIZE 32
+#define SPECK128_256_NROUNDS 34
+
+struct speck128_tfm_ctx {
+ u64 round_keys[SPECK128_256_NROUNDS];
+ int nrounds;
+};
+
+void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
+ u8 *out, const u8 *in);
+
+void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
+ u8 *out, const u8 *in);
+
+int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
+ unsigned int keysize);
+
+/* Speck64 */
+
+#define SPECK64_BLOCK_SIZE 8
+
+#define SPECK64_96_KEY_SIZE 12
+#define SPECK64_96_NROUNDS 26
+
+#define SPECK64_128_KEY_SIZE 16
+#define SPECK64_128_NROUNDS 27
+
+struct speck64_tfm_ctx {
+ u32 round_keys[SPECK64_128_NROUNDS];
+ int nrounds;
+};
+
+void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
+ u8 *out, const u8 *in);
+
+void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
+ u8 *out, const u8 *in);
+
+int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
+ unsigned int keysize);
+
+#endif /* _CRYPTO_SPECK_H */
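These helpers look intended for arch glue code that wants the raw block operations; under that assumption, a minimal Speck128-256 single-block encryption would read:

        struct speck128_tfm_ctx ctx;
        u8 key[SPECK128_256_KEY_SIZE];
        u8 in[SPECK128_BLOCK_SIZE], out[SPECK128_BLOCK_SIZE];

        if (!crypto_speck128_setkey(&ctx, key, sizeof(key)))
                crypto_speck128_encrypt(&ctx, out, in);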
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 322aab6e78a7..34d94c95445a 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -6,27 +6,10 @@
#include <crypto/internal/skcipher.h>
#include <linux/fips.h>
-struct scatterlist;
-struct blkcipher_desc;
-
#define XTS_BLOCK_SIZE 16
-struct xts_crypt_req {
- le128 *tbuf;
- unsigned int tbuflen;
-
- void *tweak_ctx;
- void (*tweak_fn)(void *ctx, u8* dst, const u8* src);
- void *crypt_ctx;
- void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
-};
-
#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
-int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- struct xts_crypt_req *req);
-
static inline int xts_check_key(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
{
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 599028f66585..6c731c52c071 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -45,6 +45,7 @@ enum amd_asic_type {
CHIP_POLARIS11,
CHIP_POLARIS12,
CHIP_VEGA10,
+ CHIP_VEGA12,
CHIP_RAVEN,
CHIP_LAST,
};
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index c99d6eaef1ac..e9a1116d2f8e 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -13,6 +13,8 @@
#include <drm/drm_crtc.h>
+struct analogix_dp_device;
+
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
@@ -29,6 +31,7 @@ struct analogix_dp_plat_data {
struct drm_panel *panel;
struct drm_encoder *encoder;
struct drm_connector *connector;
+ bool skip_connector;
int (*power_on)(struct analogix_dp_plat_data *);
int (*power_off)(struct analogix_dp_plat_data *);
@@ -38,16 +41,17 @@ struct analogix_dp_plat_data {
struct drm_connector *);
};
-int analogix_dp_psr_supported(struct device *dev);
-int analogix_dp_enable_psr(struct device *dev);
-int analogix_dp_disable_psr(struct device *dev);
+int analogix_dp_psr_enabled(struct analogix_dp_device *dp);
+int analogix_dp_enable_psr(struct analogix_dp_device *dp);
+int analogix_dp_disable_psr(struct analogix_dp_device *dp);
-int analogix_dp_resume(struct device *dev);
-int analogix_dp_suspend(struct device *dev);
+int analogix_dp_resume(struct analogix_dp_device *dp);
+int analogix_dp_suspend(struct analogix_dp_device *dp);
-int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
- struct analogix_dp_plat_data *plat_data);
-void analogix_dp_unbind(struct device *dev, struct device *master, void *data);
+struct analogix_dp_device *
+analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
+ struct analogix_dp_plat_data *plat_data);
+void analogix_dp_unbind(struct analogix_dp_device *dp);
int analogix_dp_start_crc(struct drm_connector *connector);
int analogix_dp_stop_crc(struct drm_connector *connector);
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 182f83283e24..dd2a8cf7d20b 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -143,12 +143,13 @@ struct dw_hdmi_plat_data {
unsigned long mpixelclock);
};
-int dw_hdmi_probe(struct platform_device *pdev,
- const struct dw_hdmi_plat_data *plat_data);
-void dw_hdmi_remove(struct platform_device *pdev);
-void dw_hdmi_unbind(struct device *dev);
-int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
- const struct dw_hdmi_plat_data *plat_data);
+struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
+ const struct dw_hdmi_plat_data *plat_data);
+void dw_hdmi_remove(struct dw_hdmi *hdmi);
+void dw_hdmi_unbind(struct dw_hdmi *hdmi);
+struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
+ struct drm_encoder *encoder,
+ const struct dw_hdmi_plat_data *plat_data);
void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense);
@@ -157,7 +158,18 @@ void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
/* PHY configuration */
+void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
unsigned char addr);
+void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable);
+void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable);
+void dw_hdmi_phy_reset(struct dw_hdmi *hdmi);
+
+enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
+ void *data);
+void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
+ bool force, bool disabled, bool rxsense);
+void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index 9b30fec302c8..d9c6d549f971 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -10,6 +10,8 @@
#ifndef __DW_MIPI_DSI__
#define __DW_MIPI_DSI__
+struct dw_mipi_dsi;
+
struct dw_mipi_dsi_phy_ops {
int (*init)(void *priv_data);
int (*get_lane_mbps)(void *priv_data, struct drm_display_mode *mode,
@@ -29,11 +31,14 @@ struct dw_mipi_dsi_plat_data {
void *priv_data;
};
-int dw_mipi_dsi_probe(struct platform_device *pdev,
- const struct dw_mipi_dsi_plat_data *plat_data);
-void dw_mipi_dsi_remove(struct platform_device *pdev);
-int dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data *plat_data);
-void dw_mipi_dsi_unbind(struct device *dev);
+struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi_plat_data
+ *plat_data);
+void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
+struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev,
+ struct drm_encoder *encoder,
+ const struct dw_mipi_dsi_plat_data
+ *plat_data);
+void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
#endif /* __DW_MIPI_DSI__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 59be1232d005..c6666cd09347 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -75,6 +75,7 @@
#include <drm/drm_sarea.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_pci.h>
#include <drm/drm_file.h>
#include <drm/drm_debugfs.h>
@@ -94,212 +95,16 @@ struct dma_buf_attachment;
struct pci_dev;
struct pci_controller;
-/*
- * The following categories are defined:
- *
- * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
- * This is the category used by the DRM_DEBUG() macro.
- *
- * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
- * This is the category used by the DRM_DEBUG_DRIVER() macro.
- *
- * KMS: used in the modesetting code.
- * This is the category used by the DRM_DEBUG_KMS() macro.
- *
- * PRIME: used in the prime code.
- * This is the category used by the DRM_DEBUG_PRIME() macro.
- *
- * ATOMIC: used in the atomic code.
- * This is the category used by the DRM_DEBUG_ATOMIC() macro.
- *
- * VBL: used for verbose debug message in the vblank code
- * This is the category used by the DRM_DEBUG_VBL() macro.
- *
- * Enabling verbose debug messages is done through the drm.debug parameter,
- * each category being enabled by a bit.
- *
- * drm.debug=0x1 will enable CORE messages
- * drm.debug=0x2 will enable DRIVER messages
- * drm.debug=0x3 will enable CORE and DRIVER messages
- * ...
- * drm.debug=0x3f will enable all messages
- *
- * An interesting feature is that it's possible to enable verbose logging at
- * run-time by echoing the debug value in its sysfs node:
- * # echo 0xf > /sys/module/drm/parameters/debug
- */
-#define DRM_UT_NONE 0x00
-#define DRM_UT_CORE 0x01
-#define DRM_UT_DRIVER 0x02
-#define DRM_UT_KMS 0x04
-#define DRM_UT_PRIME 0x08
-#define DRM_UT_ATOMIC 0x10
-#define DRM_UT_VBL 0x20
-#define DRM_UT_STATE 0x40
-#define DRM_UT_LEASE 0x80
-
/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/
/***********************************************************************/
-/** \name Macros to make printk easier */
-/*@{*/
-
-#define _DRM_PRINTK(once, level, fmt, ...) \
- do { \
- printk##once(KERN_##level "[" DRM_NAME "] " fmt, \
- ##__VA_ARGS__); \
- } while (0)
-
-#define DRM_INFO(fmt, ...) \
- _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
-#define DRM_NOTE(fmt, ...) \
- _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
-#define DRM_WARN(fmt, ...) \
- _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
-
-#define DRM_INFO_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
-#define DRM_NOTE_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
-#define DRM_WARN_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
-
-/**
- * Error output.
- *
- * \param fmt printf() like format string.
- * \param arg arguments
- */
-#define DRM_DEV_ERROR(dev, fmt, ...) \
- drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
- fmt, ##__VA_ARGS__)
-#define DRM_ERROR(fmt, ...) \
- drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__)
-
-/**
- * Rate limited error output. Like DRM_ERROR() but won't flood the log.
- *
- * \param fmt printf() like format string.
- * \param arg arguments
- */
-#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- \
- if (__ratelimit(&_rs)) \
- DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
-})
-#define DRM_ERROR_RATELIMITED(fmt, ...) \
- DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_INFO(dev, fmt, ...) \
- drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \
- ##__VA_ARGS__)
-
-#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
-({ \
- static bool __print_once __read_mostly; \
- if (!__print_once) { \
- __print_once = true; \
- DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \
- } \
-})
-
-/**
- * Debug output.
- *
- * \param fmt printf() like format string.
- * \param arg arguments
- */
-#define DRM_DEV_DEBUG(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \
- ##args)
-#define DRM_DEBUG(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \
- fmt, ##args)
-#define DRM_DEBUG_DRIVER(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \
- ##args)
-#define DRM_DEBUG_KMS(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \
- fmt, ##args)
-#define DRM_DEBUG_PRIME(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \
- fmt, ##args)
-#define DRM_DEBUG_ATOMIC(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \
- ##args)
-#define DRM_DEBUG_VBL(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
-
-#define DRM_DEBUG_LEASE(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
-
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \
- __func__, "", fmt, ##args); \
-})
-
-/**
- * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
- *
- * \param fmt printf() like format string.
- * \param arg arguments
- */
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \
- DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
-#define DRM_DEBUG_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
-#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
-
-/* Format strings and argument splitters to simplify printing
- * various "complex" objects
- */
-
-/*@}*/
-
-/***********************************************************************/
/** \name Internal types and structures */
/*@{*/
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-
/**
* drm_drv_uses_atomic_modeset - check if the driver implements
* atomic_commit()
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 5afd6e364fb6..a57a8aa90ffb 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -134,6 +134,15 @@ struct drm_crtc_commit {
* &drm_pending_vblank_event pointer to clean up private events.
*/
struct drm_pending_vblank_event *event;
+
+ /**
+ * @abort_completion:
+ *
+ * A flag that's set after drm_atomic_helper_setup_commit() takes a second
+ * reference for the completion of &drm_crtc_state.event. It's used by
+ * the free code to drop the second reference if the commit fails.
+ */
+ bool abort_completion;
};
struct __drm_planes_state {
@@ -145,7 +154,7 @@ struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state, *old_state, *new_state;
s32 __user *out_fence_ptr;
- unsigned last_vblank_count;
+ u64 last_vblank_count;
};
struct __drm_connnectors_state {
@@ -189,12 +198,40 @@ struct drm_private_state_funcs {
struct drm_private_state *state);
};
+/**
+ * struct drm_private_obj - base struct for driver private atomic object
+ *
+ * A driver private object is initialized by calling
+ * drm_atomic_private_obj_init() and cleaned up by calling
+ * drm_atomic_private_obj_fini().
+ *
+ * Currently only tracks the state update functions and the opaque driver
+ * private state itself, but in the future might also track which
+ * &drm_modeset_lock is required to duplicate and update this object's state.
+ */
struct drm_private_obj {
+ /**
+ * @state: Current atomic state for this driver private object.
+ */
struct drm_private_state *state;
+ /**
+ * @funcs:
+ *
+ * Functions to manipulate the state of this driver private object, see
+ * &drm_private_state_funcs.
+ */
const struct drm_private_state_funcs *funcs;
};
+/**
+ * struct drm_private_state - base struct for driver private object state
+ * @state: backpointer to global drm_atomic_state
+ *
+ * Currently only contains a backpointer to the overall atomic update, but in
+ * the future also might hold synchronization information similar to e.g.
+ * &drm_crtc.commit.
+ */
struct drm_private_state {
struct drm_atomic_state *state;
};
@@ -218,6 +255,10 @@ struct __drm_private_objs_state {
* @num_private_objs: size of the @private_objs array
* @private_objs: pointer to array of private object pointers
* @acquire_ctx: acquire context for this atomic modeset state update
+ *
+ * States are added to an atomic update by calling drm_atomic_get_crtc_state(),
+ * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for
+ * private state structures, drm_atomic_get_private_obj_state().
*/
struct drm_atomic_state {
struct kref ref;
@@ -713,6 +754,28 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
+ * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic
+ * update in reverse order
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @old_plane_state: &struct drm_plane_state iteration cursor for the old state
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all planes in an atomic update in reverse order,
+ * tracking both old and new state. This is useful in places where the
+ * state delta needs to be considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
+ (__i) >= 0; \
+ (__i)--) \
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (old_plane_state) = (__state)->planes[__i].old_state,\
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
+
+/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d2b56cc657e9..26aaba58d6ce 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -38,6 +38,12 @@ struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
+int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
+ const struct drm_crtc_state *crtc_state,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled);
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 682d01ba920c..3270fec46979 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -29,6 +29,7 @@
#include <drm/drm_modes.h>
struct drm_bridge;
+struct drm_bridge_timings;
struct drm_panel;
/**
@@ -90,7 +91,7 @@ struct drm_bridge_funcs {
*
* drm_mode_status Enum
*/
- enum drm_mode_status (*mode_valid)(struct drm_bridge *crtc,
+ enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge,
const struct drm_display_mode *mode);
/**
@@ -223,12 +224,43 @@ struct drm_bridge_funcs {
};
/**
+ * struct drm_bridge_timings - timing information for the bridge
+ */
+struct drm_bridge_timings {
+ /**
+ * @sampling_edge:
+ *
+ * Tells whether the bridge samples the digital input signal
+ * from the display engine on the positive or negative edge of the
+ * clock. This should reuse the DRM_BUS_FLAG_PIXDATA_[POS|NEG]EDGE
+ * bitwise flags from the DRM connector (bits 2 and 3 are valid).
+ */
+ u32 sampling_edge;
+ /**
+ * @setup_time_ps:
+ *
+ * Defines the time in picoseconds the input data lines must be
+ * stable before the clock edge.
+ */
+ u32 setup_time_ps;
+ /**
+ * @hold_time_ps:
+ *
+ * Defines the time in picoseconds taken for the bridge to sample the
+ * input signal after the clock edge.
+ */
+ u32 hold_time_ps;
+};
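A hedged example of how a bridge driver might describe a part that samples on the rising edge with 0.5 ns setup and 1.5 ns hold (the numbers are purely illustrative):

static const struct drm_bridge_timings example_bridge_timings = {
        .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
        .setup_time_ps = 500,
        .hold_time_ps = 1500,
};

/* hooked up before registration, e.g.: bridge->timings = &example_bridge_timings; */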
+
+/**
* struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
* @encoder: encoder to which this bridge is connected
* @next: the next bridge in the encoder chain
* @of_node: device node pointer to the bridge
* @list: to keep track of all added bridges
+ * @timings: the timing specification for the bridge, if any (may
+ * be NULL)
* @funcs: control functions
* @driver_private: pointer to the bridge driver's internal context
*/
@@ -240,6 +272,7 @@ struct drm_bridge {
struct device_node *of_node;
#endif
struct list_head list;
+ const struct drm_bridge_timings *timings;
const struct drm_bridge_funcs *funcs;
void *driver_private;
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index beab0f0d0cfb..bfe1639df02d 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -38,6 +38,8 @@
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
+u64 drm_get_max_iomem(void);
+
static inline bool drm_arch_can_wc_memory(void)
{
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 03a59cbce621..44f04233e3db 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -26,6 +26,7 @@
#include <linux/ctype.h>
struct drm_crtc;
+struct drm_plane;
uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
@@ -37,4 +38,34 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
+/**
+ * drm_color_lut_size - calculate the number of entries in the LUT
+ * @blob: blob containing the LUT
+ *
+ * Returns:
+ * The number of entries in the color LUT stored in @blob.
+ */
+static inline int drm_color_lut_size(const struct drm_property_blob *blob)
+{
+ return blob->length / sizeof(struct drm_color_lut);
+}
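For instance, a driver loading a gamma LUT from the blob attached to a CRTC state could size its loop with this helper; load_hw_lut_entry() is a made-up hook and the struct drm_color_lut field names come from the existing uapi:

        struct drm_color_lut *lut = blob->data;
        int i, lut_size = drm_color_lut_size(blob);

        for (i = 0; i < lut_size; i++)
                load_hw_lut_entry(i, lut[i].red, lut[i].green, lut[i].blue);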
+
+enum drm_color_encoding {
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_BT2020,
+ DRM_COLOR_ENCODING_MAX,
+};
+
+enum drm_color_range {
+ DRM_COLOR_YCBCR_LIMITED_RANGE,
+ DRM_COLOR_YCBCR_FULL_RANGE,
+ DRM_COLOR_RANGE_MAX,
+};
+
+int drm_plane_create_color_properties(struct drm_plane *plane,
+ u32 supported_encodings,
+ u32 supported_ranges,
+ enum drm_color_encoding default_encoding,
+ enum drm_color_range default_range);
#endif
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 5971577016a2..675cc3f8cf85 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -177,6 +177,35 @@ enum drm_link_status {
};
/**
+ * enum drm_panel_orientation - panel_orientation info for &drm_display_info
+ *
+ * This enum is used to track the (LCD) panel orientation. There are no
+ * separate #defines for the uapi!
+ *
+ * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any
+ * panel orientation information (normal
+ * for non-panels); in this case the "panel
+ * orientation" connector prop will not be
+ * attached.
+ * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the
+ * top side of the device's casing.
+ * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the
+ * bottom side of the device's casing, iow
+ * the panel is mounted upside-down.
+ * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the
+ * top side of the device's casing.
+ * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the
+ * top side of the device's casing.
+ */
+enum drm_panel_orientation {
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1,
+ DRM_MODE_PANEL_ORIENTATION_NORMAL = 0,
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP,
+ DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
+/**
* struct drm_display_info - runtime data about the connected sink
*
* Describes a given display (e.g. CRT or flat panel) and its limitations. For
@@ -224,6 +253,15 @@ struct drm_display_info {
#define DRM_COLOR_FORMAT_YCRCB420 (1<<3)
/**
+ * @panel_orientation: Read-only connector property for built-in panels,
+ * indicating the orientation of the panel vs. the device's casing.
+ * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN.
+ * When not UNKNOWN it is used by the drm_fb_helpers to rotate the fb to
+ * compensate, and it is exported to userspace as a connector property.
+ */
+ int panel_orientation;
+
+ /**
* @color_formats: HDMI Color formats, selects between RGB and YCrCb
* modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones
* as used to describe the pixel format in framebuffers, and also don't
@@ -271,6 +309,11 @@ struct drm_display_info {
bool dvi_dual;
/**
+ * @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
+ */
+ bool has_hdmi_infoframe;
+
+ /**
* @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
* more stuff redundant with @bus_formats.
*/
@@ -299,7 +342,11 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info,
/**
* struct drm_tv_connector_state - TV connector related states
* @subconnector: selected subconnector
- * @margins: left/right/top/bottom margins
+ * @margins: margins
+ * @margins.left: left margin
+ * @margins.right: right margin
+ * @margins.top: top margin
+ * @margins.bottom: bottom margin
* @mode: TV mode
* @brightness: brightness in percent
* @contrast: contrast in percent
@@ -376,6 +423,12 @@ struct drm_connector_state {
* upscaling, mostly used for built-in panels.
*/
unsigned int scaling_mode;
+
+ /**
+ * @content_protection: Connector property to request content
+ * protection. This is most commonly used for HDCP.
+ */
+ unsigned int content_protection;
};
/**
@@ -705,7 +758,6 @@ struct drm_cmdline_mode {
* @force: a DRM_FORCE_<foo> state for forced mode sets
* @override_edid: has the EDID been overwritten through debugfs for testing?
* @encoder_ids: valid encoders for this connector
- * @encoder: encoder driving this connector, if any
* @eld: EDID-like data, if present
* @latency_present: AV delay info from ELD, if found
* @video_latency: video latency info from ELD, if found
@@ -724,6 +776,7 @@ struct drm_cmdline_mode {
* @tile_h_size: horizontal size of this tile.
* @tile_v_size: vertical size of this tile.
* @scaling_mode_property: Optional atomic property to control the upscaling.
+ * @content_protection_property: Optional property to control content protection
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -815,6 +868,12 @@ struct drm_connector {
struct drm_property *scaling_mode_property;
/**
+ * @content_protection_property: DRM ENUM property for content
+ * protection
+ */
+ struct drm_property *content_protection_property;
+
+ /**
* @path_blob_ptr:
*
* DRM blob property data for the DP MST path property.
@@ -875,7 +934,13 @@ struct drm_connector {
#define DRM_CONNECTOR_MAX_ENCODER 3
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
- struct drm_encoder *encoder; /* currently active encoder */
+ /**
+ * @encoder: Currently bound encoder driving this connector, if any.
+ * Only really meaningful for non-atomic drivers. Atomic drivers should
+ * instead look at &drm_connector_state.best_encoder, and in case they
+ * need the CRTC driving this output, &drm_connector_state.crtc.
+ */
+ struct drm_encoder *encoder;
#define MAX_ELD_BYTES 128
/* EDID bits */
@@ -1017,6 +1082,7 @@ const char *drm_get_dvi_i_subconnector_name(int val);
const char *drm_get_dvi_i_select_name(int val);
const char *drm_get_tv_subconnector_name(int val);
const char *drm_get_tv_select_name(int val);
+const char *drm_get_content_protection_name(int val);
int drm_mode_create_dvi_i_properties(struct drm_device *dev);
int drm_mode_create_tv_properties(struct drm_device *dev,
@@ -1025,6 +1091,8 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
+int drm_connector_attach_content_protection_property(
+ struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
@@ -1035,6 +1103,8 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status);
+int drm_connector_init_panel_orientation_property(
+ struct drm_connector *connector, int width, int height);
/**
* struct drm_tile_group - Tile group metadata
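A minimal sketch of how a panel driver might wire up the connector additions in the hunks above (panel orientation plus the optional content-protection property). Everything prefixed example_ is hypothetical, the 800x1280 size merely stands in for the panel's native mode, and error handling is trimmed:

static int example_connector_setup(struct drm_connector *connector)
{
	int ret;

	/* The panel is assumed to be mounted upside-down in the casing. */
	connector->display_info.panel_orientation =
		DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;

	/* Attaches the read-only "panel orientation" property to userspace. */
	ret = drm_connector_init_panel_orientation_property(connector,
							    800, 1280);
	if (ret)
		return ret;

	/* Optionally expose the content protection (HDCP) property too. */
	return drm_connector_attach_content_protection_property(connector);
}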
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 76e237bd989b..6914633037a5 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
+bool drm_kms_helper_is_poll_worker(void);
#endif
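drm_kms_helper_is_poll_worker() is typically consulted from a connector ->detect() hook so the driver avoids runtime-resuming the device from inside the output-poll worker itself; a hedged sketch (the runtime-PM handling is an assumption of this example, not something the helper mandates, and it needs <linux/pm_runtime.h>):

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool force)
{
	struct device *dev = connector->dev->dev;
	enum drm_connector_status status;

	if (!drm_kms_helper_is_poll_worker())
		pm_runtime_get_sync(dev);

	/* Hypothetical hardware probe result. */
	status = connector_status_connected;

	if (!drm_kms_helper_is_poll_worker())
		pm_runtime_put(dev);

	return status;
}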
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index e21af87a2f3c..7c4fa32f3fc6 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -17,6 +17,7 @@ struct drm_vblank_crtc;
struct drm_sg_mem;
struct drm_local_map;
struct drm_vma_offset_manager;
+struct drm_fb_helper;
struct inode;
@@ -185,6 +186,14 @@ struct drm_device {
struct drm_vma_offset_manager *vma_offset_manager;
/*@} */
int switch_power_state;
+
+ /**
+ * @fb_helper:
+ *
+ * Pointer to the fbdev emulation structure.
+ * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini().
+ */
+ struct drm_fb_helper *fb_helper;
};
#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2623a1255481..62903bae0221 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -75,6 +75,7 @@
#define DP_MAX_DOWNSPREAD 0x003
# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+# define DP_TPS4_SUPPORTED (1 << 7)
#define DP_NORP 0x004
@@ -287,6 +288,7 @@
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
@@ -328,12 +330,20 @@
# define DP_DS_12BPC 2
# define DP_DS_16BPC 3
+/* DP Forward error Correction Registers */
+#define DP_FEC_CAPABILITY 0x090 /* 1.4 */
+# define DP_FEC_CAPABLE (1 << 0)
+# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
+# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
+# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
+# define DP_LINK_BW_8_1 0x1e /* 1.4 */
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
@@ -344,7 +354,9 @@
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
+# define DP_TRAINING_PATTERN_4 7 /* 1.4 */
# define DP_TRAINING_PATTERN_MASK 0x3
+# define DP_TRAINING_PATTERN_MASK_1_4 0xf
/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
@@ -441,6 +453,19 @@
#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
# define DP_PWR_NOT_NEEDED (1 << 0)
+#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */
+# define DP_FEC_READY (1 << 0)
+# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1)
+# define DP_FEC_ERR_COUNT_DIS (0 << 1)
+# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1)
+# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1)
+# define DP_FEC_BIT_ERROR_COUNT (3 << 1)
+# define DP_FEC_LANE_SELECT_MASK (3 << 4)
+# define DP_FEC_LANE_0_SELECT (0 << 4)
+# define DP_FEC_LANE_1_SELECT (1 << 4)
+# define DP_FEC_LANE_2_SELECT (2 << 4)
+# define DP_FEC_LANE_3_SELECT (3 << 4)
+
#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
@@ -616,6 +641,16 @@
#define DP_TEST_SINK 0x270
# define DP_TEST_SINK_START (1 << 0)
+#define DP_FEC_STATUS 0x280 /* 1.4 */
+# define DP_FEC_DECODE_EN_DETECTED (1 << 0)
+# define DP_FEC_DECODE_DIS_DETECTED (1 << 1)
+
+#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */
+
+#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */
+# define DP_FEC_ERROR_COUNT_MASK 0x7F
+# define DP_FEC_ERR_COUNT_VALID (1 << 7)
+
#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
# define DP_PAYLOAD_ACT_HANDLED (1 << 1)
@@ -635,6 +670,7 @@
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
+# define DP_SET_POWER_D3_AUX_ON 0x5
#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
# define DP_EDP_11 0x00
@@ -835,6 +871,23 @@
#define DP_CEC_TX_MESSAGE_BUFFER 0x3020
#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10
+#define DP_AUX_HDCP_BKSV 0x68000
+#define DP_AUX_HDCP_RI_PRIME 0x68005
+#define DP_AUX_HDCP_AKSV 0x68007
+#define DP_AUX_HDCP_AN 0x6800C
+#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + h * 4)
+#define DP_AUX_HDCP_BCAPS 0x68028
+# define DP_BCAPS_REPEATER_PRESENT BIT(1)
+# define DP_BCAPS_HDCP_CAPABLE BIT(0)
+#define DP_AUX_HDCP_BSTATUS 0x68029
+# define DP_BSTATUS_REAUTH_REQ BIT(3)
+# define DP_BSTATUS_LINK_FAILURE BIT(2)
+# define DP_BSTATUS_R0_PRIME_READY BIT(1)
+# define DP_BSTATUS_READY BIT(0)
+#define DP_AUX_HDCP_BINFO 0x6802A
+#define DP_AUX_HDCP_KSV_FIFO 0x6802C
+#define DP_AUX_HDCP_AINFO 0x6803B
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -970,6 +1023,20 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x14 &&
+ dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
+}
+
+static inline u8
+drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 :
+ DP_TRAINING_PATTERN_MASK;
+}
+
+static inline bool
drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
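The new TPS4 helpers are meant to be combined: a sink advertising DP_TPS4_SUPPORTED needs the wider DPCD 1.4 pattern mask, since DP_TRAINING_PATTERN_4 (7) does not fit in the old 2-bit mask. A sketch under those assumptions:

static u8 example_select_training_pattern(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 pattern;

	if (drm_dp_tps4_supported(dpcd))
		pattern = DP_TRAINING_PATTERN_4;
	else if (drm_dp_tps3_supported(dpcd))
		pattern = DP_TRAINING_PATTERN_3;
	else
		pattern = DP_TRAINING_PATTERN_2;

	/* Keep only the bits valid for this sink's DPCD revision. */
	return pattern & drm_dp_training_pattern_mask(dpcd);
}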
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 412e83a4d3db..d23dcdd1bd95 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -39,6 +39,7 @@ struct drm_minor;
struct dma_buf_attachment;
struct drm_display_mode;
struct drm_mode_create_dumb;
+struct drm_printer;
/* driver capabilities and requirements mask */
#define DRIVER_USE_AGP 0x1
@@ -55,6 +56,7 @@ struct drm_mode_create_dumb;
#define DRIVER_ATOMIC 0x10000
#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
#define DRIVER_SYNCOBJ 0x40000
+#define DRIVER_PREFER_XBGR_30BPP 0x80000
/**
* struct drm_driver - DRM driver structure
@@ -429,6 +431,20 @@ struct drm_driver {
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
/**
+ * @gem_print_info:
+ *
+ * If driver subclasses struct &drm_gem_object, it can implement this
+ * optional hook for printing additional driver specific info.
+ *
+ * drm_printf_indent() should be used in the callback passing it the
+ * indent argument.
+ *
+ * This callback is called from drm_gem_print_info().
+ */
+ void (*gem_print_info)(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj);
+
+ /**
* @gem_create_object: constructor for gem objects
*
* Hook for allocating the GEM object struct, for use by core
@@ -592,13 +608,6 @@ struct drm_driver {
int dev_priv_size;
};
-__printf(6, 7)
-void drm_dev_printk(const struct device *dev, const char *level,
- unsigned int category, const char *function_name,
- const char *prefix, const char *format, ...);
-__printf(3, 4)
-void drm_printk(const char *level, unsigned int category,
- const char *format, ...);
extern unsigned int drm_debug;
int drm_dev_init(struct drm_device *dev,
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index efe6d5a8e834..8d89a9c3748d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -333,7 +333,6 @@ struct drm_encoder;
struct drm_connector;
struct drm_display_mode;
-void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
@@ -357,6 +356,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
bool is_hdmi2_sink);
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ struct drm_connector *connector,
const struct drm_display_mode *mode);
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index ee4cfbe63c52..fb299696c7c4 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -88,7 +88,6 @@ struct drm_encoder_funcs {
* @head: list management
* @base: base KMS object
* @name: human readable name, can be overwritten by the driver
- * @crtc: currently bound CRTC
* @bridge: bridge associated to the encoder
* @funcs: control functions
* @helper_private: mid-layer private data
@@ -166,6 +165,11 @@ struct drm_encoder {
*/
uint32_t possible_clones;
+ /**
+ * @crtc: Currently bound CRTC, only really meaningful for non-atomic
+ * drivers. Atomic drivers should instead check
+ * &drm_connector_state.crtc.
+ */
struct drm_crtc *crtc;
struct drm_bridge *bridge;
const struct drm_encoder_funcs *funcs;
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index faf56c53df28..d532f88a8d55 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -16,6 +16,13 @@ struct drm_mode_fb_cmd2;
struct drm_plane;
struct drm_plane_state;
+int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int max_conn_count,
+ const struct drm_framebuffer_funcs *funcs);
+int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
+ unsigned int max_conn_count);
+void drm_fb_cma_fbdev_fini(struct drm_device *dev);
+
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int max_conn_count,
const struct drm_framebuffer_funcs *funcs);
@@ -36,11 +43,5 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane);
-#ifdef CONFIG_DEBUG_FS
-struct seq_file;
-
-int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
-#endif
-
#endif
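A hedged sketch of the new CMA fbdev one-liners in a driver's load/unload paths; the load/unload split and the 32 bpp preference are assumptions of this example, not requirements of the helpers:

static int example_load(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	ret = drm_fb_cma_fbdev_init(drm, 32,
				    drm->mode_config.num_connector);
	if (ret)
		DRM_DEV_ERROR(drm->dev, "fbdev init failed: %d\n", ret);

	return 0;
}

static void example_unload(struct drm_device *drm)
{
	drm_fb_cma_fbdev_fini(drm);
	drm_dev_unregister(drm);
}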
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 33fe95927742..b069433e7fc1 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -33,6 +33,7 @@
struct drm_fb_helper;
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <linux/kgdb.h>
enum mode_set_atomic {
@@ -48,6 +49,7 @@ struct drm_fb_helper_crtc {
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
int x, y;
+ int rotation;
};
/**
@@ -159,6 +161,13 @@ struct drm_fb_helper {
int connector_count;
int connector_info_alloc_count;
/**
+ * @sw_rotations:
+ * Bitmask of all rotations requested for panel-orientation which
+ * could not be handled in hardware. If only one bit is set,
+ * fbdev->fbcon_rotate_hint gets set to the requested rotation.
+ */
+ int sw_rotations;
+ /**
* @connector_info:
*
* Array of per-connector information. Do not iterate directly, but use
@@ -267,6 +276,7 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist);
+int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper);
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
@@ -310,6 +320,16 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn);
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
+
+int drm_fb_helper_fbdev_setup(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ const struct drm_fb_helper_funcs *funcs,
+ unsigned int preferred_bpp,
+ unsigned int max_conn_count);
+void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
+
+void drm_fb_helper_lastclose(struct drm_device *dev);
+void drm_fb_helper_output_poll_changed(struct drm_device *dev);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -321,11 +341,17 @@ static inline int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *helper,
int max_conn)
{
+ /* So drivers can use it to free the struct */
+ helper->dev = dev;
+ dev->fb_helper = helper;
+
return 0;
}
static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
{
+ if (helper && helper->dev)
+ helper->dev->fb_helper = NULL;
}
static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -398,6 +424,11 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info,
{
}
+static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
+{
+ return -ENODEV;
+}
+
static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
char __user *buf, size_t count,
loff_t *ppos)
@@ -507,6 +538,32 @@ drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
return 0;
}
+static inline int
+drm_fb_helper_fbdev_setup(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ const struct drm_fb_helper_funcs *funcs,
+ unsigned int preferred_bpp,
+ unsigned int max_conn_count)
+{
+ /* So drivers can use it to free the struct */
+ dev->fb_helper = fb_helper;
+
+ return 0;
+}
+
+static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
+{
+ dev->fb_helper = NULL;
+}
+
+static inline void drm_fb_helper_lastclose(struct drm_device *dev)
+{
+}
+
+static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
+{
+}
+
#endif
static inline int
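drm_fb_helper_fbdev_setup() is intended as a single call that performs the usual prepare/init/initial-config sequence and registers dev->fb_helper; a minimal sketch, assuming a driver-provided @fb_probe implementation and a heap-allocated helper:

/* Hypothetical: allocates the framebuffer backing the fbdev emulation. */
static int example_fb_probe(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes);

static const struct drm_fb_helper_funcs example_fb_helper_funcs = {
	.fb_probe = example_fb_probe,
};

static int example_fbdev_setup(struct drm_device *dev)
{
	struct drm_fb_helper *helper;

	helper = kzalloc(sizeof(*helper), GFP_KERNEL);
	if (!helper)
		return -ENOMEM;

	/* Torn down again with drm_fb_helper_fbdev_teardown(dev). */
	return drm_fb_helper_fbdev_setup(dev, helper, &example_fb_helper_funcs,
					 32, dev->mode_config.num_connector);
}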
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 0e0c868451a5..5176c3797680 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -364,7 +364,7 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 6942e84b6edd..3e86408dac9f 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -38,6 +38,7 @@ struct drm_mode_fb_cmd2;
* @cpp: Number of bytes per pixel (per plane)
* @hsub: Horizontal chroma subsampling factor
* @vsub: Vertical chroma subsampling factor
+ * @has_alpha: Does the format embed an alpha component?
*/
struct drm_format_info {
u32 format;
@@ -46,6 +47,7 @@ struct drm_format_info {
u8 cpp[3];
u8 hsub;
u8 vsub;
+ bool has_alpha;
};
/**
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 4c5ee4ae54df..c50502c656e5 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -121,6 +121,12 @@ struct drm_framebuffer {
* @base: base modeset object structure, contains the reference count.
*/
struct drm_mode_object base;
+
+ /**
+ * @comm: Name of the process allocating the fb, used for fb dumping.
+ */
+ char comm[TASK_COMM_LEN];
+
/**
* @format: framebuffer format information
*/
@@ -264,7 +270,7 @@ static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
*
* This functions returns the framebuffer's reference count.
*/
-static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
+static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb)
{
return kref_read(&fb->base.refcount);
}
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 9c55c2acaa2b..3583b98a1718 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -116,21 +116,6 @@ struct drm_gem_object {
int name;
/**
- * @read_domains:
- *
- * Read memory domains. These monitor which caches contain read/write data
- * related to the object. When transitioning from one set of domains
- * to another, the driver is called to ensure that caches are suitably
- * flushed and invalidated.
- */
- uint32_t read_domains;
-
- /**
- * @write_domain: Corresponding unique write memory domain.
- */
- uint32_t write_domain;
-
- /**
* @dma_buf:
*
* dma-buf associated with this GEM object.
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 520e3feb502c..19777145cf8e 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -9,7 +9,9 @@
* struct drm_gem_cma_object - GEM object backed by CMA memory allocations
* @base: base GEM object
* @paddr: physical address of the backing memory
- * @sgt: scatter/gather table for imported PRIME buffers
+ * @sgt: scatter/gather table for imported PRIME buffers. The table can have
+ * more than one entry but they are guaranteed to have contiguous
+ * DMA addresses.
* @vaddr: kernel virtual address of the backing memory
*/
struct drm_gem_cma_object {
@@ -21,11 +23,8 @@ struct drm_gem_cma_object {
void *vaddr;
};
-static inline struct drm_gem_cma_object *
-to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
-{
- return container_of(gem_obj, struct drm_gem_cma_object, base);
-}
+#define to_drm_gem_cma_obj(gem_obj) \
+ container_of(gem_obj, struct drm_gem_cma_object, base)
#ifndef CONFIG_MMU
#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
@@ -91,9 +90,8 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
unsigned long flags);
#endif
-#ifdef CONFIG_DEBUG_FS
-void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
-#endif
+void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj);
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
new file mode 100644
index 000000000000..562fa7df2637
--- /dev/null
+++ b/include/drm/drm_hdcp.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#ifndef _DRM_HDCP_H_INCLUDED_
+#define _DRM_HDCP_H_INCLUDED_
+
+/* Period of hdcp checks (to ensure we're still authenticated) */
+#define DRM_HDCP_CHECK_PERIOD_MS (128 * 16)
+
+/* Shared lengths/masks between HDMI/DVI/DisplayPort */
+#define DRM_HDCP_AN_LEN 8
+#define DRM_HDCP_BSTATUS_LEN 2
+#define DRM_HDCP_KSV_LEN 5
+#define DRM_HDCP_RI_LEN 2
+#define DRM_HDCP_V_PRIME_PART_LEN 4
+#define DRM_HDCP_V_PRIME_NUM_PARTS 5
+#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f)
+#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3))
+#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7))
+
+/* Slave address for the HDCP registers in the receiver */
+#define DRM_HDCP_DDC_ADDR 0x3A
+
+/* HDCP register offsets for HDMI/DVI devices */
+#define DRM_HDCP_DDC_BKSV 0x00
+#define DRM_HDCP_DDC_RI_PRIME 0x08
+#define DRM_HDCP_DDC_AKSV 0x10
+#define DRM_HDCP_DDC_AN 0x18
+#define DRM_HDCP_DDC_V_PRIME(h) (0x20 + h * 4)
+#define DRM_HDCP_DDC_BCAPS 0x40
+#define DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT BIT(6)
+#define DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY BIT(5)
+#define DRM_HDCP_DDC_BSTATUS 0x41
+#define DRM_HDCP_DDC_KSV_FIFO 0x43
+
+#endif
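The DDC offsets above are enough to talk HDCP 1.x to an HDMI/DVI receiver at the fixed 0x3A slave address; a hedged sketch of reading BCAPS with plain i2c_transfer() (needs <linux/i2c.h>, error handling simplified):

static int example_hdcp_read_bcaps(struct i2c_adapter *adapter, u8 *bcaps)
{
	u8 offset = DRM_HDCP_DDC_BCAPS;
	struct i2c_msg msgs[] = {
		{ .addr = DRM_HDCP_DDC_ADDR, .flags = 0,
		  .len = 1, .buf = &offset },
		{ .addr = DRM_HDCP_DDC_ADDR, .flags = I2C_M_RD,
		  .len = 1, .buf = bcaps },
	};
	int ret;

	ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
	if (ret != ARRAY_SIZE(msgs))
		return ret >= 0 ? -EIO : ret;

	/*
	 * DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT in *bcaps now tells whether a
	 * repeater hangs off this receiver, which changes the auth flow.
	 */
	return 0;
}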
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 8d10fc97801c..101f566ae43d 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -386,7 +386,7 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm,
* @color: opaque tag value to use for this node
* @mode: fine-tune the allocation search and placement
*
- * This is a simplified version of drm_mm_insert_node_in_range_generic() with no
+ * This is a simplified version of drm_mm_insert_node_in_range() with no
* range restrictions applied.
*
* The preallocated node must be cleared to 0.
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index b0ce26d71296..7569f22ffef6 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -36,6 +36,7 @@ struct drm_device;
struct drm_atomic_state;
struct drm_mode_fb_cmd2;
struct drm_format_info;
+struct drm_display_mode;
/**
* struct drm_mode_config_funcs - basic driver provided mode setting functions
@@ -102,6 +103,17 @@ struct drm_mode_config_funcs {
void (*output_poll_changed)(struct drm_device *dev);
/**
+ * @mode_valid:
+ *
+ * Device specific validation of display modes. Can be used to reject
+ * modes that can never be supported. Only device wide constraints can
+ * be checked here. crtc/encoder/bridge/connector specific constraints
+ * should be checked in the .mode_valid() hook for each specific object.
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+
+ /**
* @atomic_check:
*
* This is the only hook to validate an atomic modeset update. This
@@ -269,6 +281,9 @@ struct drm_mode_config_funcs {
* state easily. If this hook is implemented, drivers must also
* implement @atomic_state_clear and @atomic_state_free.
*
+ * Subclassing of &drm_atomic_state is deprecated in favour of using
+ * &drm_private_state and &drm_private_obj.
+ *
* RETURNS:
*
* A new &drm_atomic_state on success or NULL on failure.
@@ -290,6 +305,9 @@ struct drm_mode_config_funcs {
*
* Drivers that implement this must call drm_atomic_state_default_clear()
* to clear common state.
+ *
+ * Subclassing of &drm_atomic_state is deprecated in favour of using
+ * &drm_private_state and &drm_private_obj.
*/
void (*atomic_state_clear)(struct drm_atomic_state *state);
@@ -302,6 +320,9 @@ struct drm_mode_config_funcs {
*
* Drivers that implement this must call
* drm_atomic_state_default_release() to release common resources.
+ *
+ * Subclassing of &drm_atomic_state is deprecated in favour of using
+ * &drm_private_state and &drm_private_obj.
*/
void (*atomic_state_free)(struct drm_atomic_state *state);
};
@@ -751,6 +772,13 @@ struct drm_mode_config {
*/
struct drm_property *non_desktop_property;
+ /**
+ * @panel_orientation_property: Optional connector property indicating
+ * how the lcd-panel is mounted inside the casing (e.g. normal or
+ * upside-down).
+ */
+ struct drm_property *panel_orientation_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
@@ -768,7 +796,7 @@ struct drm_mode_config {
bool allow_fb_modifiers;
/**
- * @modifiers: Plane property to list support modifier/format
+ * @modifiers_property: Plane property listing the supported modifier/format
* combination.
*/
struct drm_property *modifiers_property;
@@ -776,6 +804,15 @@ struct drm_mode_config {
/* cursor size */
uint32_t cursor_width, cursor_height;
+ /**
+ * @suspend_state:
+ *
+ * Atomic state when suspended.
+ * Set by drm_mode_config_helper_suspend() and cleared by
+ * drm_mode_config_helper_resume().
+ */
+ struct drm_atomic_state *suspend_state;
+
const struct drm_mode_config_helper_funcs *helper_private;
};
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 7ba3913f30b5..c34a3e8030e1 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -120,30 +120,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
void drm_mode_object_get(struct drm_mode_object *obj);
void drm_mode_object_put(struct drm_mode_object *obj);
-/**
- * drm_mode_object_reference - acquire a mode object reference
- * @obj: DRM mode object
- *
- * This is a compatibility alias for drm_mode_object_get() and should not be
- * used by new code.
- */
-static inline void drm_mode_object_reference(struct drm_mode_object *obj)
-{
- drm_mode_object_get(obj);
-}
-
-/**
- * drm_mode_object_unreference - release a mode object reference
- * @obj: DRM mode object
- *
- * This is a compatibility alias for drm_mode_object_put() and should not be
- * used by new code.
- */
-static inline void drm_mode_object_unreference(struct drm_mode_object *obj)
-{
- drm_mode_object_put(obj);
-}
-
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 9f3421c8efcd..0d310beae6af 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -131,9 +131,6 @@ enum drm_mode_status {
MODE_ERROR = -1
};
-#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
- DRM_MODE_TYPE_CRTC_C)
-
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
.name = nm, .status = 0, .type = (t), .clock = (c), \
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
@@ -245,26 +242,25 @@ struct drm_display_mode {
* A bitmask of flags, mostly about the source of a mode. Possible flags
* are:
*
- * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively
- * unused.
* - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
* resolution of an LCD panel. There should only be one preferred
* mode per connector at any given time.
* - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
* them really. Drivers must set this bit for all modes they create
* and expose to userspace.
+ * - DRM_MODE_TYPE_USERDEF: Mode defined via kernel command line
*
* Plus a big list of flags which shouldn't be used at all, but are
- * still around since these flags are also used in the userspace ABI:
+ * still around since these flags are also used in the userspace ABI.
+ * We no longer accept modes with these types though:
*
+ * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused.
+ * Use DRM_MODE_TYPE_DRIVER instead.
* - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
* DRM_MODE_TYPE_PREFERRED instead.
* - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
* which are stuck around for hysterical raisins only. No one has an
* idea what they were meant for. Don't use.
- * - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige
- * from older kms designs where userspace had to first add a custom
- * mode to the kernel's mode list before it could use it. Don't use.
*/
unsigned int type;
@@ -299,8 +295,8 @@ struct drm_display_mode {
* - DRM_MODE_FLAG_PCSYNC: composite sync is active high.
* - DRM_MODE_FLAG_NCSYNC: composite sync is active low.
* - DRM_MODE_FLAG_HSKEW: hskew provided (not used?).
- * - DRM_MODE_FLAG_BCAST: not used?
- * - DRM_MODE_FLAG_PIXMUX: not used?
+ * - DRM_MODE_FLAG_BCAST: <deprecated>
+ * - DRM_MODE_FLAG_PIXMUX: <deprecated>
* - DRM_MODE_FLAG_DBLCLK: double-clocked mode.
* - DRM_MODE_FLAG_CLKDIV2: half-clocked mode.
*
@@ -448,7 +444,8 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev);
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in);
-int drm_mode_convert_umode(struct drm_display_mode *out,
+int drm_mode_convert_umode(struct drm_device *dev,
+ struct drm_display_mode *out,
const struct drm_mode_modeinfo *in);
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
@@ -501,7 +498,8 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
/* for use by the crtc helper probe functions */
-enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
+enum drm_mode_status drm_mode_validate_driver(struct drm_device *dev,
+ const struct drm_display_mode *mode);
enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
int maxX, int maxY);
enum drm_mode_status
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
index cb0ec92e11e6..efa337f03129 100644
--- a/include/drm/drm_modeset_helper.h
+++ b/include/drm/drm_modeset_helper.h
@@ -34,4 +34,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
+int drm_mode_config_helper_suspend(struct drm_device *dev);
+int drm_mode_config_helper_resume(struct drm_device *dev);
+
#endif
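The new suspend/resume helpers pair with the @suspend_state pointer added to &drm_mode_config earlier in this diff; a minimal sketch of system-sleep handlers built on them (the dev_get_drvdata() layout is an assumption of this example):

static int __maybe_unused example_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int __maybe_unused example_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_pm_suspend, example_pm_resume);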
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 16646c44b7df..3e76ca805b0f 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -801,9 +801,6 @@ struct drm_connector_helper_funcs {
* resolution can call drm_add_modes_noedid(), and mark the preferred
* one using drm_set_preferred_mode().
*
- * Finally drivers that support audio probably want to update the ELD
- * data, too, using drm_edid_to_eld().
- *
* This function is only called after the @detect hook has indicated
* that a sink is connected and when the EDID isn't overridden through
* sysfs or the kernel commandline.
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 571615079230..f7bf4a48b1c3 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -26,6 +26,7 @@
#include <linux/list.h>
#include <linux/ctype.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_color_mgmt.h>
struct drm_crtc;
struct drm_printer;
@@ -112,6 +113,20 @@ struct drm_plane_state {
unsigned int zpos;
unsigned int normalized_zpos;
+ /**
+ * @color_encoding:
+ *
+ * Color encoding for non RGB formats
+ */
+ enum drm_color_encoding color_encoding;
+
+ /**
+ * @color_range:
+ *
+ * Color range for non RGB formats
+ */
+ enum drm_color_range color_range;
+
/* Clipped coordinates */
struct drm_rect src, dst;
@@ -474,8 +489,8 @@ enum drm_plane_type {
* @format_types: array of formats supported by this plane
* @format_count: number of formats supported
* @format_default: driver hasn't supplied supported formats for the plane
- * @crtc: currently bound CRTC
- * @fb: currently bound fb
+ * @modifiers: array of modifiers supported by this plane
+ * @modifier_count: number of modifiers supported
* @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
* drm_mode_set_config_internal() to implement correct refcounting.
* @funcs: helper functions
@@ -512,7 +527,17 @@ struct drm_plane {
uint64_t *modifiers;
unsigned int modifier_count;
+ /**
+ * @crtc: Currently bound CRTC, only really meaningful for non-atomic
+ * drivers. Atomic drivers should instead check &drm_plane_state.crtc.
+ */
struct drm_crtc *crtc;
+
+ /**
+ * @fb: Currently bound framebuffer, only really meaningful for
+ * non-atomic drivers. Atomic drivers should instead check
+ * &drm_plane_state.fb.
+ */
struct drm_framebuffer *fb;
struct drm_framebuffer *old_fb;
@@ -548,6 +573,23 @@ struct drm_plane {
struct drm_property *zpos_property;
struct drm_property *rotation_property;
+
+ /**
+ * @color_encoding_property:
+ *
+ * Optional "COLOR_ENCODING" enum property for specifying
+ * color encoding for non RGB formats.
+ * See drm_plane_create_color_properties().
+ */
+ struct drm_property *color_encoding_property;
+ /**
+ * @color_range_property:
+ *
+ * Optional "COLOR_RANGE" enum property for specifying
+ * color range for non RGB formats.
+ * See drm_plane_create_color_properties().
+ */
+ struct drm_property *color_range_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 7c8a00ceadb7..28d7ce620729 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -38,17 +38,11 @@
*/
#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-int drm_plane_helper_check_state(struct drm_plane_state *state,
- const struct drm_rect *clip,
- int min_scale, int max_scale,
- bool can_position,
- bool can_update_disabled);
int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_rect *src,
struct drm_rect *dest,
- const struct drm_rect *clip,
unsigned int rotation,
int min_scale,
int max_scale,
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 9cd9e36f77b5..4d5f5d6cf6a6 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -54,6 +54,9 @@ struct device;
struct dma_buf_export_info;
struct dma_buf;
+struct dma_buf_attachment;
+
+enum dma_data_direction;
struct drm_device;
struct drm_gem_object;
@@ -79,6 +82,25 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+ struct dma_buf_attachment *attach);
+void drm_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach);
+struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir);
+void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
+void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num);
+void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr);
+void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
+void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
+ void *addr);
+int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index ca4d7c6321f2..e1a46e9991cc 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -80,6 +80,29 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
+__printf(2, 0)
+/**
+ * drm_vprintf - print to a &drm_printer stream
+ * @p: the &drm_printer
+ * @fmt: format string
+ * @va: the va_list
+ */
+static inline void
+drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
+{
+ struct va_format vaf = { .fmt = fmt, .va = va };
+
+ p->printfn(p, &vaf);
+}
+
+/**
+ * drm_printf_indent - Print to a &drm_printer stream with indentation
+ * @printer: DRM printer
+ * @indent: Tab indentation level (max 5)
+ * @fmt: Format string
+ */
+#define drm_printf_indent(printer, indent, fmt, ...) \
+ drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__)
/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
@@ -128,4 +151,199 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
};
return p;
}
+
+/*
+ * The following categories are defined:
+ *
+ * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
+ * This is the category used by the DRM_DEBUG() macro.
+ *
+ * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
+ * This is the category used by the DRM_DEBUG_DRIVER() macro.
+ *
+ * KMS: used in the modesetting code.
+ * This is the category used by the DRM_DEBUG_KMS() macro.
+ *
+ * PRIME: used in the prime code.
+ * This is the category used by the DRM_DEBUG_PRIME() macro.
+ *
+ * ATOMIC: used in the atomic code.
+ * This is the category used by the DRM_DEBUG_ATOMIC() macro.
+ *
+ * VBL: used for verbose debug message in the vblank code
+ * This is the category used by the DRM_DEBUG_VBL() macro.
+ *
+ * Enabling verbose debug messages is done through the drm.debug parameter,
+ * each category being enabled by a bit.
+ *
+ * drm.debug=0x1 will enable CORE messages
+ * drm.debug=0x2 will enable DRIVER messages
+ * drm.debug=0x3 will enable CORE and DRIVER messages
+ * ...
+ * drm.debug=0x3f will enable all messages
+ *
+ * An interesting feature is that it's possible to enable verbose logging at
+ * run-time by echoing the debug value in its sysfs node:
+ * # echo 0xf > /sys/module/drm/parameters/debug
+ */
+#define DRM_UT_NONE 0x00
+#define DRM_UT_CORE 0x01
+#define DRM_UT_DRIVER 0x02
+#define DRM_UT_KMS 0x04
+#define DRM_UT_PRIME 0x08
+#define DRM_UT_ATOMIC 0x10
+#define DRM_UT_VBL 0x20
+#define DRM_UT_STATE 0x40
+#define DRM_UT_LEASE 0x80
+
+__printf(3, 4)
+void drm_dev_printk(const struct device *dev, const char *level,
+ const char *format, ...);
+__printf(3, 4)
+void drm_dev_dbg(const struct device *dev, unsigned int category,
+ const char *format, ...);
+
+__printf(2, 3)
+void drm_dbg(unsigned int category, const char *format, ...);
+__printf(1, 2)
+void drm_err(const char *format, ...);
+
+/* Macros to make printk easier */
+
+#define _DRM_PRINTK(once, level, fmt, ...) \
+ printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
+#define DRM_INFO(fmt, ...) \
+ _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE(fmt, ...) \
+ _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN(fmt, ...) \
+ _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+
+/**
+ * Error output.
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
+#define DRM_DEV_ERROR(dev, fmt, ...) \
+ drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
+#define DRM_ERROR(fmt, ...) \
+ drm_err(fmt, ##__VA_ARGS__)
+
+/**
+ * Rate limited error output. Like DRM_ERROR() but won't flood the log.
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
+#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&_rs)) \
+ DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
+})
+#define DRM_ERROR_RATELIMITED(fmt, ...) \
+ DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_INFO(dev, fmt, ...) \
+ drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
+({ \
+ static bool __print_once __read_mostly; \
+ if (!__print_once) { \
+ __print_once = true; \
+ DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \
+ } \
+})
+
+/**
+ * Debug output.
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
+#define DRM_DEV_DEBUG(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG(fmt, ...) \
+ drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_DRIVER(fmt, ...) \
+ drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_KMS(fmt, ...) \
+ drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_PRIME(fmt, ...) \
+ drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_ATOMIC(fmt, ...) \
+ drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_VBL(fmt, ...) \
+ drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_LEASE(fmt, ...) \
+ drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \
+})
+
+/**
+ * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
+ *
+ * @dev: device pointer
+ * @fmt: printf() like format string.
+ */
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
#endif /* DRM_PRINT_H_ */
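drm_printf_indent() is what the new &drm_driver.gem_print_info hook (earlier in this diff) expects callbacks to use; a hedged sketch with a hypothetical subclassed GEM object:

struct example_gem_object {
	struct drm_gem_object base;
	bool imported;
	u64 gpu_addr;
};

static void example_gem_print_info(struct drm_printer *p, unsigned int indent,
				   const struct drm_gem_object *obj)
{
	const struct example_gem_object *bo =
		container_of(obj, struct example_gem_object, base);

	drm_printf_indent(p, indent, "imported=%s\n",
			  bo->imported ? "yes" : "no");
	drm_printf_indent(p, indent, "gpu_addr=0x%llx\n", bo->gpu_addr);
}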
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 8a522b4bed40..d1423c7f3c73 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -209,7 +209,7 @@ struct drm_property_blob {
struct list_head head_global;
struct list_head head_file;
size_t length;
- unsigned char data[];
+ void *data;
};
struct drm_prop_enum_list {
@@ -237,27 +237,29 @@ static inline bool drm_property_type_is(struct drm_property *property,
return property->flags & type;
}
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values);
-struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
- const char *name,
+struct drm_property *drm_property_create(struct drm_device *dev,
+ u32 flags, const char *name,
+ int num_values);
+struct drm_property *drm_property_create_enum(struct drm_device *dev,
+ u32 flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
- int flags, const char *name,
+ u32 flags, const char *name,
const struct drm_prop_enum_list *props,
int num_props,
uint64_t supported_bits);
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
- const char *name,
+struct drm_property *drm_property_create_range(struct drm_device *dev,
+ u32 flags, const char *name,
uint64_t min, uint64_t max);
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
- int flags, const char *name,
+ u32 flags, const char *name,
int64_t min, int64_t max);
struct drm_property *drm_property_create_object(struct drm_device *dev,
- int flags, const char *name, uint32_t type);
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
- const char *name);
+ u32 flags, const char *name,
+ uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev,
+ u32 flags, const char *name);
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 6d9adbb46293..1b4e352143fd 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -22,6 +22,41 @@ struct drm_simple_display_pipe;
*/
struct drm_simple_display_pipe_funcs {
/**
+ * @mode_valid:
+ *
+ * This callback is used to check if a specific mode is valid in the
+ * crtc used in this simple display pipe. This should be implemented
+ * if the display pipe has some sort of restriction in the modes
+ * it can display. For example, a given display pipe may be responsible
+ * for setting a clock value. If the clock cannot produce all the values
+ * for the available modes, then this callback can be used to restrict
+ * the number of modes to only the ones that can be displayed. Another
+ * reason can be bandwidth mitigation: the memory port on the display
+ * controller can have bandwidth limitations not allowing pixel data
+ * to be fetched at any rate.
+ *
+ * This hook is used by the probe helpers to filter the mode list in
+ * drm_helper_probe_single_connector_modes(), and it is used by the
+ * atomic helpers to validate modes supplied by userspace in
+ * drm_atomic_helper_check_modeset().
+ *
+ * This function is optional.
+ *
+ * NOTE:
+ *
+ * Since this function is called both from the check phase of an atomic
+ * commit and from mode validation in the probe paths, it is not allowed
+ * to look at anything other than the passed-in mode, and must validate
+ * it against configuration-invariant hardware constraints.
+ *
+ * RETURNS:
+ *
+ * A &enum drm_mode_status value indicating whether the mode can be used.
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+
+ /**
* @enable:
*
* This function should be used to enable the pipeline.
@@ -93,6 +128,24 @@ struct drm_simple_display_pipe_funcs {
*/
void (*cleanup_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
+
+ /**
+ * @enable_vblank:
+ *
+ * Optional, called by &drm_crtc_funcs.enable_vblank. Please read
+ * the documentation for the &drm_crtc_funcs.enable_vblank hook for
+ * more details.
+ */
+ int (*enable_vblank)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @disable_vblank:
+ *
+ * Optional, called by &drm_crtc_funcs.disable_vblank. Please read
+ * the documentation for the &drm_crtc_funcs.disable_vblank hook for
+ * more details.
+ */
+ void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
};
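A minimal @mode_valid implementation for the simple display pipe, assuming a hypothetical 150 MHz pixel-clock limit as the only device-wide constraint:

static enum drm_mode_status
example_pipe_mode_valid(struct drm_crtc *crtc,
			const struct drm_display_mode *mode)
{
	/* mode->clock is in kHz; 150000 is an assumed PLL limit. */
	if (mode->clock > 150000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
	.mode_valid = example_pipe_mode_valid,
	/* .enable, .disable, .update, ... remain driver specific. */
};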
/**
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 43e2f382d2f0..3980602472c0 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -33,36 +33,31 @@ struct drm_syncobj_cb;
/**
* struct drm_syncobj - sync object.
*
- * This structure defines a generic sync object which wraps a dma fence.
+ * This structure defines a generic sync object which wraps a &dma_fence.
*/
struct drm_syncobj {
/**
- * @refcount:
- *
- * Reference count of this object.
+ * @refcount: Reference count of this object.
*/
struct kref refcount;
/**
* @fence:
* NULL or a pointer to the fence bound to this object.
*
- * This field should not be used directly. Use drm_syncobj_fence_get
- * and drm_syncobj_replace_fence instead.
+ * This field should not be used directly. Use drm_syncobj_fence_get()
+ * and drm_syncobj_replace_fence() instead.
*/
- struct dma_fence *fence;
+ struct dma_fence __rcu *fence;
/**
- * @cb_list:
- * List of callbacks to call when the fence gets replaced
+ * @cb_list: List of callbacks to call when the &fence gets replaced.
*/
struct list_head cb_list;
/**
- * @lock:
- * locks cb_list and write-locks fence.
+ * @lock: Protects &cb_list and write-locks &fence.
*/
spinlock_t lock;
/**
- * @file:
- * a file backing for this syncobj.
+ * @file: A file backing for this syncobj.
*/
struct file *file;
};
@@ -73,7 +68,7 @@ typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj,
/**
* struct drm_syncobj_cb - callback for drm_syncobj_add_callback
* @node: used by drm_syncob_add_callback to append this struct to
- * syncobj::cb_list
+ * &drm_syncobj.cb_list
* @func: drm_syncobj_func_t to call
*
* This struct will be initialized by drm_syncobj_add_callback, additional
@@ -92,7 +87,7 @@ void drm_syncobj_free(struct kref *kref);
* drm_syncobj_get - acquire a syncobj reference
* @obj: sync object
*
- * This acquires additional reference to @obj. It is illegal to call this
+ * This acquires an additional reference to @obj. It is illegal to call this
* without already holding a reference. No locks required.
*/
static inline void
@@ -111,6 +106,17 @@ drm_syncobj_put(struct drm_syncobj *obj)
kref_put(&obj->refcount, drm_syncobj_free);
}
+/**
+ * drm_syncobj_fence_get - get a reference to a fence in a sync object
+ * @syncobj: sync object.
+ *
+ * This acquires additional reference to &drm_syncobj.fence contained in @obj,
+ * if not NULL. It is illegal to call this without already holding a reference.
+ * No locks required.
+ *
+ * Returns:
+ * Either the fence of @obj or NULL if there's none.
+ */
static inline struct dma_fence *
drm_syncobj_fence_get(struct drm_syncobj *syncobj)
{
diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h
new file mode 100644
index 000000000000..a803988d8579
--- /dev/null
+++ b/include/drm/drm_utils.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Function prototypes for misc. drm utility functions.
+ * Specifically this file is for function prototypes for functions which
+ * may also be used outside of drm code (e.g. in fbdev drivers).
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_UTILS_H__
+#define __DRM_UTILS_H__
+
+int drm_get_panel_orientation_quirk(int width, int height);
+
+#endif
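drm_get_panel_orientation_quirk() consults a quirk table for devices whose panel is mounted rotated inside the casing; a hedged sketch of feeding its result into the connector's display info (native_mode stands in for the panel's preferred mode):

static void
example_apply_orientation_quirk(struct drm_connector *connector,
				const struct drm_display_mode *native_mode)
{
	int orientation;

	orientation = drm_get_panel_orientation_quirk(native_mode->hdisplay,
						      native_mode->vdisplay);
	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
		connector->display_info.panel_orientation = orientation;
}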
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 848b463a0af5..d25a9603ab57 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -55,8 +55,24 @@ struct drm_pending_vblank_event {
* @event: Actual event which will be sent to userspace.
*/
union {
+ /**
+ * @event.base: DRM event base class.
+ */
struct drm_event base;
+
+ /**
+ * @event.vbl:
+ *
+ * Event payload for vblank events, requested through
+ * either the MODE_PAGE_FLIP or MODE_ATOMIC IOCTL. Also
+ * generated by the legacy WAIT_VBLANK IOCTL, but new userspace
+ * should use MODE_QUEUE_SEQUENCE and &event.seq instead.
+ */
struct drm_event_vblank vbl;
+
+ /**
+ * @event.seq: Event payload for the MODE_QUEUE_SEQUENCE IOCTL.
+ */
struct drm_event_crtc_sequence seq;
} event;
};
@@ -179,7 +195,9 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
-u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
+u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
+void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
+void drm_crtc_vblank_restore(struct drm_crtc *crtc);
bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index d84d52f6d2b1..8758df94e9a0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -152,7 +152,7 @@ static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
* Start address of @node for page-based addressing. 0 if the node does not
* have an offset allocated.
*/
-static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
+static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node)
{
return node->vm_node.start;
}
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
new file mode 100644
index 000000000000..dfd54fb94e10
--- /dev/null
+++ b/include/drm/gpu_scheduler.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _DRM_GPU_SCHEDULER_H_
+#define _DRM_GPU_SCHEDULER_H_
+
+#include <drm/spsc_queue.h>
+#include <linux/dma-fence.h>
+
+struct drm_gpu_scheduler;
+struct drm_sched_rq;
+
+enum drm_sched_priority {
+ DRM_SCHED_PRIORITY_MIN,
+ DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_HIGH_SW,
+ DRM_SCHED_PRIORITY_HIGH_HW,
+ DRM_SCHED_PRIORITY_KERNEL,
+ DRM_SCHED_PRIORITY_MAX,
+ DRM_SCHED_PRIORITY_INVALID = -1,
+ DRM_SCHED_PRIORITY_UNSET = -2
+};
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to the corresponding hardware ring, based on the
+ * scheduling policy.
+*/
+struct drm_sched_entity {
+ struct list_head list;
+ struct drm_sched_rq *rq;
+ spinlock_t rq_lock;
+ struct drm_gpu_scheduler *sched;
+
+ spinlock_t queue_lock;
+ struct spsc_queue job_queue;
+
+ atomic_t fence_seq;
+ uint64_t fence_context;
+
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
+ atomic_t *guilty; /* points to ctx's guilty */
+};
+
+/**
+ * A run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+*/
+struct drm_sched_rq {
+ spinlock_t lock;
+ struct list_head entities;
+ struct drm_sched_entity *current_entity;
+};
+
+struct drm_sched_fence {
+ struct dma_fence scheduled;
+ struct dma_fence finished;
+ struct dma_fence_cb cb;
+ struct dma_fence *parent;
+ struct drm_gpu_scheduler *sched;
+ spinlock_t lock;
+ void *owner;
+};
+
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
+
+struct drm_sched_job {
+ struct spsc_node queue_node;
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_fence *s_fence;
+ struct dma_fence_cb finish_cb;
+ struct work_struct finish_work;
+ struct list_head node;
+ struct delayed_work work_tdr;
+ uint64_t id;
+ atomic_t karma;
+ enum drm_sched_priority s_priority;
+};
+
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+ int threshold)
+{
+ return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+}
+
+/**
+ * Backend operations called by the scheduler; these functions
+ * should be implemented on the driver side.
+*/
+struct drm_sched_backend_ops {
+ struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity);
+ struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
+ void (*timedout_job)(struct drm_sched_job *sched_job);
+ void (*free_job)(struct drm_sched_job *sched_job);
+};
+
+/**
+ * One scheduler is implemented for each hardware ring.
+ */
+struct drm_gpu_scheduler {
+ const struct drm_sched_backend_ops *ops;
+ uint32_t hw_submission_limit;
+ long timeout;
+ const char *name;
+ struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
+ wait_queue_head_t wake_up_worker;
+ wait_queue_head_t job_scheduled;
+ atomic_t hw_rq_count;
+ atomic64_t job_id_count;
+ struct task_struct *thread;
+ struct list_head ring_mirror_list;
+ spinlock_t job_list_lock;
+ int hang_limit;
+};
+
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
+ uint32_t hw_submission, unsigned hang_limit, long timeout,
+ const char *name);
+void drm_sched_fini(struct drm_gpu_scheduler *sched);
+
+int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+ uint32_t jobs, atomic_t *guilty);
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity);
+void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq);
+
+struct drm_sched_fence *drm_sched_fence_create(
+ struct drm_sched_entity *s_entity, void *owner);
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
+void drm_sched_fence_finished(struct drm_sched_fence *fence);
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ void *owner);
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job);
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+#endif
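
A minimal wiring sketch for the API declared above (not part of this patch; my_*, to_my_job() and the ring/context structures are hypothetical driver-side names):

/* Each driver job embeds a struct drm_sched_job, here via a hypothetical my_job. */
static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	/* Submit to the hardware ring and return the hardware fence. */
	return my_hw_submit(to_my_job(sched_job));
}

static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
				       struct drm_sched_entity *s_entity)
{
	return NULL;	/* no extra dependencies beyond the job's own fences */
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.dependency   = my_dependency,
	.run_job      = my_run_job,
	.timedout_job = my_timedout_job,
	.free_job     = my_free_job,
};

static int my_ring_setup(struct my_ring *ring, struct my_ctx *ctx)
{
	int r;

	/* One scheduler instance per hardware ring. */
	r = drm_sched_init(&ring->sched, &my_sched_ops, 16 /* hw_submission */,
			   2 /* hang_limit */, msecs_to_jiffies(1000),
			   ring->name);
	if (r)
		return r;

	/* One entity per context, feeding the NORMAL-priority run queue. */
	return drm_sched_entity_init(&ring->sched, &ctx->entity,
				     &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
				     32 /* job queue size */, NULL /* guilty */);
}

Each submission then calls drm_sched_job_init() followed by drm_sched_entity_push_job(); the scheduler thread pulls jobs off the entity and invokes run_job().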
diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h
new file mode 100644
index 000000000000..0789e8d0a0e1
--- /dev/null
+++ b/include/drm/gpu_scheduler_trace.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_scheduler
+#define TRACE_INCLUDE_FILE gpu_scheduler_trace
+
+TRACE_EVENT(drm_sched_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+ TP_ARGS(sched_job, entity),
+ TP_STRUCT__entry(
+ __field(struct drm_sched_entity *, entity)
+ __field(struct dma_fence *, fence)
+ __field(const char *, name)
+ __field(uint64_t, id)
+ __field(u32, job_count)
+ __field(int, hw_job_count)
+ ),
+
+ TP_fast_assign(
+ __entry->entity = entity;
+ __entry->id = sched_job->id;
+ __entry->fence = &sched_job->s_fence->finished;
+ __entry->name = sched_job->sched->name;
+ __entry->job_count = spsc_queue_count(&entity->job_queue);
+ __entry->hw_job_count = atomic_read(
+ &sched_job->sched->hw_rq_count);
+ ),
+ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->id,
+ __entry->fence, __entry->name,
+ __entry->job_count, __entry->hw_job_count)
+);
+
+TRACE_EVENT(drm_sched_process_job,
+ TP_PROTO(struct drm_sched_fence *fence),
+ TP_ARGS(fence),
+ TP_STRUCT__entry(
+ __field(struct dma_fence *, fence)
+ ),
+
+ TP_fast_assign(
+ __entry->fence = &fence->finished;
+ ),
+ TP_printk("fence=%p signaled", __entry->fence)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
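
The tracepoints above follow the standard kernel pattern: with TRACE_INCLUDE_PATH set to ".", exactly one scheduler .c file is expected to instantiate them (the exact file and Makefile include flags are assumptions, not shown in this patch):

/* In one translation unit only: */
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

/*
 * Elsewhere the generated helpers are plain function calls, e.g.
 * trace_drm_sched_job(sched_job, entity) when a job is queued and
 * trace_drm_sched_process_job(s_fence) when it completes.
 */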
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 545c6e0fea7d..346b1f5cb180 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -26,9 +26,8 @@
/* MAX_PORTS is the number of ports.
 * It must be kept in sync with I915_MAX_PORTS defined in i915_drv.h.
- * 5 should be enough as only HSW, BDW, SKL need such fix.
*/
-#define MAX_PORTS 5
+#define MAX_PORTS 6
/**
* struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 4e1b274e1164..c9e5a6621b95 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -36,6 +36,9 @@ extern bool i915_gpu_lower(void);
extern bool i915_gpu_busy(void);
extern bool i915_gpu_turbo_disable(void);
+/* Exported from arch/x86/kernel/early-quirks.c */
+extern struct resource intel_graphics_stolen_res;
+
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 972a25633525..70f0c2535b87 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -373,43 +373,74 @@
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */
+ INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
+ INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */
#define INTEL_CFL_S_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */
+ INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
/* CFL H */
#define INTEL_CFL_H_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
-/* CFL U */
+/* CFL U GT1 */
+#define INTEL_CFL_U_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA1, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
+
+/* CFL U GT2 */
+#define INTEL_CFL_U_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA0, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info), \
+ INTEL_VGA_DEVICE(0x3EA9, info)
+
+/* CFL U GT3 */
#define INTEL_CFL_U_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */
+ INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
-/* CNL U 2+2 */
-#define INTEL_CNL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x5A52, info), \
- INTEL_VGA_DEVICE(0x5A5A, info), \
- INTEL_VGA_DEVICE(0x5A42, info), \
- INTEL_VGA_DEVICE(0x5A4A, info)
+#define INTEL_CFL_IDS(info) \
+ INTEL_CFL_S_GT1_IDS(info), \
+ INTEL_CFL_S_GT2_IDS(info), \
+ INTEL_CFL_H_GT2_IDS(info), \
+ INTEL_CFL_U_GT1_IDS(info), \
+ INTEL_CFL_U_GT2_IDS(info), \
+ INTEL_CFL_U_GT3_IDS(info)
-/* CNL Y 2+2 */
-#define INTEL_CNL_Y_GT2_IDS(info) \
+/* CNL */
+#define INTEL_CNL_IDS(info) \
INTEL_VGA_DEVICE(0x5A51, info), \
INTEL_VGA_DEVICE(0x5A59, info), \
INTEL_VGA_DEVICE(0x5A41, info), \
INTEL_VGA_DEVICE(0x5A49, info), \
- INTEL_VGA_DEVICE(0x5A71, info), \
- INTEL_VGA_DEVICE(0x5A79, info)
-
-#define INTEL_CNL_IDS(info) \
- INTEL_CNL_U_GT2_IDS(info), \
- INTEL_CNL_Y_GT2_IDS(info)
+ INTEL_VGA_DEVICE(0x5A52, info), \
+ INTEL_VGA_DEVICE(0x5A5A, info), \
+ INTEL_VGA_DEVICE(0x5A42, info), \
+ INTEL_VGA_DEVICE(0x5A4A, info), \
+ INTEL_VGA_DEVICE(0x5A50, info), \
+ INTEL_VGA_DEVICE(0x5A40, info), \
+ INTEL_VGA_DEVICE(0x5A54, info), \
+ INTEL_VGA_DEVICE(0x5A5C, info), \
+ INTEL_VGA_DEVICE(0x5A44, info), \
+ INTEL_VGA_DEVICE(0x5A4C, info)
+
+/* ICL */
+#define INTEL_ICL_11_IDS(info) \
+ INTEL_VGA_DEVICE(0x8A50, info), \
+ INTEL_VGA_DEVICE(0x8A51, info), \
+ INTEL_VGA_DEVICE(0x8A5C, info), \
+ INTEL_VGA_DEVICE(0x8A5D, info), \
+ INTEL_VGA_DEVICE(0x8A52, info), \
+ INTEL_VGA_DEVICE(0x8A5A, info), \
+ INTEL_VGA_DEVICE(0x8A5B, info), \
+ INTEL_VGA_DEVICE(0x8A71, info), \
+ INTEL_VGA_DEVICE(0x8A70, info)
#endif /* _I915_PCIIDS_H */
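
Since each of the consolidated macros above expands to comma-separated INTEL_VGA_DEVICE() initializers, a caller can drop them straight into a PCI ID table. A sketch (the example_*_info device-info structures are placeholders, not defined in this header):

static const struct pci_device_id example_pciidlist[] = {
	INTEL_CFL_IDS(&example_cfl_info),	/* all CFL S/H/U variants */
	INTEL_CNL_IDS(&example_cnl_info),
	INTEL_ICL_11_IDS(&example_icl_info),
	{ }					/* end-of-list terminator */
};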
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index c5db7975c640..2324c84a25c0 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -5,9 +5,8 @@
#define _DRM_INTEL_GTT_H
void intel_gtt_get(u64 *gtt_total,
- u32 *stolen_size,
phys_addr_t *mappable_base,
- u64 *mappable_end);
+ resource_size_t *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
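
With the stolen-size argument gone and mappable_end widened to resource_size_t, a caller of the new prototype would look like this sketch (stolen memory is presumably read from the intel_graphics_stolen_res resource exported earlier in this series instead):

static void example_probe_gtt(void)
{
	u64 gtt_total;
	phys_addr_t mappable_base;
	resource_size_t mappable_end;

	intel_gtt_get(&gtt_total, &mappable_base, &mappable_end);
}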
diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
new file mode 100644
index 000000000000..125f096c88cb
--- /dev/null
+++ b/include/drm/spsc_queue.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
+#define DRM_SCHEDULER_SPSC_QUEUE_H_
+
+#include <linux/atomic.h>
+#include <linux/preempt.h>
+
+/** Lockless single-producer, single-consumer (SPSC) queue */
+
+struct spsc_node {
+
+ /* Stores spsc_node* */
+ struct spsc_node *next;
+};
+
+struct spsc_queue {
+
+ struct spsc_node *head;
+
+ /* atomic pointer to struct spsc_node* */
+ atomic_long_t tail;
+
+ atomic_t job_count;
+};
+
+static inline void spsc_queue_init(struct spsc_queue *queue)
+{
+ queue->head = NULL;
+ atomic_long_set(&queue->tail, (long)&queue->head);
+ atomic_set(&queue->job_count, 0);
+}
+
+static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
+{
+ return queue->head;
+}
+
+static inline int spsc_queue_count(struct spsc_queue *queue)
+{
+ return atomic_read(&queue->job_count);
+}
+
+static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
+{
+ struct spsc_node **tail;
+
+ node->next = NULL;
+
+ preempt_disable();
+
+ tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
+ WRITE_ONCE(*tail, node);
+ atomic_inc(&queue->job_count);
+
+ /*
+	 * If this is the first element, make sure the new node is visible to the
+	 * consumer thread when we ping the kernel thread that there is new work
+	 * to do.
+ */
+ smp_wmb();
+
+ preempt_enable();
+
+ return tail == &queue->head;
+}
+
+
+static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
+{
+ struct spsc_node *next, *node;
+
+	/* Make sure we read the head from memory and not a stale cached value */
+ smp_rmb();
+
+ node = READ_ONCE(queue->head);
+
+ if (!node)
+ return NULL;
+
+ next = READ_ONCE(node->next);
+ WRITE_ONCE(queue->head, next);
+
+ if (unlikely(!next)) {
+ /* slowpath for the last element in the queue */
+
+ if (atomic_long_cmpxchg(&queue->tail,
+ (long)&node->next, (long) &queue->head) != (long)&node->next) {
+			/* Updating the tail failed; wait for the new next to appear */
+ do {
+ smp_rmb();
+ } while (unlikely(!(queue->head = READ_ONCE(node->next))));
+ }
+ }
+
+ atomic_dec(&queue->job_count);
+ return node;
+}
+
+
+#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */
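
A usage sketch of the queue above under its single-producer/single-consumer contract (my_work and wake_consumer() are hypothetical; the GPU scheduler embeds struct spsc_node in struct drm_sched_job in the same way):

struct my_work {
	struct spsc_node node;	/* embedded queue linkage */
	int payload;
};

static struct spsc_queue my_queue;	/* spsc_queue_init(&my_queue) at setup */

/* Producer side, one thread only. */
static void my_submit(struct my_work *w)
{
	/* push returns true when the queue was empty before this push */
	if (spsc_queue_push(&my_queue, &w->node))
		wake_consumer();	/* hypothetical wake-up of the worker */
}

/* Consumer side, one thread only. */
static struct my_work *my_fetch(void)
{
	struct spsc_node *n = spsc_queue_pop(&my_queue);

	return n ? container_of(n, struct my_work, node) : NULL;
}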
diff --git a/include/drm/tinydrm/ili9341.h b/include/drm/tinydrm/ili9341.h
deleted file mode 100644
index 807a09f43cad..000000000000
--- a/include/drm/tinydrm/ili9341.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * ILI9341 LCD controller
- *
- * Copyright 2016 Noralf Trønnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LINUX_ILI9341_H
-#define __LINUX_ILI9341_H
-
-#define ILI9341_FRMCTR1 0xb1
-#define ILI9341_FRMCTR2 0xb2
-#define ILI9341_FRMCTR3 0xb3
-#define ILI9341_INVTR 0xb4
-#define ILI9341_PRCTR 0xb5
-#define ILI9341_DISCTRL 0xb6
-#define ILI9341_ETMOD 0xb7
-
-#define ILI9341_PWCTRL1 0xc0
-#define ILI9341_PWCTRL2 0xc1
-#define ILI9341_VMCTRL1 0xc5
-#define ILI9341_VMCTRL2 0xc7
-#define ILI9341_PWCTRLA 0xcb
-#define ILI9341_PWCTRLB 0xcf
-
-#define ILI9341_RDID1 0xda
-#define ILI9341_RDID2 0xdb
-#define ILI9341_RDID3 0xdc
-#define ILI9341_RDID4 0xd3
-
-#define ILI9341_PGAMCTRL 0xe0
-#define ILI9341_NGAMCTRL 0xe1
-#define ILI9341_DGAMCTRL1 0xe2
-#define ILI9341_DGAMCTRL2 0xe3
-#define ILI9341_DTCTRLA 0xe8
-#define ILI9341_DTCTRLB 0xea
-#define ILI9341_PWRSEQ 0xed
-
-#define ILI9341_EN3GAM 0xf2
-#define ILI9341_IFCTRL 0xf6
-#define ILI9341_PUMPCTRL 0xf7
-
-#define ILI9341_MADCTL_MH BIT(2)
-#define ILI9341_MADCTL_BGR BIT(3)
-#define ILI9341_MADCTL_ML BIT(4)
-#define ILI9341_MADCTL_MV BIT(5)
-#define ILI9341_MADCTL_MX BIT(6)
-#define ILI9341_MADCTL_MY BIT(7)
-
-#endif /* __LINUX_ILI9341_H */
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index 83346ddb9dba..44e824af2ef6 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -67,15 +67,18 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
const struct drm_simple_display_pipe_funcs *pipe_funcs,
struct drm_driver *driver,
const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state);
+void mipi_dbi_enable_flush(struct mipi_dbi *mipi);
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
+int mipi_dbi_poweron_reset(struct mipi_dbi *mipi);
+int mipi_dbi_poweron_conditional_reset(struct mipi_dbi *mipi);
+u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
-
+int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip, bool swap);
/**
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
* @mipi: MIPI structure
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
index d554ded60ee9..0a4ddbc04c60 100644
--- a/include/drm/tinydrm/tinydrm-helpers.h
+++ b/include/drm/tinydrm/tinydrm-helpers.h
@@ -46,10 +46,6 @@ void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
struct drm_clip_rect *clip);
-struct backlight_device *tinydrm_of_find_backlight(struct device *dev);
-int tinydrm_enable_backlight(struct backlight_device *backlight);
-int tinydrm_disable_backlight(struct backlight_device *backlight);
-
size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len);
bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw);
int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 4774fe3d4273..07a9a11fe19d 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -19,16 +19,12 @@
* @drm: DRM device
* @pipe: Display pipe structure
* @dirty_lock: Serializes framebuffer flushing
- * @fbdev_cma: CMA fbdev structure
- * @suspend_state: Atomic state when suspended
* @fb_funcs: Framebuffer functions used when creating framebuffers
*/
struct tinydrm_device {
struct drm_device *drm;
struct drm_simple_display_pipe pipe;
struct mutex dirty_lock;
- struct drm_fbdev_cma *fbdev_cma;
- struct drm_atomic_state *suspend_state;
const struct drm_framebuffer_funcs *fb_funcs;
};
@@ -46,6 +42,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
*/
#define TINYDRM_GEM_DRIVER_OPS \
.gem_free_object = tinydrm_gem_cma_free_object, \
+ .gem_print_info = drm_gem_cma_print_info, \
.gem_vm_ops = &drm_gem_cma_vm_ops, \
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
@@ -81,7 +78,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
.type = DRM_MODE_TYPE_DRIVER, \
.clock = 1 /* pass validation */
-void tinydrm_lastclose(struct drm_device *drm);
void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
struct drm_gem_object *
tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
@@ -92,8 +88,6 @@ int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
struct drm_driver *driver);
int devm_tinydrm_register(struct tinydrm_device *tdev);
void tinydrm_shutdown(struct tinydrm_device *tdev);
-int tinydrm_suspend(struct tinydrm_device *tdev);
-int tinydrm_resume(struct tinydrm_device *tdev);
void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state);
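
For context, TINYDRM_GEM_DRIVER_OPS (which now also wires up .gem_print_info) is meant to be expanded inside a driver's struct drm_driver initializer; a hedged sketch with placeholder names:

static struct drm_driver example_tinydrm_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
				  DRIVER_ATOMIC,
	TINYDRM_GEM_DRIVER_OPS,
	.name			= "example",
	.desc			= "Example tinydrm driver",
	.date			= "20180101",
	.major			= 1,
	.minor			= 0,
};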
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index fa07be197945..c67977aa1a0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -41,6 +41,8 @@
#include <linux/bitmap.h>
#include <linux/reservation.h>
+struct ttm_bo_global;
+
struct ttm_bo_device;
struct drm_mm_node;
@@ -169,7 +171,6 @@ struct ttm_buffer_object {
* Members constant at init.
*/
- struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
@@ -224,7 +225,6 @@ struct ttm_buffer_object {
*/
uint64_t offset; /* GPU address space is independent of CPU word size */
- uint32_t cur_placement;
struct sg_table *sg;
@@ -260,6 +260,30 @@ struct ttm_bo_kmap_obj {
};
/**
+ * struct ttm_operation_ctx
+ *
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @resv: Reservation object to allow reserved evictions with.
+ * @flags: One or more of the TTM_OPT_FLAG_* flags defined below.
+ *
+ * Context for TTM operations like changing buffer placement or general memory
+ * allocation.
+ */
+struct ttm_operation_ctx {
+ bool interruptible;
+ bool no_wait_gpu;
+ struct reservation_object *resv;
+ uint64_t bytes_moved;
+ uint32_t flags;
+};
+
+/* Allow eviction of reserved BOs */
+#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
+/* When serving a page fault or during suspend, allow allocations anyway */
+#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
+
+/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
@@ -288,8 +312,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
* Returns -EBUSY if no_wait is true and the buffer is busy.
* Returns -ERESTARTSYS if interrupted by a signal.
*/
-extern int ttm_bo_wait(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait);
+int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
/**
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
@@ -300,17 +323,15 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo,
*
* Returns true if the placement is compatible
*/
-extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags);
+bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
+ uint32_t *new_flags);
/**
* ttm_bo_validate
*
* @bo: The buffer object.
* @placement: Proposed placement for the buffer object.
- * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @ctx: validation parameters.
*
* Changes placement and caching policy of the buffer object
* according proposed placement.
@@ -320,10 +341,9 @@ extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
* -EBUSY if no_wait is true and buffer busy.
* -ERESTARTSYS if interrupted by a signal.
*/
-extern int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu);
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx);
/**
* ttm_bo_unref
@@ -332,7 +352,7 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
*
* Unreference and clear a pointer to a buffer object.
*/
-extern void ttm_bo_unref(struct ttm_buffer_object **bo);
+void ttm_bo_unref(struct ttm_buffer_object **bo);
/**
* ttm_bo_add_to_lru
@@ -344,7 +364,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
* This function must be called with struct ttm_bo_global::lru_lock held, and
* is typically called immediately prior to unreserving a bo.
*/
-extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
/**
* ttm_bo_del_from_lru
@@ -356,7 +376,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
* and is usually called just immediately after the bo has been reserved to
* avoid recursive reservation from lru lists.
*/
-extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
/**
* ttm_bo_move_to_lru_tail
@@ -367,7 +387,7 @@ extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
* object. This function must be called with struct ttm_bo_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction.
*/
-extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
/**
* ttm_bo_lock_delayed_workqueue
@@ -376,15 +396,14 @@ extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
* Returns
* True if the workqueue was queued at the time
*/
-extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
/**
* ttm_bo_unlock_delayed_workqueue
*
* Allows the delayed workqueue to run.
*/
-extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
- int resched);
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
/**
* ttm_bo_eviction_valuable
@@ -411,8 +430,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
*/
-extern int
-ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/**
* ttm_bo_synccpu_write_release:
@@ -421,7 +439,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
*
* Releases a synccpu lock.
*/
-extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
/**
* ttm_bo_acc_size
@@ -448,13 +466,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep to wait for GPU resources,
- * sleep interruptible.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object. Typically, this would
- * point to the shmem object backing a GEM object if TTM is used to back a
- * GEM user interface.
+ * @ctx: TTM operation context for memory allocation.
* @acc_size: Accounted size for this object.
* @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
@@ -480,18 +492,17 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
- struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interrubtible,
- struct file *persistent_swap_storage,
- size_t acc_size,
- struct sg_table *sg,
- struct reservation_object *resv,
- void (*destroy) (struct ttm_buffer_object *));
+int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ struct ttm_operation_ctx *ctx,
+ size_t acc_size,
+ struct sg_table *sg,
+ struct reservation_object *resv,
+ void (*destroy) (struct ttm_buffer_object *));
/**
* ttm_bo_init
@@ -504,7 +515,6 @@ extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
@@ -531,19 +541,12 @@ extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-
-extern int ttm_bo_init(struct ttm_bo_device *bdev,
- struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interrubtible,
- struct file *persistent_swap_storage,
- size_t acc_size,
- struct sg_table *sg,
- struct reservation_object *resv,
- void (*destroy) (struct ttm_buffer_object *));
+int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
+ unsigned long size, enum ttm_bo_type type,
+ struct ttm_placement *placement,
+		uint32_t page_alignment, bool interruptible, size_t acc_size,
+ struct sg_table *sg, struct reservation_object *resv,
+ void (*destroy) (struct ttm_buffer_object *));
/**
* ttm_bo_create
@@ -555,11 +558,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object. Typically, this would
- * point to the shmem object backing a GEM object if TTM is used to back a
- * GEM user interface.
* @p_bo: On successful completion *p_bo points to the created object.
*
* This function allocates a ttm_buffer_object, and then calls ttm_bo_init
@@ -569,15 +567,10 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while waiting for resources.
*/
-
-extern int ttm_bo_create(struct ttm_bo_device *bdev,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interruptible,
- struct file *persistent_swap_storage,
- struct ttm_buffer_object **p_bo);
+int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t page_alignment, bool interruptible,
+ struct ttm_buffer_object **p_bo);
/**
* ttm_bo_init_mm
@@ -594,9 +587,9 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
* -ENOMEM: Not enough memory.
* May also return driver-specified errors.
*/
+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ unsigned long p_size);
-extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_size);
/**
* ttm_bo_clean_mm
*
@@ -623,8 +616,7 @@ extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
* -EINVAL: invalid or uninitialized memory type.
* -EBUSY: There are still buffers left in this memory type.
*/
-
-extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
/**
* ttm_bo_evict_mm
@@ -644,8 +636,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
* -ERESTARTSYS: The call was interrupted by a signal while waiting to
* evict a buffer.
*/
-
-extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
/**
* ttm_kmap_obj_virtual
@@ -658,7 +649,6 @@ extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
* If *is_iomem is 1 on return, the virtual address points to an io memory area,
* that should strictly be accessed by the iowriteXX() and similar functions.
*/
-
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
bool *is_iomem)
{
@@ -682,9 +672,8 @@ static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
* -ENOMEM: Out of memory.
* -EINVAL: Invalid range.
*/
-
-extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
- unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
/**
* ttm_bo_kunmap
@@ -693,8 +682,7 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
*
* Unmaps a kernel map set up by ttm_bo_kmap.
*/
-
-extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
/**
* ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
@@ -706,20 +694,7 @@ extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
* This function is intended to be called by the fbdev mmap method
* if the fbdev address space is to be backed by a bo.
*/
-
-extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
- struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_default_iomem_pfn - get a pfn for a page offset
- *
- * @bo: the BO we need to look up the pfn for
- * @page_offset: offset inside the BO to look up.
- *
- * Calculate the PFN for iomem based mappings during page fault
- */
-unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
- unsigned long page_offset);
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
@@ -731,9 +706,12 @@ unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
* This function is intended to be called by the device mmap method.
* if the device address space is to be backed by the bo manager.
*/
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+ struct ttm_bo_device *bdev);
-extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev);
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
+
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
/**
* ttm_bo_io
@@ -755,11 +733,12 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ const char __user *wbuf, char __user *rbuf,
+ size_t count, loff_t *f_pos, bool write);
-extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf,
- size_t count, loff_t *f_pos, bool write);
-
-extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
+int ttm_bo_swapout(struct ttm_bo_global *glob,
+ struct ttm_operation_ctx *ctx);
+void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
#endif
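
Most of the churn above replaces the old interruptible/no_wait_gpu argument pairs with struct ttm_operation_ctx. A sketch of a caller converted to the new ttm_bo_validate() (bo and placement are assumed to come from the driver):

static int example_validate(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,	/* may sleep interruptibly */
		.no_wait_gpu = false,	/* wait for the GPU if it is busy */
	};

	/* Previously: ttm_bo_validate(bo, placement, true, false); */
	return ttm_bo_validate(bo, placement, &ctx);
}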
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 5f821a9b3a1f..3234cc322e70 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,113 +42,10 @@
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
+#include "ttm_tt.h"
#define TTM_MAX_BO_PRIORITY 4U
-struct ttm_backend_func {
- /**
- * struct ttm_backend_func member bind
- *
- * @ttm: Pointer to a struct ttm_tt.
- * @bo_mem: Pointer to a struct ttm_mem_reg describing the
- * memory type and location for binding.
- *
- * Bind the backend pages into the aperture in the location
- * indicated by @bo_mem. This function should be able to handle
- * differences between aperture and system page sizes.
- */
- int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-
- /**
- * struct ttm_backend_func member unbind
- *
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Unbind previously bound backend pages. This function should be
- * able to handle differences between aperture and system page sizes.
- */
- int (*unbind) (struct ttm_tt *ttm);
-
- /**
- * struct ttm_backend_func member destroy
- *
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Destroy the backend. This will be call back from ttm_tt_destroy so
- * don't call ttm_tt_destroy from the callback or infinite loop.
- */
- void (*destroy) (struct ttm_tt *ttm);
-};
-
-#define TTM_PAGE_FLAG_WRITE (1 << 3)
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_DMA32 (1 << 7)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-
-enum ttm_caching_state {
- tt_uncached,
- tt_wc,
- tt_cached
-};
-
-/**
- * struct ttm_tt
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
- * pointer.
- * @pages: Array of pages backing the data.
- * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-
-struct ttm_tt {
- struct ttm_bo_device *bdev;
- struct ttm_backend_func *func;
- struct page *dummy_read_page;
- struct page **pages;
- uint32_t page_flags;
- unsigned long num_pages;
- struct sg_table *sg; /* for SG objects via dma-buf */
- struct ttm_bo_global *glob;
- struct file *swap_storage;
- enum ttm_caching_state caching_state;
- enum {
- tt_bound,
- tt_unbound,
- tt_unpopulated,
- } state;
-};
-
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
- struct ttm_tt ttm;
- dma_addr_t *dma_address;
- struct list_head pages_list;
-};
-
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
@@ -328,20 +225,16 @@ struct ttm_bo_driver {
/**
* ttm_tt_create
*
- * @bdev: pointer to a struct ttm_bo_device:
- * @size: Size of the data needed backing.
+ * @bo: The buffer object to create the ttm for.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- * @dummy_read_page: See struct ttm_bo_device.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* Returns:
* NULL: Out of memory.
*/
- struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
- unsigned long size,
- uint32_t page_flags,
- struct page *dummy_read_page);
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
+ uint32_t page_flags);
/**
* ttm_tt_populate
@@ -352,7 +245,8 @@ struct ttm_bo_driver {
* Returns:
* -ENOMEM: Out of memory.
*/
- int (*ttm_tt_populate)(struct ttm_tt *ttm);
+ int (*ttm_tt_populate)(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate
@@ -409,15 +303,13 @@ struct ttm_bo_driver {
* @bo: the buffer to move
* @evict: whether this motion is evicting the buffer from
* the graphics address space
- * @interruptible: Use interruptible sleeps if possible when sleeping.
- * @no_wait: whether this should give up and return -EBUSY
- * if this move would require sleeping
+ * @ctx: context for this move with parameters
* @new_mem: the new memory region receiving the buffer
*
* Move a buffer between two memory regions.
*/
int (*move)(struct ttm_buffer_object *bo, bool evict,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem);
/**
@@ -524,7 +416,6 @@ struct ttm_bo_global {
struct kobject kobj;
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
- struct ttm_mem_shrink shrink;
struct mutex device_list_mutex;
spinlock_t lru_lock;
@@ -558,6 +449,7 @@ struct ttm_bo_global {
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
+ * @no_retry: Don't retry allocation if it fails
*
*/
@@ -594,6 +486,8 @@ struct ttm_bo_device {
struct delayed_work wq;
bool need_dma32;
+
+ bool no_retry;
};
/**
@@ -613,100 +507,6 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
return *old;
}
-/**
- * ttm_tt_init
- *
- * @ttm: The struct ttm_tt.
- * @bdev: pointer to a struct ttm_bo_device:
- * @size: Size of the data needed backing.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- * @dummy_read_page: See struct ttm_bo_device.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
-extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page);
-extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page);
-
-/**
- * ttm_tt_fini
- *
- * @ttm: the ttm_tt structure.
- *
- * Free memory of ttm_tt structure
- */
-extern void ttm_tt_fini(struct ttm_tt *ttm);
-extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
-
-/**
- * ttm_ttm_bind:
- *
- * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_mem_reg identifying the binding location.
- *
- * Bind the pages of @ttm to an aperture location identified by @bo_mem
- */
-extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-
-/**
- * ttm_ttm_destroy:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind, unpopulate and destroy common struct ttm_tt.
- */
-extern void ttm_tt_destroy(struct ttm_tt *ttm);
-
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-extern void ttm_tt_unbind(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_swapin:
- *
- * @ttm: The struct ttm_tt.
- *
- * Swap in a previously swap out ttm_tt.
- */
-extern int ttm_tt_swapin(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_set_placement_caching:
- *
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
- *
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
- */
-extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-extern int ttm_tt_swapout(struct ttm_tt *ttm,
- struct file *persistent_swap_storage);
-
-/**
- * ttm_tt_unpopulate - free pages from a ttm
- *
- * @ttm: Pointer to the ttm_tt structure
- *
- * Calls the driver method to free all pages from a ttm
- */
-extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
-
/*
* ttm_bo.c
*/
@@ -720,8 +520,7 @@ extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
* Returns true if the memory described by @mem is PCI memory,
* false otherwise.
*/
-extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
/**
* ttm_bo_mem_space
@@ -742,21 +541,19 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* fragmentation or concurrent allocators.
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
-extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- bool interruptible,
- bool no_wait_gpu);
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ struct ttm_operation_ctx *ctx);
-extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
+void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
-extern void ttm_bo_global_release(struct drm_global_reference *ref);
-extern int ttm_bo_global_init(struct drm_global_reference *ref);
+void ttm_bo_global_release(struct drm_global_reference *ref);
+int ttm_bo_global_init(struct drm_global_reference *ref);
-extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
+int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
* ttm_bo_device_init
@@ -773,18 +570,17 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
* Returns:
* !0: Failure.
*/
-extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_global *glob,
- struct ttm_bo_driver *driver,
- struct address_space *mapping,
- uint64_t file_page_offset, bool need_dma32);
+int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
+ struct address_space *mapping,
+ uint64_t file_page_offset, bool need_dma32);
/**
* ttm_bo_unmap_virtual
*
* @bo: tear down the virtual mappings for this BO
*/
-extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/**
* ttm_bo_unmap_virtual
@@ -793,16 +589,15 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
*
* The caller must take ttm_mem_io_lock before calling this function.
*/
-extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
-extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
- bool interruptible);
-extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
/**
* __ttm_bo_reserve:
@@ -836,14 +631,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
if (WARN_ON(ticket))
return -EBUSY;
- success = ww_mutex_trylock(&bo->resv->lock);
+ success = reservation_object_trylock(bo->resv);
return success ? 0 : -EBUSY;
}
if (interruptible)
- ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
+ ret = reservation_object_lock_interruptible(bo->resv, ticket);
else
- ret = ww_mutex_lock(&bo->resv->lock, ticket);
+ ret = reservation_object_lock(bo->resv, ticket);
if (ret == -EINTR)
return -ERESTARTSYS;
return ret;
@@ -941,18 +736,6 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
}
/**
- * __ttm_bo_unreserve
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Unreserve a previous reservation of @bo where the buffer object is
- * already on lru lists.
- */
-static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
-{
- ww_mutex_unlock(&bo->resv->lock);
-}
-
-/**
* ttm_bo_unreserve
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -962,24 +745,11 @@ static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bo->glob->lru_lock);
+ spin_lock(&bo->bdev->glob->lru_lock);
ttm_bo_add_to_lru(bo);
- spin_unlock(&bo->glob->lru_lock);
+ spin_unlock(&bo->bdev->glob->lru_lock);
}
- __ttm_bo_unreserve(bo);
-}
-
-/**
- * ttm_bo_unreserve_ticket
- * @bo: A pointer to a struct ttm_buffer_object.
- * @ticket: ww_acquire_ctx used for reserving
- *
- * Unreserve a previous reservation of @bo made with @ticket.
- */
-static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
- struct ww_acquire_ctx *t)
-{
- ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
}
/*
@@ -1008,9 +778,9 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
* !0: Failure.
*/
-extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem);
+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem);
/**
* ttm_bo_move_memcpy
@@ -1030,9 +800,9 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
* !0: Failure.
*/
-extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem);
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem);
/**
* ttm_bo_free_old_node
@@ -1041,7 +811,7 @@ extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
*
* Utility function to free an old placement after a successful move.
*/
-extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
+void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
/**
* ttm_bo_move_accel_cleanup.
@@ -1058,10 +828,9 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
* destroyed when the move is complete. This will help pipeline
* buffer moves.
*/
-
-extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- struct ttm_mem_reg *new_mem);
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct dma_fence *fence, bool evict,
+ struct ttm_mem_reg *new_mem);
/**
* ttm_bo_pipeline_move.
@@ -1079,6 +848,15 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
/**
+ * ttm_bo_pipeline_gutting.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Pipelined gutting of a BO, freeing its backing store.
+ */
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
+
+/**
* ttm_io_prot
*
* @c_state: Caching state.
@@ -1087,33 +865,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
* Utility function that returns the pgprot_t that should be used for
* setting up a PTE with the caching model indicated by @c_state.
*/
-extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
-#if IS_ENABLED(CONFIG_AGP)
-#include <linux/agp_backend.h>
-
-/**
- * ttm_agp_tt_create
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @bridge: The agp bridge this device is sitting on.
- * @size: Size of the data needed backing.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- * @dummy_read_page: See struct ttm_bo_device.
- *
- *
- * Create a TTM backend that uses the indicated AGP bridge as an aperture
- * for TT memory. This function uses the linux agpgart interface to
- * bind and unbind memory backing a ttm_tt.
- */
-extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page);
-int ttm_agp_tt_populate(struct ttm_tt *ttm);
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
-#endif
-
#endif
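
The driver-side callbacks change shape in the same way; a sketch of a move() implementation adapted to the new prototype (example_move_blit() is a hypothetical hardware copy helper):

static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_operation_ctx *ctx,
			   struct ttm_mem_reg *new_mem)
{
	int r;

	r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (r)
		return r;

	r = example_move_blit(bo, evict, ctx, new_mem);
	if (r)
		/* Fall back to a CPU copy; this helper also takes the context now. */
		return ttm_bo_move_memcpy(bo, ctx, new_mem);

	return 0;
}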
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 2c1e3598effe..737b5fed8003 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -35,20 +35,7 @@
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/mm.h>
-
-/**
- * struct ttm_mem_shrink - callback to shrink TTM memory usage.
- *
- * @do_shrink: The callback function.
- *
- * Arguments to the do_shrink functions are intended to be passed using
- * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
- * and can be accessed using container_of().
- */
-
-struct ttm_mem_shrink {
- int (*do_shrink) (struct ttm_mem_shrink *);
-};
+#include "ttm_bo_api.h"
/**
* struct ttm_mem_global - Global memory accounting structure.
@@ -62,6 +49,8 @@ struct ttm_mem_shrink {
* @work: The workqueue callback for the shrink queue.
* @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
+ * @lower_mem_limit: lower limits of swap space and system memory,
+ * consulted by ttm_check_under_lowerlimit().
* @zones: Array of pointers to accounting zones.
* @num_zones: Number of populated entries in the @zones array.
* @zone_kernel: Pointer to the kernel zone.
@@ -76,10 +65,11 @@ struct ttm_mem_shrink {
struct ttm_mem_zone;
struct ttm_mem_global {
struct kobject kobj;
- struct ttm_mem_shrink *shrink;
+ struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
struct work_struct work;
spinlock_t lock;
+ uint64_t lower_mem_limit;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
struct ttm_mem_zone *zone_kernel;
@@ -90,69 +80,19 @@ struct ttm_mem_global {
#endif
};
-/**
- * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
- *
- * @shrink: The object to initialize.
- * @func: The callback function.
- */
-
-static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
- int (*func) (struct ttm_mem_shrink *))
-{
- shrink->do_shrink = func;
-}
-
-/**
- * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
- *
- * @glob: The struct ttm_mem_global object to register with.
- * @shrink: An initialized struct ttm_mem_shrink object to register.
- *
- * Returns:
- * -EBUSY: There's already a callback registered. (May change).
- */
-
-static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
- struct ttm_mem_shrink *shrink)
-{
- spin_lock(&glob->lock);
- if (glob->shrink != NULL) {
- spin_unlock(&glob->lock);
- return -EBUSY;
- }
- glob->shrink = shrink;
- spin_unlock(&glob->lock);
- return 0;
-}
-
-/**
- * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
- *
- * @glob: The struct ttm_mem_global object to unregister from.
- * @shrink: A previously registert struct ttm_mem_shrink object.
- *
- */
-
-static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
- struct ttm_mem_shrink *shrink)
-{
- spin_lock(&glob->lock);
- BUG_ON(glob->shrink != shrink);
- glob->shrink = NULL;
- spin_unlock(&glob->lock);
-}
-
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- bool no_wait, bool interruptible);
+ struct ttm_operation_ctx *ctx);
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size);
+ struct page *page, uint64_t size,
+ struct ttm_operation_ctx *ctx);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page, uint64_t size);
extern size_t ttm_round_pot(size_t size);
extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
+extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
+ uint64_t num_pages, struct ttm_operation_ctx *ctx);
#endif
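
Accounting follows the same pattern: ttm_mem_global_alloc() and ttm_mem_global_alloc_page() now take the operation context instead of the old no_wait/interruptible booleans. A sketch:

static int example_account(struct ttm_mem_global *glob, uint64_t size)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};

	/* Previously: ttm_mem_global_alloc(glob, size, false, false); */
	return ttm_mem_global_alloc(glob, size, &ctx);
}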
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 593811362a91..4d9b019d253c 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void);
*
* Add backing pages to all of @ttm
*/
-int ttm_pool_populate(struct ttm_tt *ttm);
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
/**
* ttm_pool_unpopulate:
@@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
/**
* Populates and DMA maps pages to fulfill a ttm_dma_populate() request
*/
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+ struct ttm_operation_ctx *ctx);
/**
* Unpopulates and DMA unmaps pages as part of a
@@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void);
*/
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+ struct ttm_operation_ctx *ctx);
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
#else
@@ -106,7 +108,8 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
return 0;
}
static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
- struct device *dev)
+ struct device *dev,
+ struct ttm_operation_ctx *ctx)
{
return -ENOMEM;
}
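
A driver's ttm_tt_populate() callback is expected to forward the same context to these pool helpers; a sketch:

static int example_ttm_populate(struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	if (ttm->state != tt_unpopulated)
		return 0;

	return ttm_pool_populate(ttm, ctx);
}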
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
new file mode 100644
index 000000000000..c0e928abf592
--- /dev/null
+++ b/include/drm/ttm/ttm_tt.h
@@ -0,0 +1,272 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _TTM_TT_H_
+#define _TTM_TT_H_
+
+#include <linux/types.h>
+
+struct ttm_tt;
+struct ttm_mem_reg;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+#define TTM_PAGE_FLAG_WRITE (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
+#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
+#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
+
+enum ttm_caching_state {
+ tt_uncached,
+ tt_wc,
+ tt_cached
+};
+
+struct ttm_backend_func {
+ /**
+ * struct ttm_backend_func member bind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+ * memory type and location for binding.
+ *
+ * Bind the backend pages into the aperture in the location
+ * indicated by @bo_mem. This function should be able to handle
+ * differences between aperture and system page sizes.
+ */
+ int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+ /**
+ * struct ttm_backend_func member unbind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Unbind previously bound backend pages. This function should be
+ * able to handle differences between aperture and system page sizes.
+ */
+ int (*unbind) (struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_backend_func member destroy
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+	 * Destroy the backend. This will be called back from ttm_tt_destroy,
+	 * so don't call ttm_tt_destroy from the callback or an infinite loop
+	 * will result.
+ */
+ void (*destroy) (struct ttm_tt *ttm);
+};
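
The three callbacks above are all a backend has to provide. A hedged sketch of a trivial implementation follows; the mydrv_* names and the allocation scheme are assumptions for illustration, not part of this header:

/* Hedged sketch: a backend whose bind/unbind program some GPU aperture. */
static int mydrv_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	/* Map ttm->pages into the aperture described by bo_mem here. */
	return 0;
}

static int mydrv_backend_unbind(struct ttm_tt *ttm)
{
	/* Tear down whatever bind() set up. */
	return 0;
}

static void mydrv_backend_destroy(struct ttm_tt *ttm)
{
	/* Called back from ttm_tt_destroy(); assumes the driver allocated
	 * the ttm_tt itself (needs <linux/slab.h> for kfree). */
	ttm_tt_fini(ttm);
	kfree(ttm);
}

static struct ttm_backend_func mydrv_backend_func = {
	.bind	 = mydrv_backend_bind,
	.unbind	 = mydrv_backend_unbind,
	.destroy = mydrv_backend_destroy,
};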
+
+/**
+ * struct ttm_tt
+ *
+ * @bdev: Pointer to the current struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ * @pages: Array of pages backing the data.
+ * @page_flags: TTM_PAGE_FLAG_XX flags.
+ * @num_pages: Number of pages in the page array.
+ * @sg: Pointer to an sg_table for SG objects imported via dma-buf.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_tt {
+ struct ttm_bo_device *bdev;
+ struct ttm_backend_func *func;
+ struct page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
+ struct file *swap_storage;
+ enum ttm_caching_state caching_state;
+ enum {
+ tt_bound,
+ tt_unbound,
+ tt_unpopulated,
+ } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
+ dma_addr_t *dma_address;
+ struct list_head pages_list;
+};
+
+/**
+ * ttm_tt_create
+ *
+ * @bo: pointer to a struct ttm_buffer_object
+ * @zero_alloc: true if allocated pages need to be zeroed
+ *
+ * Make sure we have a TTM structure allocated for the given BO.
+ * No pages are actually allocated.
+ */
+int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bo: The buffer object we create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+void ttm_tt_fini(struct ttm_tt *ttm);
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_tt_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
+ struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
+ */
+int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt whose backing pages will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change the caching policy of any default kernel mappings
+ * of the pages backing @ttm. If changing from cached to uncached or
+ * write-combined, all CPU caches will first be flushed to make sure the data
+ * of the pages hits RAM. This function may be very costly as it involves
+ * global TLB and cache flushes and potential page splitting / combining.
+ */
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
+
+/**
+ * ttm_tt_populate - allocate pages for a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to allocate pages for a ttm
+ */
+int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
+#if IS_ENABLED(CONFIG_AGP)
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bo: Buffer object we allocate the ttm for.
+ * @bridge: The agp bridge this device is sitting on.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
+ struct agp_bridge_data *bridge,
+ uint32_t page_flags);
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+#endif
+
+#endif
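
Read together, the declarations above outline the usual tt lifecycle: init the structure, populate it with pages, bind it to an aperture, then unbind/unpopulate/fini on the way out. A hedged sketch of that ordering (illustrative only; the core may fold some of these steps into one another, and mydrv_use_tt() is an invented helper):

/* Hedged sketch of the tt lifecycle, including teardown on success. */
static int mydrv_use_tt(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
			struct ttm_mem_reg *bo_mem,
			struct ttm_operation_ctx *ctx)
{
	int ret;

	ret = ttm_tt_init(ttm, bo, 0);		/* set up the page array */
	if (ret)
		return ret;

	ret = ttm_tt_populate(ttm, ctx);	/* driver hook allocates pages */
	if (ret)
		goto out_fini;

	ret = ttm_tt_bind(ttm, bo_mem, ctx);	/* backend ->bind() */
	if (ret)
		goto out_unpopulate;

	/* ... the buffer object is usable here ... */

	ttm_tt_unbind(ttm);
out_unpopulate:
	ttm_tt_unpopulate(ttm);
out_fini:
	ttm_tt_fini(ttm);
	return ret;
}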
diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h
new file mode 100644
index 000000000000..2c005376ac0e
--- /dev/null
+++ b/include/dt-bindings/bus/ti-sysc.h
@@ -0,0 +1,22 @@
+/* TI sysc interconnect target module defines */
+
+/* Generic sysc found on omap2 and later, also known as type1 */
+#define SYSC_OMAP2_CLOCKACTIVITY (3 << 8)
+#define SYSC_OMAP2_EMUFREE (1 << 5)
+#define SYSC_OMAP2_ENAWAKEUP (1 << 2)
+#define SYSC_OMAP2_SOFTRESET (1 << 1)
+#define SYSC_OMAP2_AUTOIDLE (1 << 0)
+
+/* Generic sysc found on omap4 and later, also known as type2 */
+#define SYSC_OMAP4_DMADISABLE (1 << 16)
+#define SYSC_OMAP4_FREEEMU (1 << 1) /* Also known as EMUFREE */
+#define SYSC_OMAP4_SOFTRESET (1 << 0)
+
+/* SmartReflex sysc found on 36xx and later */
+#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26)
+
+/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */
+#define SYSC_IDLE_FORCE 0
+#define SYSC_IDLE_NO 1
+#define SYSC_IDLE_SMART 2
+#define SYSC_IDLE_SMART_WKUP 3
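
As a quick illustration of how the type1 bits compose into a SYSCONFIG value (only the named defines come from this header; the SIDLEMODE shift used below is an assumption made for the example):

/* Hedged sketch: smart-idle with wakeup and autoidle enabled; the
 * SIDLEMODE field position (bit 3) is assumed for illustration only. */
#define EXAMPLE_SIDLEMODE_SHIFT	3

unsigned int sysconfig = SYSC_OMAP2_ENAWAKEUP |
			 SYSC_OMAP2_AUTOIDLE |
			 (SYSC_IDLE_SMART << EXAMPLE_SIDLEMODE_SHIFT);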
diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h
new file mode 100644
index 000000000000..b396f00e481d
--- /dev/null
+++ b/include/dt-bindings/clock/am3.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2017 Texas Instruments, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_CLK_AM3_H
+#define __DT_BINDINGS_CLK_AM3_H
+
+#define AM3_CLKCTRL_OFFSET 0x0
+#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET)
+
+/* l4_per clocks */
+#define AM3_L4_PER_CLKCTRL_OFFSET 0x14
+#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET)
+#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14)
+#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18)
+#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c)
+#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24)
+#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28)
+#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c)
+#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30)
+#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34)
+#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38)
+#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c)
+#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40)
+#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44)
+#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48)
+#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c)
+#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50)
+#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60)
+#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68)
+#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c)
+#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70)
+#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74)
+#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78)
+#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c)
+#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80)
+#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84)
+#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88)
+#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90)
+#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94)
+#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0)
+#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac)
+#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0)
+#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4)
+#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc)
+#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0)
+#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4)
+#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc)
+#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4)
+#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8)
+#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc)
+#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0)
+#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8)
+#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec)
+#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0)
+#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4)
+#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8)
+#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc)
+#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100)
+#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c)
+#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110)
+#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120)
+#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130)
+#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c)
+
+/* l4_wkup clocks */
+#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4
+#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET)
+#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4)
+#define AM3_GPIO1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x8)
+#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc)
+#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14)
+#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0)
+#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4)
+#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8)
+#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc)
+#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0)
+#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4)
+#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8)
+#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4)
+
+/* mpu clocks */
+#define AM3_MPU_CLKCTRL_OFFSET 0x4
+#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET)
+#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4)
+
+/* l4_rtc clocks */
+#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0)
+
+/* gfx_l3 clocks */
+#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4
+#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET)
+#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4)
+
+/* l4_cefuse clocks */
+#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20
+#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET)
+#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20)
+
+#endif
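
The *_CLKCTRL_INDEX() macros above simply rebase a register offset against the first CLKCTRL register of the clock domain, which keeps the consumer-visible numbers small. A worked example using defines from this header (a sketch; the include path assumes the kernel's include/ directory is on the search path):

#include <dt-bindings/clock/am3.h>

/* AM3_MMC1_CLKCTRL = AM3_L4_PER_CLKCTRL_INDEX(0x3c) = 0x3c - 0x14 = 0x28,
 * i.e. MMC1 sits 0x28 bytes into the l4_per clkctrl register range. */
_Static_assert(AM3_MMC1_CLKCTRL == 0x28,
	       "index is the register offset minus the domain base");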
diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h
new file mode 100644
index 000000000000..d21df00b3270
--- /dev/null
+++ b/include/dt-bindings/clock/am4.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2017 Texas Instruments, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_CLK_AM4_H
+#define __DT_BINDINGS_CLK_AM4_H
+
+#define AM4_CLKCTRL_OFFSET 0x20
+#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET)
+
+/* l4_wkup clocks */
+#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120)
+#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
+#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228)
+#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230)
+#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328)
+#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338)
+#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340)
+#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348)
+#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350)
+#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358)
+#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360)
+#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368)
+
+/* mpu clocks */
+#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* gfx_l3 clocks */
+#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* l4_rtc clocks */
+#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* l4_per clocks */
+#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28)
+#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30)
+#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40)
+#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50)
+#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58)
+#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68)
+#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70)
+#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78)
+#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80)
+#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88)
+#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90)
+#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0)
+#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
+#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238)
+#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240)
+#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248)
+#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258)
+#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260)
+#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268)
+#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320)
+#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420)
+#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428)
+#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430)
+#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438)
+#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440)
+#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448)
+#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450)
+#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458)
+#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460)
+#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468)
+#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478)
+#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480)
+#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488)
+#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490)
+#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498)
+#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0)
+#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8)
+#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0)
+#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8)
+#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0)
+#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8)
+#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0)
+#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500)
+#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508)
+#define AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510)
+#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518)
+#define AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520)
+#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528)
+#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530)
+#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538)
+#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540)
+#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548)
+#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550)
+#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558)
+#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560)
+#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568)
+#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570)
+#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578)
+#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580)
+#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588)
+#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590)
+#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598)
+#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0)
+#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8)
+#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0)
+#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720)
+#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20)
+#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20)
+
+#endif
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
new file mode 100644
index 000000000000..d3558d897a4d
--- /dev/null
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef DT_BINDINGS_ASPEED_CLOCK_H
+#define DT_BINDINGS_ASPEED_CLOCK_H
+
+#define ASPEED_CLK_GATE_ECLK 0
+#define ASPEED_CLK_GATE_GCLK 1
+#define ASPEED_CLK_GATE_MCLK 2
+#define ASPEED_CLK_GATE_VCLK 3
+#define ASPEED_CLK_GATE_BCLK 4
+#define ASPEED_CLK_GATE_DCLK 5
+#define ASPEED_CLK_GATE_REFCLK 6
+#define ASPEED_CLK_GATE_USBPORT2CLK 7
+#define ASPEED_CLK_GATE_LCLK 8
+#define ASPEED_CLK_GATE_USBUHCICLK 9
+#define ASPEED_CLK_GATE_D1CLK 10
+#define ASPEED_CLK_GATE_YCLK 11
+#define ASPEED_CLK_GATE_USBPORT1CLK 12
+#define ASPEED_CLK_GATE_UART1CLK 13
+#define ASPEED_CLK_GATE_UART2CLK 14
+#define ASPEED_CLK_GATE_UART5CLK 15
+#define ASPEED_CLK_GATE_ESPICLK 16
+#define ASPEED_CLK_GATE_MAC1CLK 17
+#define ASPEED_CLK_GATE_MAC2CLK 18
+#define ASPEED_CLK_GATE_RSACLK 19
+#define ASPEED_CLK_GATE_UART3CLK 20
+#define ASPEED_CLK_GATE_UART4CLK 21
+#define ASPEED_CLK_GATE_SDCLKCLK 22
+#define ASPEED_CLK_GATE_LHCCLK 23
+#define ASPEED_CLK_HPLL 24
+#define ASPEED_CLK_AHB 25
+#define ASPEED_CLK_APB 26
+#define ASPEED_CLK_UART 27
+#define ASPEED_CLK_SDIO 28
+#define ASPEED_CLK_ECLK 29
+#define ASPEED_CLK_ECLK_MUX 30
+#define ASPEED_CLK_LHCLK 31
+#define ASPEED_CLK_MAC 32
+#define ASPEED_CLK_BCLK 33
+#define ASPEED_CLK_MPLL 34
+
+#define ASPEED_RESET_XDMA 0
+#define ASPEED_RESET_MCTP 1
+#define ASPEED_RESET_ADC 2
+#define ASPEED_RESET_JTAG_MASTER 3
+#define ASPEED_RESET_MIC 4
+#define ASPEED_RESET_PWM 5
+#define ASPEED_RESET_PCIVGA 6
+#define ASPEED_RESET_I2C 7
+#define ASPEED_RESET_AHB 8
+
+#endif
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
new file mode 100644
index 000000000000..941ac70e7f30
--- /dev/null
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Meson-AXG clock tree IDs
+ *
+ * Copyright (c) 2017 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef __AXG_CLKC_H
+#define __AXG_CLKC_H
+
+#define CLKID_SYS_PLL 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2 2
+#define CLKID_FCLK_DIV3 3
+#define CLKID_FCLK_DIV4 4
+#define CLKID_FCLK_DIV5 5
+#define CLKID_FCLK_DIV7 6
+#define CLKID_GP0_PLL 7
+#define CLKID_CLK81 10
+#define CLKID_MPLL0 11
+#define CLKID_MPLL1 12
+#define CLKID_MPLL2 13
+#define CLKID_MPLL3 14
+#define CLKID_DDR 15
+#define CLKID_AUDIO_LOCKER 16
+#define CLKID_MIPI_DSI_HOST 17
+#define CLKID_ISA 18
+#define CLKID_PL301 19
+#define CLKID_PERIPHS 20
+#define CLKID_SPICC0 21
+#define CLKID_I2C 22
+#define CLKID_RNG0 23
+#define CLKID_UART0 24
+#define CLKID_MIPI_DSI_PHY 25
+#define CLKID_SPICC1 26
+#define CLKID_PCIE_A 27
+#define CLKID_PCIE_B 28
+#define CLKID_HIU_IFACE 29
+#define CLKID_ASSIST_MISC 30
+#define CLKID_SD_EMMC_B 31
+#define CLKID_SD_EMMC_C 32
+#define CLKID_DMA 33
+#define CLKID_SPI 34
+#define CLKID_AUDIO 35
+#define CLKID_ETH 36
+#define CLKID_UART1 37
+#define CLKID_G2D 38
+#define CLKID_USB0 39
+#define CLKID_USB1 40
+#define CLKID_RESET 41
+#define CLKID_USB 42
+#define CLKID_AHB_ARB0 43
+#define CLKID_EFUSE 44
+#define CLKID_BOOT_ROM 45
+#define CLKID_AHB_DATA_BUS 46
+#define CLKID_AHB_CTRL_BUS 47
+#define CLKID_USB1_DDR_BRIDGE 48
+#define CLKID_USB0_DDR_BRIDGE 49
+#define CLKID_MMC_PCLK 50
+#define CLKID_VPU_INTR 51
+#define CLKID_SEC_AHB_AHB3_BRIDGE 52
+#define CLKID_GIC 53
+#define CLKID_AO_MEDIA_CPU 54
+#define CLKID_AO_AHB_SRAM 55
+#define CLKID_AO_AHB_BUS 56
+#define CLKID_AO_IFACE 57
+#define CLKID_AO_I2C 58
+#define CLKID_SD_EMMC_B_CLK0 59
+#define CLKID_SD_EMMC_C_CLK0 60
+
+#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h
new file mode 100644
index 000000000000..0e7099a344e1
--- /dev/null
+++ b/include/dt-bindings/clock/dm814.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Texas Instruments, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_CLK_DM814_H
+#define __DT_BINDINGS_CLK_DM814_H
+
+#define DM814_CLKCTRL_OFFSET 0x0
+#define DM814_CLKCTRL_INDEX(offset) ((offset) - DM814_CLKCTRL_OFFSET)
+
+/* default clocks */
+#define DM814_USB_OTG_HS_CLKCTRL DM814_CLKCTRL_INDEX(0x58)
+
+/* alwon clocks */
+#define DM814_UART1_CLKCTRL DM814_CLKCTRL_INDEX(0x150)
+#define DM814_UART2_CLKCTRL DM814_CLKCTRL_INDEX(0x154)
+#define DM814_UART3_CLKCTRL DM814_CLKCTRL_INDEX(0x158)
+#define DM814_GPIO1_CLKCTRL DM814_CLKCTRL_INDEX(0x15c)
+#define DM814_GPIO2_CLKCTRL DM814_CLKCTRL_INDEX(0x160)
+#define DM814_I2C1_CLKCTRL DM814_CLKCTRL_INDEX(0x164)
+#define DM814_I2C2_CLKCTRL DM814_CLKCTRL_INDEX(0x168)
+#define DM814_WD_TIMER_CLKCTRL DM814_CLKCTRL_INDEX(0x18c)
+#define DM814_MCSPI1_CLKCTRL DM814_CLKCTRL_INDEX(0x190)
+#define DM814_GPMC_CLKCTRL DM814_CLKCTRL_INDEX(0x1d0)
+#define DM814_CPGMAC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1d4)
+#define DM814_MPU_CLKCTRL DM814_CLKCTRL_INDEX(0x1dc)
+#define DM814_RTC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f0)
+#define DM814_TPCC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f4)
+#define DM814_TPTC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1f8)
+#define DM814_TPTC1_CLKCTRL DM814_CLKCTRL_INDEX(0x1fc)
+#define DM814_TPTC2_CLKCTRL DM814_CLKCTRL_INDEX(0x200)
+#define DM814_TPTC3_CLKCTRL DM814_CLKCTRL_INDEX(0x204)
+#define DM814_MMC1_CLKCTRL DM814_CLKCTRL_INDEX(0x21c)
+#define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220)
+#define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224)
+
+#endif
diff --git a/include/dt-bindings/clock/dm816.h b/include/dt-bindings/clock/dm816.h
new file mode 100644
index 000000000000..69e8a36d783e
--- /dev/null
+++ b/include/dt-bindings/clock/dm816.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Texas Instruments, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_CLK_DM816_H
+#define __DT_BINDINGS_CLK_DM816_H
+
+#define DM816_CLKCTRL_OFFSET 0x0
+#define DM816_CLKCTRL_INDEX(offset) ((offset) - DM816_CLKCTRL_OFFSET)
+
+/* default clocks */
+#define DM816_USB_OTG_HS_CLKCTRL DM816_CLKCTRL_INDEX(0x58)
+
+/* alwon clocks */
+#define DM816_UART1_CLKCTRL DM816_CLKCTRL_INDEX(0x150)
+#define DM816_UART2_CLKCTRL DM816_CLKCTRL_INDEX(0x154)
+#define DM816_UART3_CLKCTRL DM816_CLKCTRL_INDEX(0x158)
+#define DM816_GPIO1_CLKCTRL DM816_CLKCTRL_INDEX(0x15c)
+#define DM816_GPIO2_CLKCTRL DM816_CLKCTRL_INDEX(0x160)
+#define DM816_I2C1_CLKCTRL DM816_CLKCTRL_INDEX(0x164)
+#define DM816_I2C2_CLKCTRL DM816_CLKCTRL_INDEX(0x168)
+#define DM816_TIMER1_CLKCTRL DM816_CLKCTRL_INDEX(0x170)
+#define DM816_TIMER2_CLKCTRL DM816_CLKCTRL_INDEX(0x174)
+#define DM816_TIMER3_CLKCTRL DM816_CLKCTRL_INDEX(0x178)
+#define DM816_TIMER4_CLKCTRL DM816_CLKCTRL_INDEX(0x17c)
+#define DM816_TIMER5_CLKCTRL DM816_CLKCTRL_INDEX(0x180)
+#define DM816_TIMER6_CLKCTRL DM816_CLKCTRL_INDEX(0x184)
+#define DM816_TIMER7_CLKCTRL DM816_CLKCTRL_INDEX(0x188)
+#define DM816_WD_TIMER_CLKCTRL DM816_CLKCTRL_INDEX(0x18c)
+#define DM816_MCSPI1_CLKCTRL DM816_CLKCTRL_INDEX(0x190)
+#define DM816_MAILBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x194)
+#define DM816_SPINBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x198)
+#define DM816_MMC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1b0)
+#define DM816_GPMC_CLKCTRL DM816_CLKCTRL_INDEX(0x1d0)
+#define DM816_DAVINCI_MDIO_CLKCTRL DM816_CLKCTRL_INDEX(0x1d4)
+#define DM816_EMAC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1d8)
+#define DM816_MPU_CLKCTRL DM816_CLKCTRL_INDEX(0x1dc)
+#define DM816_RTC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f0)
+#define DM816_TPCC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f4)
+#define DM816_TPTC0_CLKCTRL DM816_CLKCTRL_INDEX(0x1f8)
+#define DM816_TPTC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1fc)
+#define DM816_TPTC2_CLKCTRL DM816_CLKCTRL_INDEX(0x200)
+#define DM816_TPTC3_CLKCTRL DM816_CLKCTRL_INDEX(0x204)
+
+#endif
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
new file mode 100644
index 000000000000..5e1061b15aed
--- /dev/null
+++ b/include/dt-bindings/clock/dra7.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2017 Texas Instruments, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_CLK_DRA7_H
+#define __DT_BINDINGS_CLK_DRA7_H
+
+#define DRA7_CLKCTRL_OFFSET 0x20
+#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET)
+
+/* mpu clocks */
+#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* ipu clocks */
+#define DRA7_IPU_CLKCTRL_OFFSET 0x40
+#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET)
+#define DRA7_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50)
+#define DRA7_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58)
+#define DRA7_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60)
+#define DRA7_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68)
+#define DRA7_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70)
+#define DRA7_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78)
+#define DRA7_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80)
+
+/* rtc clocks */
+#define DRA7_RTC_CLKCTRL_OFFSET 0x40
+#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET)
+#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44)
+
+/* coreaon clocks */
+#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
+
+/* l3main1 clocks */
+#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
+#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
+#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
+
+/* dma clocks */
+#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* emif clocks */
+#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* atl clocks */
+#define DRA7_ATL_CLKCTRL_OFFSET 0x0
+#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET)
+#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0)
+
+/* l4cfg clocks */
+#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58)
+#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60)
+#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68)
+#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
+#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
+#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
+#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98)
+#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
+
+/* l3instr clocks */
+#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
+/* dss clocks */
+#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+
+/* l3init clocks */
+#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
+#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0)
+#define DRA7_PCIE2_CLKCTRL DRA7_CLKCTRL_INDEX(0xb8)
+#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0)
+#define DRA7_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0)
+#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8)
+#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
+
+/* l4per clocks */
+#define DRA7_L4PER_CLKCTRL_OFFSET 0x0
+#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET)
+#define DRA7_L4_PER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc)
+#define DRA7_L4_PER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x14)
+#define DRA7_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28)
+#define DRA7_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30)
+#define DRA7_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38)
+#define DRA7_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40)
+#define DRA7_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48)
+#define DRA7_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50)
+#define DRA7_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58)
+#define DRA7_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60)
+#define DRA7_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68)
+#define DRA7_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70)
+#define DRA7_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78)
+#define DRA7_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80)
+#define DRA7_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88)
+#define DRA7_EPWMSS1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x90)
+#define DRA7_EPWMSS2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x98)
+#define DRA7_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0)
+#define DRA7_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8)
+#define DRA7_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0)
+#define DRA7_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8)
+#define DRA7_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0)
+#define DRA7_EPWMSS0_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc4)
+#define DRA7_TIMER13_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc8)
+#define DRA7_TIMER14_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd0)
+#define DRA7_TIMER15_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd8)
+#define DRA7_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0)
+#define DRA7_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8)
+#define DRA7_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100)
+#define DRA7_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108)
+#define DRA7_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110)
+#define DRA7_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118)
+#define DRA7_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120)
+#define DRA7_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128)
+#define DRA7_TIMER16_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x130)
+#define DRA7_QSPI_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x138)
+#define DRA7_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140)
+#define DRA7_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148)
+#define DRA7_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150)
+#define DRA7_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158)
+#define DRA7_MCASP2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x160)
+#define DRA7_MCASP3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x168)
+#define DRA7_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170)
+#define DRA7_MCASP5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x178)
+#define DRA7_MCASP8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x190)
+#define DRA7_MCASP4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x198)
+#define DRA7_AES1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
+#define DRA7_AES2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
+#define DRA7_DES_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
+#define DRA7_RNG_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
+#define DRA7_SHAM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
+#define DRA7_UART7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
+#define DRA7_UART8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
+#define DRA7_UART9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
+#define DRA7_DCAN2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
+#define DRA7_MCASP6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x204)
+#define DRA7_MCASP7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x208)
+
+/* wkupaon clocks */
+#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
+#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
+#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+
+#endif
diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h
index adb768d447a5..75d583eb84dd 100644
--- a/include/dt-bindings/clock/hi3660-clock.h
+++ b/include/dt-bindings/clock/hi3660-clock.h
@@ -208,4 +208,11 @@
#define HI3660_CLK_I2C6_IOMCU 3
#define HI3660_CLK_IOMCU_PERI0 4
+/* clocks provided by the stub clock controller */
+#define HI3660_CLK_STUB_CLUSTER0 0
+#define HI3660_CLK_STUB_CLUSTER1 1
+#define HI3660_CLK_STUB_GPU 2
+#define HI3660_CLK_STUB_DDR 3
+#define HI3660_CLK_STUB_NUM 4
+
#endif /* __DTS_HI3660_CLOCK_H */
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index e2f99ae72d5c..b2325d3e236a 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -452,5 +452,8 @@
#define IMX7D_OCOTP_CLK 439
#define IMX7D_NAND_RAWNAND_CLK 440
#define IMX7D_NAND_USDHC_BUS_RAWNAND_CLK 441
-#define IMX7D_CLK_END 442
+#define IMX7D_SNVS_CLK 442
+#define IMX7D_CAAM_CLK 443
+#define IMX7D_KPP_ROOT_CLK 444
+#define IMX7D_CLK_END 445
#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/jz4770-cgu.h
new file mode 100644
index 000000000000..d68a7695a1f8
--- /dev/null
+++ b/include/dt-bindings/clock/jz4770-cgu.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,jz4770-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4770_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4770_CGU_H__
+
+#define JZ4770_CLK_EXT 0
+#define JZ4770_CLK_OSC32K 1
+#define JZ4770_CLK_PLL0 2
+#define JZ4770_CLK_PLL1 3
+#define JZ4770_CLK_CCLK 4
+#define JZ4770_CLK_H0CLK 5
+#define JZ4770_CLK_H1CLK 6
+#define JZ4770_CLK_H2CLK 7
+#define JZ4770_CLK_C1CLK 8
+#define JZ4770_CLK_PCLK 9
+#define JZ4770_CLK_MMC0_MUX 10
+#define JZ4770_CLK_MMC0 11
+#define JZ4770_CLK_MMC1_MUX 12
+#define JZ4770_CLK_MMC1 13
+#define JZ4770_CLK_MMC2_MUX 14
+#define JZ4770_CLK_MMC2 15
+#define JZ4770_CLK_CIM 16
+#define JZ4770_CLK_UHC 17
+#define JZ4770_CLK_GPU 18
+#define JZ4770_CLK_BCH 19
+#define JZ4770_CLK_LPCLK_MUX 20
+#define JZ4770_CLK_GPS 21
+#define JZ4770_CLK_SSI_MUX 22
+#define JZ4770_CLK_PCM_MUX 23
+#define JZ4770_CLK_I2S 24
+#define JZ4770_CLK_OTG 25
+#define JZ4770_CLK_SSI0 26
+#define JZ4770_CLK_SSI1 27
+#define JZ4770_CLK_SSI2 28
+#define JZ4770_CLK_PCM0 29
+#define JZ4770_CLK_PCM1 30
+#define JZ4770_CLK_DMA 31
+#define JZ4770_CLK_I2C0 32
+#define JZ4770_CLK_I2C1 33
+#define JZ4770_CLK_I2C2 34
+#define JZ4770_CLK_UART0 35
+#define JZ4770_CLK_UART1 36
+#define JZ4770_CLK_UART2 37
+#define JZ4770_CLK_UART3 38
+#define JZ4770_CLK_IPU 39
+#define JZ4770_CLK_ADC 40
+#define JZ4770_CLK_AIC 41
+#define JZ4770_CLK_AUX 42
+#define JZ4770_CLK_VPU 43
+#define JZ4770_CLK_UHC_PHY 44
+#define JZ4770_CLK_OTG_PHY 45
+#define JZ4770_CLK_EXT512 46
+#define JZ4770_CLK_RTC 47
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */
diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h
new file mode 100644
index 000000000000..f51821a91216
--- /dev/null
+++ b/include/dt-bindings/clock/omap5.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017 Texas Instruments, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_CLK_OMAP5_H
+#define __DT_BINDINGS_CLK_OMAP5_H
+
+#define OMAP5_CLKCTRL_OFFSET 0x20
+#define OMAP5_CLKCTRL_INDEX(offset) ((offset) - OMAP5_CLKCTRL_OFFSET)
+
+/* mpu clocks */
+#define OMAP5_MPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* dsp clocks */
+#define OMAP5_MMU_DSP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* abe clocks */
+#define OMAP5_L4_ABE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_MCPDM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+#define OMAP5_DMIC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
+#define OMAP5_MCBSP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48)
+#define OMAP5_MCBSP2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50)
+#define OMAP5_MCBSP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58)
+#define OMAP5_TIMER5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68)
+#define OMAP5_TIMER6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70)
+#define OMAP5_TIMER7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78)
+#define OMAP5_TIMER8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80)
+
+/* l3main1 clocks */
+#define OMAP5_L3_MAIN_1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* l3main2 clocks */
+#define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* ipu clocks */
+#define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* dma clocks */
+#define OMAP5_DMA_SYSTEM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* emif clocks */
+#define OMAP5_DMM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_EMIF1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+#define OMAP5_EMIF2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
+
+/* l4cfg clocks */
+#define OMAP5_L4_CFG_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_SPINLOCK_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+#define OMAP5_MAILBOX_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+
+/* l3instr clocks */
+#define OMAP5_L3_MAIN_3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_L3_INSTR_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+
+/* l4per clocks */
+#define OMAP5_TIMER10_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+#define OMAP5_TIMER11_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+#define OMAP5_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
+#define OMAP5_TIMER3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40)
+#define OMAP5_TIMER4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48)
+#define OMAP5_TIMER9_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50)
+#define OMAP5_GPIO2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x60)
+#define OMAP5_GPIO3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68)
+#define OMAP5_GPIO4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70)
+#define OMAP5_GPIO5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78)
+#define OMAP5_GPIO6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80)
+#define OMAP5_I2C1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa0)
+#define OMAP5_I2C2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa8)
+#define OMAP5_I2C3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb0)
+#define OMAP5_I2C4_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb8)
+#define OMAP5_L4_PER_CLKCTRL OMAP5_CLKCTRL_INDEX(0xc0)
+#define OMAP5_MCSPI1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0)
+#define OMAP5_MCSPI2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf8)
+#define OMAP5_MCSPI3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x100)
+#define OMAP5_MCSPI4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x108)
+#define OMAP5_GPIO7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x110)
+#define OMAP5_GPIO8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x118)
+#define OMAP5_MMC3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x120)
+#define OMAP5_MMC4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x128)
+#define OMAP5_UART1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x140)
+#define OMAP5_UART2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x148)
+#define OMAP5_UART3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x150)
+#define OMAP5_UART4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x158)
+#define OMAP5_MMC5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x160)
+#define OMAP5_I2C5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x168)
+#define OMAP5_UART5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x170)
+#define OMAP5_UART6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x178)
+
+/* dss clocks */
+#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+
+/* l3init clocks */
+#define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+#define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+#define OMAP5_USB_HOST_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58)
+#define OMAP5_USB_TLL_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68)
+#define OMAP5_SATA_CLKCTRL OMAP5_CLKCTRL_INDEX(0x88)
+#define OMAP5_OCP2SCP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe0)
+#define OMAP5_OCP2SCP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe8)
+#define OMAP5_USB_OTG_SS_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0)
+
+/* wkupaon clocks */
+#define OMAP5_L4_WKUP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_WD_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
+#define OMAP5_GPIO1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
+#define OMAP5_TIMER1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40)
+#define OMAP5_COUNTER_32K_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50)
+#define OMAP5_KBD_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78)
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
index 370c83c3bccc..238f872e52f4 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -58,6 +58,186 @@
#define GCC_QPIC_AHB_CLK 41
#define GCC_QPIC_CLK 42
#define PCNOC_BFDCD_CLK_SRC 43
+#define GPLL2_MAIN 44
+#define GPLL2 45
+#define GPLL4_MAIN 46
+#define GPLL4 47
+#define GPLL6_MAIN 48
+#define GPLL6 49
+#define UBI32_PLL_MAIN 50
+#define UBI32_PLL 51
+#define NSS_CRYPTO_PLL_MAIN 52
+#define NSS_CRYPTO_PLL 53
+#define PCIE0_AXI_CLK_SRC 54
+#define PCIE0_AUX_CLK_SRC 55
+#define PCIE0_PIPE_CLK_SRC 56
+#define PCIE1_AXI_CLK_SRC 57
+#define PCIE1_AUX_CLK_SRC 58
+#define PCIE1_PIPE_CLK_SRC 59
+#define SDCC1_APPS_CLK_SRC 60
+#define SDCC1_ICE_CORE_CLK_SRC 61
+#define SDCC2_APPS_CLK_SRC 62
+#define USB0_MASTER_CLK_SRC 63
+#define USB0_AUX_CLK_SRC 64
+#define USB0_MOCK_UTMI_CLK_SRC 65
+#define USB0_PIPE_CLK_SRC 66
+#define USB1_MASTER_CLK_SRC 67
+#define USB1_AUX_CLK_SRC 68
+#define USB1_MOCK_UTMI_CLK_SRC 69
+#define USB1_PIPE_CLK_SRC 70
+#define GCC_XO_CLK_SRC 71
+#define SYSTEM_NOC_BFDCD_CLK_SRC 72
+#define NSS_CE_CLK_SRC 73
+#define NSS_NOC_BFDCD_CLK_SRC 74
+#define NSS_CRYPTO_CLK_SRC 75
+#define NSS_UBI0_CLK_SRC 76
+#define NSS_UBI0_DIV_CLK_SRC 77
+#define NSS_UBI1_CLK_SRC 78
+#define NSS_UBI1_DIV_CLK_SRC 79
+#define UBI_MPT_CLK_SRC 80
+#define NSS_IMEM_CLK_SRC 81
+#define NSS_PPE_CLK_SRC 82
+#define NSS_PORT1_RX_CLK_SRC 83
+#define NSS_PORT1_RX_DIV_CLK_SRC 84
+#define NSS_PORT1_TX_CLK_SRC 85
+#define NSS_PORT1_TX_DIV_CLK_SRC 86
+#define NSS_PORT2_RX_CLK_SRC 87
+#define NSS_PORT2_RX_DIV_CLK_SRC 88
+#define NSS_PORT2_TX_CLK_SRC 89
+#define NSS_PORT2_TX_DIV_CLK_SRC 90
+#define NSS_PORT3_RX_CLK_SRC 91
+#define NSS_PORT3_RX_DIV_CLK_SRC 92
+#define NSS_PORT3_TX_CLK_SRC 93
+#define NSS_PORT3_TX_DIV_CLK_SRC 94
+#define NSS_PORT4_RX_CLK_SRC 95
+#define NSS_PORT4_RX_DIV_CLK_SRC 96
+#define NSS_PORT4_TX_CLK_SRC 97
+#define NSS_PORT4_TX_DIV_CLK_SRC 98
+#define NSS_PORT5_RX_CLK_SRC 99
+#define NSS_PORT5_RX_DIV_CLK_SRC 100
+#define NSS_PORT5_TX_CLK_SRC 101
+#define NSS_PORT5_TX_DIV_CLK_SRC 102
+#define NSS_PORT6_RX_CLK_SRC 103
+#define NSS_PORT6_RX_DIV_CLK_SRC 104
+#define NSS_PORT6_TX_CLK_SRC 105
+#define NSS_PORT6_TX_DIV_CLK_SRC 106
+#define CRYPTO_CLK_SRC 107
+#define GP1_CLK_SRC 108
+#define GP2_CLK_SRC 109
+#define GP3_CLK_SRC 110
+#define GCC_PCIE0_AHB_CLK 111
+#define GCC_PCIE0_AUX_CLK 112
+#define GCC_PCIE0_AXI_M_CLK 113
+#define GCC_PCIE0_AXI_S_CLK 114
+#define GCC_PCIE0_PIPE_CLK 115
+#define GCC_SYS_NOC_PCIE0_AXI_CLK 116
+#define GCC_PCIE1_AHB_CLK 117
+#define GCC_PCIE1_AUX_CLK 118
+#define GCC_PCIE1_AXI_M_CLK 119
+#define GCC_PCIE1_AXI_S_CLK 120
+#define GCC_PCIE1_PIPE_CLK 121
+#define GCC_SYS_NOC_PCIE1_AXI_CLK 122
+#define GCC_USB0_AUX_CLK 123
+#define GCC_SYS_NOC_USB0_AXI_CLK 124
+#define GCC_USB0_MASTER_CLK 125
+#define GCC_USB0_MOCK_UTMI_CLK 126
+#define GCC_USB0_PHY_CFG_AHB_CLK 127
+#define GCC_USB0_PIPE_CLK 128
+#define GCC_USB0_SLEEP_CLK 129
+#define GCC_USB1_AUX_CLK 130
+#define GCC_SYS_NOC_USB1_AXI_CLK 131
+#define GCC_USB1_MASTER_CLK 132
+#define GCC_USB1_MOCK_UTMI_CLK 133
+#define GCC_USB1_PHY_CFG_AHB_CLK 134
+#define GCC_USB1_PIPE_CLK 135
+#define GCC_USB1_SLEEP_CLK 136
+#define GCC_SDCC1_AHB_CLK 137
+#define GCC_SDCC1_APPS_CLK 138
+#define GCC_SDCC1_ICE_CORE_CLK 139
+#define GCC_SDCC2_AHB_CLK 140
+#define GCC_SDCC2_APPS_CLK 141
+#define GCC_MEM_NOC_NSS_AXI_CLK 142
+#define GCC_NSS_CE_APB_CLK 143
+#define GCC_NSS_CE_AXI_CLK 144
+#define GCC_NSS_CFG_CLK 145
+#define GCC_NSS_CRYPTO_CLK 146
+#define GCC_NSS_CSR_CLK 147
+#define GCC_NSS_EDMA_CFG_CLK 148
+#define GCC_NSS_EDMA_CLK 149
+#define GCC_NSS_IMEM_CLK 150
+#define GCC_NSS_NOC_CLK 151
+#define GCC_NSS_PPE_BTQ_CLK 152
+#define GCC_NSS_PPE_CFG_CLK 153
+#define GCC_NSS_PPE_CLK 154
+#define GCC_NSS_PPE_IPE_CLK 155
+#define GCC_NSS_PTP_REF_CLK 156
+#define GCC_NSSNOC_CE_APB_CLK 157
+#define GCC_NSSNOC_CE_AXI_CLK 158
+#define GCC_NSSNOC_CRYPTO_CLK 159
+#define GCC_NSSNOC_PPE_CFG_CLK 160
+#define GCC_NSSNOC_PPE_CLK 161
+#define GCC_NSSNOC_QOSGEN_REF_CLK 162
+#define GCC_NSSNOC_SNOC_CLK 163
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 164
+#define GCC_NSSNOC_UBI0_AHB_CLK 165
+#define GCC_NSSNOC_UBI1_AHB_CLK 166
+#define GCC_UBI0_AHB_CLK 167
+#define GCC_UBI0_AXI_CLK 168
+#define GCC_UBI0_NC_AXI_CLK 169
+#define GCC_UBI0_CORE_CLK 170
+#define GCC_UBI0_MPT_CLK 171
+#define GCC_UBI1_AHB_CLK 172
+#define GCC_UBI1_AXI_CLK 173
+#define GCC_UBI1_NC_AXI_CLK 174
+#define GCC_UBI1_CORE_CLK 175
+#define GCC_UBI1_MPT_CLK 176
+#define GCC_CMN_12GPLL_AHB_CLK 177
+#define GCC_CMN_12GPLL_SYS_CLK 178
+#define GCC_MDIO_AHB_CLK 179
+#define GCC_UNIPHY0_AHB_CLK 180
+#define GCC_UNIPHY0_SYS_CLK 181
+#define GCC_UNIPHY1_AHB_CLK 182
+#define GCC_UNIPHY1_SYS_CLK 183
+#define GCC_UNIPHY2_AHB_CLK 184
+#define GCC_UNIPHY2_SYS_CLK 185
+#define GCC_NSS_PORT1_RX_CLK 186
+#define GCC_NSS_PORT1_TX_CLK 187
+#define GCC_NSS_PORT2_RX_CLK 188
+#define GCC_NSS_PORT2_TX_CLK 189
+#define GCC_NSS_PORT3_RX_CLK 190
+#define GCC_NSS_PORT3_TX_CLK 191
+#define GCC_NSS_PORT4_RX_CLK 192
+#define GCC_NSS_PORT4_TX_CLK 193
+#define GCC_NSS_PORT5_RX_CLK 194
+#define GCC_NSS_PORT5_TX_CLK 195
+#define GCC_NSS_PORT6_RX_CLK 196
+#define GCC_NSS_PORT6_TX_CLK 197
+#define GCC_PORT1_MAC_CLK 198
+#define GCC_PORT2_MAC_CLK 199
+#define GCC_PORT3_MAC_CLK 200
+#define GCC_PORT4_MAC_CLK 201
+#define GCC_PORT5_MAC_CLK 202
+#define GCC_PORT6_MAC_CLK 203
+#define GCC_UNIPHY0_PORT1_RX_CLK 204
+#define GCC_UNIPHY0_PORT1_TX_CLK 205
+#define GCC_UNIPHY0_PORT2_RX_CLK 206
+#define GCC_UNIPHY0_PORT2_TX_CLK 207
+#define GCC_UNIPHY0_PORT3_RX_CLK 208
+#define GCC_UNIPHY0_PORT3_TX_CLK 209
+#define GCC_UNIPHY0_PORT4_RX_CLK 210
+#define GCC_UNIPHY0_PORT4_TX_CLK 211
+#define GCC_UNIPHY0_PORT5_RX_CLK 212
+#define GCC_UNIPHY0_PORT5_TX_CLK 213
+#define GCC_UNIPHY1_PORT5_RX_CLK 214
+#define GCC_UNIPHY1_PORT5_TX_CLK 215
+#define GCC_UNIPHY2_PORT6_RX_CLK 216
+#define GCC_UNIPHY2_PORT6_TX_CLK 217
+#define GCC_CRYPTO_AHB_CLK 218
+#define GCC_CRYPTO_AXI_CLK 219
+#define GCC_CRYPTO_CLK 220
+#define GCC_GP1_CLK 221
+#define GCC_GP2_CLK 222
+#define GCC_GP3_CLK 223
#define GCC_BLSP1_BCR 0
#define GCC_BLSP1_QUP1_BCR 1
@@ -148,5 +328,47 @@
#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 86
#define GCC_APC1_VOLTAGE_DROOP_DETECTOR_BCR 87
#define GCC_SMMU_CATS_BCR 88
+#define GCC_UBI0_AXI_ARES 89
+#define GCC_UBI0_AHB_ARES 90
+#define GCC_UBI0_NC_AXI_ARES 91
+#define GCC_UBI0_DBG_ARES 92
+#define GCC_UBI0_CORE_CLAMP_ENABLE 93
+#define GCC_UBI0_CLKRST_CLAMP_ENABLE 94
+#define GCC_UBI1_AXI_ARES 95
+#define GCC_UBI1_AHB_ARES 96
+#define GCC_UBI1_NC_AXI_ARES 97
+#define GCC_UBI1_DBG_ARES 98
+#define GCC_UBI1_CORE_CLAMP_ENABLE 99
+#define GCC_UBI1_CLKRST_CLAMP_ENABLE 100
+#define GCC_NSS_CFG_ARES 101
+#define GCC_NSS_IMEM_ARES 102
+#define GCC_NSS_NOC_ARES 103
+#define GCC_NSS_CRYPTO_ARES 104
+#define GCC_NSS_CSR_ARES 105
+#define GCC_NSS_CE_APB_ARES 106
+#define GCC_NSS_CE_AXI_ARES 107
+#define GCC_NSSNOC_CE_APB_ARES 108
+#define GCC_NSSNOC_CE_AXI_ARES 109
+#define GCC_NSSNOC_UBI0_AHB_ARES 110
+#define GCC_NSSNOC_UBI1_AHB_ARES 111
+#define GCC_NSSNOC_SNOC_ARES 112
+#define GCC_NSSNOC_CRYPTO_ARES 113
+#define GCC_NSSNOC_ATB_ARES 114
+#define GCC_NSSNOC_QOSGEN_REF_ARES 115
+#define GCC_NSSNOC_TIMEOUT_REF_ARES 116
+#define GCC_PCIE0_PIPE_ARES 117
+#define GCC_PCIE0_SLEEP_ARES 118
+#define GCC_PCIE0_CORE_STICKY_ARES 119
+#define GCC_PCIE0_AXI_MASTER_ARES 120
+#define GCC_PCIE0_AXI_SLAVE_ARES 121
+#define GCC_PCIE0_AHB_ARES 122
+#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 123
+#define GCC_PCIE1_PIPE_ARES 124
+#define GCC_PCIE1_SLEEP_ARES 125
+#define GCC_PCIE1_CORE_STICKY_ARES 126
+#define GCC_PCIE1_AXI_MASTER_ARES 127
+#define GCC_PCIE1_AXI_SLAVE_ARES 128
+#define GCC_PCIE1_AHB_ARES 129
+#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130
#endif
diff --git a/include/dt-bindings/clock/sprd,sc9860-clk.h b/include/dt-bindings/clock/sprd,sc9860-clk.h
new file mode 100644
index 000000000000..4cb202f090c2
--- /dev/null
+++ b/include/dt-bindings/clock/sprd,sc9860-clk.h
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+//
+// Spreadtrum SC9860 platform clocks
+//
+// Copyright (C) 2017, Spreadtrum Communications Inc.
+
+#ifndef _DT_BINDINGS_CLK_SC9860_H_
+#define _DT_BINDINGS_CLK_SC9860_H_
+
+#define CLK_FAC_4M 0
+#define CLK_FAC_2M 1
+#define CLK_FAC_1M 2
+#define CLK_FAC_250K 3
+#define CLK_FAC_RPLL0_26M 4
+#define CLK_FAC_RPLL1_26M 5
+#define CLK_FAC_RCO25M 6
+#define CLK_FAC_RCO4M 7
+#define CLK_FAC_RCO2M 8
+#define CLK_FAC_3K2 9
+#define CLK_FAC_1K 10
+#define CLK_MPLL0_GATE 11
+#define CLK_MPLL1_GATE 12
+#define CLK_DPLL0_GATE 13
+#define CLK_DPLL1_GATE 14
+#define CLK_LTEPLL0_GATE 15
+#define CLK_TWPLL_GATE 16
+#define CLK_LTEPLL1_GATE 17
+#define CLK_RPLL0_GATE 18
+#define CLK_RPLL1_GATE 19
+#define CLK_CPPLL_GATE 20
+#define CLK_GPLL_GATE 21
+#define CLK_PMU_GATE_NUM (CLK_GPLL_GATE + 1)
+
+#define CLK_MPLL0 0
+#define CLK_MPLL1 1
+#define CLK_DPLL0 2
+#define CLK_DPLL1 3
+#define CLK_RPLL0 4
+#define CLK_RPLL1 5
+#define CLK_TWPLL 6
+#define CLK_LTEPLL0 7
+#define CLK_LTEPLL1 8
+#define CLK_GPLL 9
+#define CLK_CPPLL 10
+#define CLK_GPLL_42M5 11
+#define CLK_TWPLL_768M 12
+#define CLK_TWPLL_384M 13
+#define CLK_TWPLL_192M 14
+#define CLK_TWPLL_96M 15
+#define CLK_TWPLL_48M 16
+#define CLK_TWPLL_24M 17
+#define CLK_TWPLL_12M 18
+#define CLK_TWPLL_512M 19
+#define CLK_TWPLL_256M 20
+#define CLK_TWPLL_128M 21
+#define CLK_TWPLL_64M 22
+#define CLK_TWPLL_307M2 23
+#define CLK_TWPLL_153M6 24
+#define CLK_TWPLL_76M8 25
+#define CLK_TWPLL_51M2 26
+#define CLK_TWPLL_38M4 27
+#define CLK_TWPLL_19M2 28
+#define CLK_L0_614M4 29
+#define CLK_L0_409M6 30
+#define CLK_L0_38M 31
+#define CLK_L1_38M 32
+#define CLK_RPLL0_192M 33
+#define CLK_RPLL0_96M 34
+#define CLK_RPLL0_48M 35
+#define CLK_RPLL1_468M 36
+#define CLK_RPLL1_192M 37
+#define CLK_RPLL1_96M 38
+#define CLK_RPLL1_64M 39
+#define CLK_RPLL1_48M 40
+#define CLK_DPLL0_50M 41
+#define CLK_DPLL1_50M 42
+#define CLK_CPPLL_50M 43
+#define CLK_M0_39M 44
+#define CLK_M1_63M 45
+#define CLK_PLL_NUM (CLK_M1_63M + 1)
+
+
+#define CLK_AP_APB 0
+#define CLK_AP_USB3 1
+#define CLK_UART0 2
+#define CLK_UART1 3
+#define CLK_UART2 4
+#define CLK_UART3 5
+#define CLK_UART4 6
+#define CLK_I2C0 7
+#define CLK_I2C1 8
+#define CLK_I2C2 9
+#define CLK_I2C3 10
+#define CLK_I2C4 11
+#define CLK_I2C5 12
+#define CLK_SPI0 13
+#define CLK_SPI1 14
+#define CLK_SPI2 15
+#define CLK_SPI3 16
+#define CLK_IIS0 17
+#define CLK_IIS1 18
+#define CLK_IIS2 19
+#define CLK_IIS3 20
+#define CLK_AP_CLK_NUM (CLK_IIS3 + 1)
+
+#define CLK_AON_APB 0
+#define CLK_AUX0 1
+#define CLK_AUX1 2
+#define CLK_AUX2 3
+#define CLK_PROBE 4
+#define CLK_SP_AHB 5
+#define CLK_CCI 6
+#define CLK_GIC 7
+#define CLK_CSSYS 8
+#define CLK_SDIO0_2X 9
+#define CLK_SDIO1_2X 10
+#define CLK_SDIO2_2X 11
+#define CLK_EMMC_2X 12
+#define CLK_SDIO0_1X 13
+#define CLK_SDIO1_1X 14
+#define CLK_SDIO2_1X 15
+#define CLK_EMMC_1X 16
+#define CLK_ADI 17
+#define CLK_PWM0 18
+#define CLK_PWM1 19
+#define CLK_PWM2 20
+#define CLK_PWM3 21
+#define CLK_EFUSE 22
+#define CLK_CM3_UART0 23
+#define CLK_CM3_UART1 24
+#define CLK_THM 25
+#define CLK_CM3_I2C0 26
+#define CLK_CM3_I2C1 27
+#define CLK_CM4_SPI 28
+#define CLK_AON_I2C 29
+#define CLK_AVS 30
+#define CLK_CA53_DAP 31
+#define CLK_CA53_TS 32
+#define CLK_DJTAG_TCK 33
+#define CLK_PMU 34
+#define CLK_PMU_26M 35
+#define CLK_DEBOUNCE 36
+#define CLK_OTG2_REF 37
+#define CLK_USB3_REF 38
+#define CLK_AP_AXI 39
+#define CLK_AON_PREDIV_NUM (CLK_AP_AXI + 1)
+
+#define CLK_USB3_EB 0
+#define CLK_USB3_SUSPEND_EB 1
+#define CLK_USB3_REF_EB 2
+#define CLK_DMA_EB 3
+#define CLK_SDIO0_EB 4
+#define CLK_SDIO1_EB 5
+#define CLK_SDIO2_EB 6
+#define CLK_EMMC_EB 7
+#define CLK_ROM_EB 8
+#define CLK_BUSMON_EB 9
+#define CLK_CC63S_EB 10
+#define CLK_CC63P_EB 11
+#define CLK_CE0_EB 12
+#define CLK_CE1_EB 13
+#define CLK_APAHB_GATE_NUM (CLK_CE1_EB + 1)
+
+#define CLK_AVS_LIT_EB 0
+#define CLK_AVS_BIG_EB 1
+#define CLK_AP_INTC5_EB 2
+#define CLK_GPIO_EB 3
+#define CLK_PWM0_EB 4
+#define CLK_PWM1_EB 5
+#define CLK_PWM2_EB 6
+#define CLK_PWM3_EB 7
+#define CLK_KPD_EB 8
+#define CLK_AON_SYS_EB 9
+#define CLK_AP_SYS_EB 10
+#define CLK_AON_TMR_EB 11
+#define CLK_AP_TMR0_EB 12
+#define CLK_EFUSE_EB 13
+#define CLK_EIC_EB 14
+#define CLK_PUB1_REG_EB 15
+#define CLK_ADI_EB 16
+#define CLK_AP_INTC0_EB 17
+#define CLK_AP_INTC1_EB 18
+#define CLK_AP_INTC2_EB 19
+#define CLK_AP_INTC3_EB 20
+#define CLK_AP_INTC4_EB 21
+#define CLK_SPLK_EB 22
+#define CLK_MSPI_EB 23
+#define CLK_PUB0_REG_EB 24
+#define CLK_PIN_EB 25
+#define CLK_AON_CKG_EB 26
+#define CLK_GPU_EB 27
+#define CLK_APCPU_TS0_EB 28
+#define CLK_APCPU_TS1_EB 29
+#define CLK_DAP_EB 30
+#define CLK_I2C_EB 31
+#define CLK_PMU_EB 32
+#define CLK_THM_EB 33
+#define CLK_AUX0_EB 34
+#define CLK_AUX1_EB 35
+#define CLK_AUX2_EB 36
+#define CLK_PROBE_EB 37
+#define CLK_GPU0_AVS_EB 38
+#define CLK_GPU1_AVS_EB 39
+#define CLK_APCPU_WDG_EB 40
+#define CLK_AP_TMR1_EB 41
+#define CLK_AP_TMR2_EB 42
+#define CLK_DISP_EMC_EB 43
+#define CLK_ZIP_EMC_EB 44
+#define CLK_GSP_EMC_EB 45
+#define CLK_OSC_AON_EB 46
+#define CLK_LVDS_TRX_EB 47
+#define CLK_LVDS_TCXO_EB 48
+#define CLK_MDAR_EB 49
+#define CLK_RTC4M0_CAL_EB 50
+#define CLK_RCT100M_CAL_EB 51
+#define CLK_DJTAG_EB 52
+#define CLK_MBOX_EB 53
+#define CLK_AON_DMA_EB 54
+#define CLK_DBG_EMC_EB 55
+#define CLK_LVDS_PLL_DIV_EN 56
+#define CLK_DEF_EB 57
+#define CLK_AON_APB_RSV0 58
+#define CLK_ORP_JTAG_EB 59
+#define CLK_VSP_EB 60
+#define CLK_CAM_EB 61
+#define CLK_DISP_EB 62
+#define CLK_DBG_AXI_IF_EB 63
+#define CLK_SDIO0_2X_EN 64
+#define CLK_SDIO1_2X_EN 65
+#define CLK_SDIO2_2X_EN 66
+#define CLK_EMMC_2X_EN 67
+#define CLK_AON_GATE_NUM (CLK_EMMC_2X_EN + 1)
+
+#define CLK_LIT_MCU 0
+#define CLK_BIG_MCU 1
+#define CLK_AONSECURE_NUM (CLK_BIG_MCU + 1)
+
+#define CLK_AGCP_IIS0_EB 0
+#define CLK_AGCP_IIS1_EB 1
+#define CLK_AGCP_IIS2_EB 2
+#define CLK_AGCP_IIS3_EB 3
+#define CLK_AGCP_UART_EB 4
+#define CLK_AGCP_DMACP_EB 5
+#define CLK_AGCP_DMAAP_EB 6
+#define CLK_AGCP_ARC48K_EB 7
+#define CLK_AGCP_SRC44P1K_EB 8
+#define CLK_AGCP_MCDT_EB 9
+#define CLK_AGCP_VBCIFD_EB 10
+#define CLK_AGCP_VBC_EB 11
+#define CLK_AGCP_SPINLOCK_EB 12
+#define CLK_AGCP_ICU_EB 13
+#define CLK_AGCP_AP_ASHB_EB 14
+#define CLK_AGCP_CP_ASHB_EB 15
+#define CLK_AGCP_AUD_EB 16
+#define CLK_AGCP_AUDIF_EB 17
+#define CLK_AGCP_GATE_NUM (CLK_AGCP_AUDIF_EB + 1)
+
+#define CLK_GPU 0
+#define CLK_GPU_NUM (CLK_GPU + 1)
+
+#define CLK_AHB_VSP 0
+#define CLK_VSP 1
+#define CLK_VSP_ENC 2
+#define CLK_VPP 3
+#define CLK_VSP_26M 4
+#define CLK_VSP_NUM (CLK_VSP_26M + 1)
+
+#define CLK_VSP_DEC_EB 0
+#define CLK_VSP_CKG_EB 1
+#define CLK_VSP_MMU_EB 2
+#define CLK_VSP_ENC_EB 3
+#define CLK_VPP_EB 4
+#define CLK_VSP_26M_EB 5
+#define CLK_VSP_AXI_GATE 6
+#define CLK_VSP_ENC_GATE 7
+#define CLK_VPP_AXI_GATE 8
+#define CLK_VSP_BM_GATE 9
+#define CLK_VSP_ENC_BM_GATE 10
+#define CLK_VPP_BM_GATE 11
+#define CLK_VSP_GATE_NUM (CLK_VPP_BM_GATE + 1)
+
+#define CLK_AHB_CAM 0
+#define CLK_SENSOR0 1
+#define CLK_SENSOR1 2
+#define CLK_SENSOR2 3
+#define CLK_MIPI_CSI0_EB 4
+#define CLK_MIPI_CSI1_EB 5
+#define CLK_CAM_NUM (CLK_MIPI_CSI1_EB + 1)
+
+#define CLK_DCAM0_EB 0
+#define CLK_DCAM1_EB 1
+#define CLK_ISP0_EB 2
+#define CLK_CSI0_EB 3
+#define CLK_CSI1_EB 4
+#define CLK_JPG0_EB 5
+#define CLK_JPG1_EB 6
+#define CLK_CAM_CKG_EB 7
+#define CLK_CAM_MMU_EB 8
+#define CLK_ISP1_EB 9
+#define CLK_CPP_EB 10
+#define CLK_MMU_PF_EB 11
+#define CLK_ISP2_EB 12
+#define CLK_DCAM2ISP_IF_EB 13
+#define CLK_ISP2DCAM_IF_EB 14
+#define CLK_ISP_LCLK_EB 15
+#define CLK_ISP_ICLK_EB 16
+#define CLK_ISP_MCLK_EB 17
+#define CLK_ISP_PCLK_EB 18
+#define CLK_ISP_ISP2DCAM_EB 19
+#define CLK_DCAM0_IF_EB 20
+#define CLK_CLK26M_IF_EB 21
+#define CLK_CPHY0_GATE 22
+#define CLK_MIPI_CSI0_GATE 23
+#define CLK_CPHY1_GATE 24
+#define CLK_MIPI_CSI1 25
+#define CLK_DCAM0_AXI_GATE 26
+#define CLK_DCAM1_AXI_GATE 27
+#define CLK_SENSOR0_GATE 28
+#define CLK_SENSOR1_GATE 29
+#define CLK_JPG0_AXI_GATE 30
+#define CLK_JPG1_AXI_GATE 31
+#define CLK_ISP0_AXI_GATE 32
+#define CLK_ISP1_AXI_GATE 33
+#define CLK_ISP2_AXI_GATE 34
+#define CLK_CPP_AXI_GATE 35
+#define CLK_D0_IF_AXI_GATE 36
+#define CLK_D2I_IF_AXI_GATE 37
+#define CLK_I2D_IF_AXI_GATE 38
+#define CLK_SPARE_AXI_GATE 39
+#define CLK_SENSOR2_GATE 40
+#define CLK_D0IF_IN_D_EN 41
+#define CLK_D1IF_IN_D_EN 42
+#define CLK_D0IF_IN_D2I_EN 43
+#define CLK_D1IF_IN_D2I_EN 44
+#define CLK_IA_IN_D2I_EN 45
+#define CLK_IB_IN_D2I_EN 46
+#define CLK_IC_IN_D2I_EN 47
+#define CLK_IA_IN_I_EN 48
+#define CLK_IB_IN_I_EN 49
+#define CLK_IC_IN_I_EN 50
+#define CLK_CAM_GATE_NUM (CLK_IC_IN_I_EN + 1)
+
+#define CLK_AHB_DISP 0
+#define CLK_DISPC0_DPI 1
+#define CLK_DISPC1_DPI 2
+#define CLK_DISP_NUM (CLK_DISPC1_DPI + 1)
+
+#define CLK_DISPC0_EB 0
+#define CLK_DISPC1_EB 1
+#define CLK_DISPC_MMU_EB 2
+#define CLK_GSP0_EB 3
+#define CLK_GSP1_EB 4
+#define CLK_GSP0_MMU_EB 5
+#define CLK_GSP1_MMU_EB 6
+#define CLK_DSI0_EB 7
+#define CLK_DSI1_EB 8
+#define CLK_DISP_CKG_EB 9
+#define CLK_DISP_GPU_EB 10
+#define CLK_GPU_MTX_EB 11
+#define CLK_GSP_MTX_EB 12
+#define CLK_TMC_MTX_EB 13
+#define CLK_DISPC_MTX_EB 14
+#define CLK_DPHY0_GATE 15
+#define CLK_DPHY1_GATE 16
+#define CLK_GSP0_A_GATE 17
+#define CLK_GSP1_A_GATE 18
+#define CLK_GSP0_F_GATE 19
+#define CLK_GSP1_F_GATE 20
+#define CLK_D_MTX_F_GATE 21
+#define CLK_D_MTX_A_GATE 22
+#define CLK_D_NOC_F_GATE 23
+#define CLK_D_NOC_A_GATE 24
+#define CLK_GSP_MTX_F_GATE 25
+#define CLK_GSP_MTX_A_GATE 26
+#define CLK_GSP_NOC_F_GATE 27
+#define CLK_GSP_NOC_A_GATE 28
+#define CLK_DISPM0IDLE_GATE 29
+#define CLK_GSPM0IDLE_GATE 30
+#define CLK_DISP_GATE_NUM (CLK_GSPM0IDLE_GATE + 1)
+
+#define CLK_SIM0_EB 0
+#define CLK_IIS0_EB 1
+#define CLK_IIS1_EB 2
+#define CLK_IIS2_EB 3
+#define CLK_IIS3_EB 4
+#define CLK_SPI0_EB 5
+#define CLK_SPI1_EB 6
+#define CLK_SPI2_EB 7
+#define CLK_I2C0_EB 8
+#define CLK_I2C1_EB 9
+#define CLK_I2C2_EB 10
+#define CLK_I2C3_EB 11
+#define CLK_I2C4_EB 12
+#define CLK_I2C5_EB 13
+#define CLK_UART0_EB 14
+#define CLK_UART1_EB 15
+#define CLK_UART2_EB 16
+#define CLK_UART3_EB 17
+#define CLK_UART4_EB 18
+#define CLK_AP_CKG_EB 19
+#define CLK_SPI3_EB 20
+#define CLK_APAPB_GATE_NUM (CLK_SPI3_EB + 1)
+
+#endif /* _DT_BINDINGS_CLK_SC9860_H_ */
diff --git a/include/dt-bindings/clock/tegra194-clock.h b/include/dt-bindings/clock/tegra194-clock.h
new file mode 100644
index 000000000000..a2ff66342d69
--- /dev/null
+++ b/include/dt-bindings/clock/tegra194-clock.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T194_CLOCK_H
+#define __ABI_MACH_T194_CLOCK_H
+
+#define TEGRA194_CLK_ACTMON 1
+#define TEGRA194_CLK_ADSP 2
+#define TEGRA194_CLK_ADSPNEON 3
+#define TEGRA194_CLK_AHUB 4
+#define TEGRA194_CLK_APB2APE 5
+#define TEGRA194_CLK_APE 6
+#define TEGRA194_CLK_AUD_MCLK 7
+#define TEGRA194_CLK_AXI_CBB 8
+#define TEGRA194_CLK_CAN1 9
+#define TEGRA194_CLK_CAN1_HOST 10
+#define TEGRA194_CLK_CAN2 11
+#define TEGRA194_CLK_CAN2_HOST 12
+#define TEGRA194_CLK_CEC 13
+#define TEGRA194_CLK_CLK_M 14
+#define TEGRA194_CLK_DMIC1 15
+#define TEGRA194_CLK_DMIC2 16
+#define TEGRA194_CLK_DMIC3 17
+#define TEGRA194_CLK_DMIC4 18
+#define TEGRA194_CLK_DPAUX 19
+#define TEGRA194_CLK_DPAUX1 20
+#define TEGRA194_CLK_ACLK 21
+#define TEGRA194_CLK_MSS_ENCRYPT 22
+#define TEGRA194_CLK_EQOS_RX_INPUT 23
+#define TEGRA194_CLK_IQC2 24
+#define TEGRA194_CLK_AON_APB 25
+#define TEGRA194_CLK_AON_NIC 26
+#define TEGRA194_CLK_AON_CPU_NIC 27
+#define TEGRA194_CLK_PLLA1 28
+#define TEGRA194_CLK_DSPK1 29
+#define TEGRA194_CLK_DSPK2 30
+#define TEGRA194_CLK_EMC 31
+#define TEGRA194_CLK_EQOS_AXI 32
+#define TEGRA194_CLK_EQOS_PTP_REF 33
+#define TEGRA194_CLK_EQOS_RX 34
+#define TEGRA194_CLK_EQOS_TX 35
+#define TEGRA194_CLK_EXTPERIPH1 36
+#define TEGRA194_CLK_EXTPERIPH2 37
+#define TEGRA194_CLK_EXTPERIPH3 38
+#define TEGRA194_CLK_EXTPERIPH4 39
+#define TEGRA194_CLK_FUSE 40
+#define TEGRA194_CLK_GPCCLK 41
+#define TEGRA194_CLK_GPU_PWR 42
+#define TEGRA194_CLK_HDA 43
+#define TEGRA194_CLK_HDA2CODEC_2X 44
+#define TEGRA194_CLK_HDA2HDMICODEC 45
+#define TEGRA194_CLK_HOST1X 46
+#define TEGRA194_CLK_HSIC_TRK 47
+#define TEGRA194_CLK_I2C1 48
+#define TEGRA194_CLK_I2C2 49
+#define TEGRA194_CLK_I2C3 50
+#define TEGRA194_CLK_I2C4 51
+#define TEGRA194_CLK_I2C6 52
+#define TEGRA194_CLK_I2C7 53
+#define TEGRA194_CLK_I2C8 54
+#define TEGRA194_CLK_I2C9 55
+#define TEGRA194_CLK_I2S1 56
+#define TEGRA194_CLK_I2S1_SYNC_INPUT 57
+#define TEGRA194_CLK_I2S2 58
+#define TEGRA194_CLK_I2S2_SYNC_INPUT 59
+#define TEGRA194_CLK_I2S3 60
+#define TEGRA194_CLK_I2S3_SYNC_INPUT 61
+#define TEGRA194_CLK_I2S4 62
+#define TEGRA194_CLK_I2S4_SYNC_INPUT 63
+#define TEGRA194_CLK_I2S5 64
+#define TEGRA194_CLK_I2S5_SYNC_INPUT 65
+#define TEGRA194_CLK_I2S6 66
+#define TEGRA194_CLK_I2S6_SYNC_INPUT 67
+#define TEGRA194_CLK_IQC1 68
+#define TEGRA194_CLK_ISP 69
+#define TEGRA194_CLK_KFUSE 70
+#define TEGRA194_CLK_MAUD 71
+#define TEGRA194_CLK_MIPI_CAL 72
+#define TEGRA194_CLK_MPHY_CORE_PLL_FIXED 73
+#define TEGRA194_CLK_MPHY_L0_RX_ANA 74
+#define TEGRA194_CLK_MPHY_L0_RX_LS_BIT 75
+#define TEGRA194_CLK_MPHY_L0_RX_SYMB 76
+#define TEGRA194_CLK_MPHY_L0_TX_LS_3XBIT 77
+#define TEGRA194_CLK_MPHY_L0_TX_SYMB 78
+#define TEGRA194_CLK_MPHY_L1_RX_ANA 79
+#define TEGRA194_CLK_MPHY_TX_1MHZ_REF 80
+#define TEGRA194_CLK_NVCSI 81
+#define TEGRA194_CLK_NVCSILP 82
+#define TEGRA194_CLK_NVDEC 83
+#define TEGRA194_CLK_NVDISPLAYHUB 84
+#define TEGRA194_CLK_NVDISPLAY_DISP 85
+#define TEGRA194_CLK_NVDISPLAY_P0 86
+#define TEGRA194_CLK_NVDISPLAY_P1 87
+#define TEGRA194_CLK_NVDISPLAY_P2 88
+#define TEGRA194_CLK_NVENC 89
+#define TEGRA194_CLK_NVJPG 90
+#define TEGRA194_CLK_OSC 91
+#define TEGRA194_CLK_AON_TOUCH 92
+#define TEGRA194_CLK_PLLA 93
+#define TEGRA194_CLK_PLLAON 94
+#define TEGRA194_CLK_PLLD 95
+#define TEGRA194_CLK_PLLD2 96
+#define TEGRA194_CLK_PLLD3 97
+#define TEGRA194_CLK_PLLDP 98
+#define TEGRA194_CLK_PLLD4 99
+#define TEGRA194_CLK_PLLE 100
+#define TEGRA194_CLK_PLLP 101
+#define TEGRA194_CLK_PLLP_OUT0 102
+#define TEGRA194_CLK_UTMIPLL 103
+#define TEGRA194_CLK_PLLA_OUT0 104
+#define TEGRA194_CLK_PWM1 105
+#define TEGRA194_CLK_PWM2 106
+#define TEGRA194_CLK_PWM3 107
+#define TEGRA194_CLK_PWM4 108
+#define TEGRA194_CLK_PWM5 109
+#define TEGRA194_CLK_PWM6 110
+#define TEGRA194_CLK_PWM7 111
+#define TEGRA194_CLK_PWM8 112
+#define TEGRA194_CLK_RCE_CPU_NIC 113
+#define TEGRA194_CLK_RCE_NIC 114
+#define TEGRA194_CLK_SATA 115
+#define TEGRA194_CLK_SATA_OOB 116
+#define TEGRA194_CLK_AON_I2C_SLOW 117
+#define TEGRA194_CLK_SCE_CPU_NIC 118
+#define TEGRA194_CLK_SCE_NIC 119
+#define TEGRA194_CLK_SDMMC1 120
+#define TEGRA194_CLK_UPHY_PLL3 121
+#define TEGRA194_CLK_SDMMC3 122
+#define TEGRA194_CLK_SDMMC4 123
+#define TEGRA194_CLK_SE 124
+#define TEGRA194_CLK_SOR0_OUT 125
+#define TEGRA194_CLK_SOR0_REF 126
+#define TEGRA194_CLK_SOR0_PAD_CLKOUT 127
+#define TEGRA194_CLK_SOR1_OUT 128
+#define TEGRA194_CLK_SOR1_REF 129
+#define TEGRA194_CLK_SOR1_PAD_CLKOUT 130
+#define TEGRA194_CLK_SOR_SAFE 131
+#define TEGRA194_CLK_IQC1_IN 132
+#define TEGRA194_CLK_IQC2_IN 133
+#define TEGRA194_CLK_DMIC5 134
+#define TEGRA194_CLK_SPI1 135
+#define TEGRA194_CLK_SPI2 136
+#define TEGRA194_CLK_SPI3 137
+#define TEGRA194_CLK_I2C_SLOW 138
+#define TEGRA194_CLK_SYNC_DMIC1 139
+#define TEGRA194_CLK_SYNC_DMIC2 140
+#define TEGRA194_CLK_SYNC_DMIC3 141
+#define TEGRA194_CLK_SYNC_DMIC4 142
+#define TEGRA194_CLK_SYNC_DSPK1 143
+#define TEGRA194_CLK_SYNC_DSPK2 144
+#define TEGRA194_CLK_SYNC_I2S1 145
+#define TEGRA194_CLK_SYNC_I2S2 146
+#define TEGRA194_CLK_SYNC_I2S3 147
+#define TEGRA194_CLK_SYNC_I2S4 148
+#define TEGRA194_CLK_SYNC_I2S5 149
+#define TEGRA194_CLK_SYNC_I2S6 150
+#define TEGRA194_CLK_MPHY_FORCE_LS_MODE 151
+#define TEGRA194_CLK_TACH 152
+#define TEGRA194_CLK_TSEC 153
+#define TEGRA194_CLK_TSECB 154
+#define TEGRA194_CLK_UARTA 155
+#define TEGRA194_CLK_UARTB 156
+#define TEGRA194_CLK_UARTC 157
+#define TEGRA194_CLK_UARTD 158
+#define TEGRA194_CLK_UARTE 159
+#define TEGRA194_CLK_UARTF 160
+#define TEGRA194_CLK_UARTG 161
+#define TEGRA194_CLK_UART_FST_MIPI_CAL 162
+#define TEGRA194_CLK_UFSDEV_REF 163
+#define TEGRA194_CLK_UFSHC 164
+#define TEGRA194_CLK_USB2_TRK 165
+#define TEGRA194_CLK_VI 166
+#define TEGRA194_CLK_VIC 167
+#define TEGRA194_CLK_PVA0_AXI 168
+#define TEGRA194_CLK_PVA0_VPS0 169
+#define TEGRA194_CLK_PVA0_VPS1 170
+#define TEGRA194_CLK_PVA1_AXI 171
+#define TEGRA194_CLK_PVA1_VPS0 172
+#define TEGRA194_CLK_PVA1_VPS1 173
+#define TEGRA194_CLK_DLA0_FALCON 174
+#define TEGRA194_CLK_DLA0_CORE 175
+#define TEGRA194_CLK_DLA1_FALCON 176
+#define TEGRA194_CLK_DLA1_CORE 177
+#define TEGRA194_CLK_SOR2_OUT 178
+#define TEGRA194_CLK_SOR2_REF 179
+#define TEGRA194_CLK_SOR2_PAD_CLKOUT 180
+#define TEGRA194_CLK_SOR3_OUT 181
+#define TEGRA194_CLK_SOR3_REF 182
+#define TEGRA194_CLK_SOR3_PAD_CLKOUT 183
+#define TEGRA194_CLK_NVDISPLAY_P3 184
+#define TEGRA194_CLK_DPAUX2 185
+#define TEGRA194_CLK_DPAUX3 186
+#define TEGRA194_CLK_NVDEC1 187
+#define TEGRA194_CLK_NVENC1 188
+#define TEGRA194_CLK_SE_FREE 189
+#define TEGRA194_CLK_UARTH 190
+#define TEGRA194_CLK_FUSE_SERIAL 191
+#define TEGRA194_CLK_QSPI0 192
+#define TEGRA194_CLK_QSPI1 193
+#define TEGRA194_CLK_QSPI0_PM 194
+#define TEGRA194_CLK_QSPI1_PM 195
+#define TEGRA194_CLK_VI_CONST 196
+#define TEGRA194_CLK_NAFLL_BPMP 197
+#define TEGRA194_CLK_NAFLL_SCE 198
+#define TEGRA194_CLK_NAFLL_NVDEC 199
+#define TEGRA194_CLK_NAFLL_NVJPG 200
+#define TEGRA194_CLK_NAFLL_TSEC 201
+#define TEGRA194_CLK_NAFLL_TSECB 202
+#define TEGRA194_CLK_NAFLL_VI 203
+#define TEGRA194_CLK_NAFLL_SE 204
+#define TEGRA194_CLK_NAFLL_NVENC 205
+#define TEGRA194_CLK_NAFLL_ISP 206
+#define TEGRA194_CLK_NAFLL_VIC 207
+#define TEGRA194_CLK_NAFLL_NVDISPLAYHUB 208
+#define TEGRA194_CLK_NAFLL_AXICBB 209
+#define TEGRA194_CLK_NAFLL_DLA 210
+#define TEGRA194_CLK_NAFLL_PVA_CORE 211
+#define TEGRA194_CLK_NAFLL_PVA_VPS 212
+#define TEGRA194_CLK_NAFLL_CVNAS 213
+#define TEGRA194_CLK_NAFLL_RCE 214
+#define TEGRA194_CLK_NAFLL_NVENC1 215
+#define TEGRA194_CLK_NAFLL_DLA_FALCON 216
+#define TEGRA194_CLK_NAFLL_NVDEC1 217
+#define TEGRA194_CLK_NAFLL_GPU 218
+#define TEGRA194_CLK_SDMMC_LEGACY_TM 219
+#define TEGRA194_CLK_PEX0_CORE_0 220
+#define TEGRA194_CLK_PEX0_CORE_1 221
+#define TEGRA194_CLK_PEX0_CORE_2 222
+#define TEGRA194_CLK_PEX0_CORE_3 223
+#define TEGRA194_CLK_PEX0_CORE_4 224
+#define TEGRA194_CLK_PEX1_CORE_5 225
+#define TEGRA194_CLK_PEX_REF1 226
+#define TEGRA194_CLK_PEX_REF2 227
+#define TEGRA194_CLK_CSI_A 229
+#define TEGRA194_CLK_CSI_B 230
+#define TEGRA194_CLK_CSI_C 231
+#define TEGRA194_CLK_CSI_D 232
+#define TEGRA194_CLK_CSI_E 233
+#define TEGRA194_CLK_CSI_F 234
+#define TEGRA194_CLK_CSI_G 235
+#define TEGRA194_CLK_CSI_H 236
+#define TEGRA194_CLK_PLLC4 237
+#define TEGRA194_CLK_PLLC4_OUT 238
+#define TEGRA194_CLK_PLLC4_OUT1 239
+#define TEGRA194_CLK_PLLC4_OUT2 240
+#define TEGRA194_CLK_PLLC4_MUXED 241
+#define TEGRA194_CLK_PLLC4_VCO_DIV2 242
+#define TEGRA194_CLK_CSI_A_PAD 244
+#define TEGRA194_CLK_CSI_B_PAD 245
+#define TEGRA194_CLK_CSI_C_PAD 246
+#define TEGRA194_CLK_CSI_D_PAD 247
+#define TEGRA194_CLK_CSI_E_PAD 248
+#define TEGRA194_CLK_CSI_F_PAD 249
+#define TEGRA194_CLK_CSI_G_PAD 250
+#define TEGRA194_CLK_CSI_H_PAD 251
+#define TEGRA194_CLK_PEX_SATA_USB_RX_BYP 254
+#define TEGRA194_CLK_PEX_USB_PAD_PLL0_MGMT 255
+#define TEGRA194_CLK_PEX_USB_PAD_PLL1_MGMT 256
+#define TEGRA194_CLK_PEX_USB_PAD_PLL2_MGMT 257
+#define TEGRA194_CLK_PEX_USB_PAD_PLL3_MGMT 258
+#define TEGRA194_CLK_XUSB_CORE_DEV 265
+#define TEGRA194_CLK_XUSB_CORE_MUX 266
+#define TEGRA194_CLK_XUSB_CORE_HOST 267
+#define TEGRA194_CLK_XUSB_CORE_SS 268
+#define TEGRA194_CLK_XUSB_FALCON 269
+#define TEGRA194_CLK_XUSB_FALCON_HOST 270
+#define TEGRA194_CLK_XUSB_FALCON_SS 271
+#define TEGRA194_CLK_XUSB_FS 272
+#define TEGRA194_CLK_XUSB_FS_HOST 273
+#define TEGRA194_CLK_XUSB_FS_DEV 274
+#define TEGRA194_CLK_XUSB_SS 275
+#define TEGRA194_CLK_XUSB_SS_DEV 276
+#define TEGRA194_CLK_XUSB_SS_SUPERSPEED 277
+#define TEGRA194_CLK_PLLDISPHUB 278
+#define TEGRA194_CLK_PLLDISPHUB_DIV 279
+#define TEGRA194_CLK_NAFLL_CLUSTER0 280
+#define TEGRA194_CLK_NAFLL_CLUSTER1 281
+#define TEGRA194_CLK_NAFLL_CLUSTER2 282
+#define TEGRA194_CLK_NAFLL_CLUSTER3 283
+#define TEGRA194_CLK_CAN1_CORE 284
+#define TEGRA194_CLK_CAN2_CORE 285
+#define TEGRA194_CLK_PLLA1_OUT1 286
+#define TEGRA194_CLK_PLLREFE_VCOOUT 288
+#define TEGRA194_CLK_CLK_32K 289
+#define TEGRA194_CLK_SPDIFIN_SYNC_INPUT 290
+#define TEGRA194_CLK_UTMIPLL_CLKOUT48 291
+#define TEGRA194_CLK_UTMIPLL_CLKOUT480 292
+#define TEGRA194_CLK_CVNAS 293
+#define TEGRA194_CLK_PLLNVCSI 294
+#define TEGRA194_CLK_PVA0_CPU_AXI 295
+#define TEGRA194_CLK_PVA1_CPU_AXI 296
+#define TEGRA194_CLK_PVA0_VPS 297
+#define TEGRA194_CLK_PVA1_VPS 298
+#define TEGRA194_CLK_DLA0_FALCON_MUX 299
+#define TEGRA194_CLK_DLA1_FALCON_MUX 300
+#define TEGRA194_CLK_DLA0_CORE_MUX 301
+#define TEGRA194_CLK_DLA1_CORE_MUX 302
+#define TEGRA194_CLK_UTMIPLL_HPS 304
+#define TEGRA194_CLK_I2C5 305
+#define TEGRA194_CLK_I2C10 306
+#define TEGRA194_CLK_BPMP_CPU_NIC 307
+#define TEGRA194_CLK_BPMP_APB 308
+#define TEGRA194_CLK_TSC 309
+#define TEGRA194_CLK_EMCSA 310
+#define TEGRA194_CLK_EMCSB 311
+#define TEGRA194_CLK_EMCSC 312
+#define TEGRA194_CLK_EMCSD 313
+#define TEGRA194_CLK_PLLC 314
+#define TEGRA194_CLK_PLLC2 315
+#define TEGRA194_CLK_PLLC3 316
+#define TEGRA194_CLK_TSC_REF 317
+#define TEGRA194_CLK_FUSE_BURN 318
+#define TEGRA194_CLK_PEX0_CORE_0M 319
+#define TEGRA194_CLK_PEX0_CORE_1M 320
+#define TEGRA194_CLK_PEX0_CORE_2M 321
+#define TEGRA194_CLK_PEX0_CORE_3M 322
+#define TEGRA194_CLK_PEX0_CORE_4M 323
+#define TEGRA194_CLK_PEX1_CORE_5M 324
+#define TEGRA194_CLK_PLLE_HPS 326
+
+#endif
diff --git a/include/dt-bindings/gpio/aspeed-gpio.h b/include/dt-bindings/gpio/aspeed-gpio.h
new file mode 100644
index 000000000000..56fc4889b2c4
--- /dev/null
+++ b/include/dt-bindings/gpio/aspeed-gpio.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * This header provides constants for binding aspeed,*-gpio.
+ *
+ * The first cell in Aspeed's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_ASPEED_GPIO_H
+#define _DT_BINDINGS_GPIO_ASPEED_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+#define ASPEED_GPIO_PORT_A 0
+#define ASPEED_GPIO_PORT_B 1
+#define ASPEED_GPIO_PORT_C 2
+#define ASPEED_GPIO_PORT_D 3
+#define ASPEED_GPIO_PORT_E 4
+#define ASPEED_GPIO_PORT_F 5
+#define ASPEED_GPIO_PORT_G 6
+#define ASPEED_GPIO_PORT_H 7
+#define ASPEED_GPIO_PORT_I 8
+#define ASPEED_GPIO_PORT_J 9
+#define ASPEED_GPIO_PORT_K 10
+#define ASPEED_GPIO_PORT_L 11
+#define ASPEED_GPIO_PORT_M 12
+#define ASPEED_GPIO_PORT_N 13
+#define ASPEED_GPIO_PORT_O 14
+#define ASPEED_GPIO_PORT_P 15
+#define ASPEED_GPIO_PORT_Q 16
+#define ASPEED_GPIO_PORT_R 17
+#define ASPEED_GPIO_PORT_S 18
+#define ASPEED_GPIO_PORT_T 19
+#define ASPEED_GPIO_PORT_U 20
+#define ASPEED_GPIO_PORT_V 21
+#define ASPEED_GPIO_PORT_W 22
+#define ASPEED_GPIO_PORT_X 23
+#define ASPEED_GPIO_PORT_Y 24
+#define ASPEED_GPIO_PORT_Z 25
+#define ASPEED_GPIO_PORT_AA 26
+#define ASPEED_GPIO_PORT_AB 27
+#define ASPEED_GPIO_PORT_AC 28
+
+#define ASPEED_GPIO(port, offset) \
+ ((ASPEED_GPIO_PORT_##port * 8) + offset)
+
+#endif
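
Editor's note: the ASPEED_GPIO() helper above flattens a (port, offset) pair into the single GPIO ID used in the first specifier cell, at eight lines per port. A minimal standalone C sketch of that calculation, with the relevant port macros copied from the new header so it builds outside the kernel tree; the DTS consumer shown in the comment is hypothetical.

/*
 * Standalone sketch of the ASPEED_GPIO() ID calculation. The port
 * macros are copied from the header above; each port contributes
 * eight lines, so the flat ID is port * 8 + offset.
 */
#include <assert.h>

#define ASPEED_GPIO_PORT_A	0
#define ASPEED_GPIO_PORT_D	3
#define ASPEED_GPIO_PORT_AC	28

#define ASPEED_GPIO(port, offset) \
	((ASPEED_GPIO_PORT_##port * 8) + (offset))

int main(void)
{
	assert(ASPEED_GPIO(A, 0) == 0);    /* first line of the first port */
	assert(ASPEED_GPIO(D, 7) == 31);   /* 3 * 8 + 7 */
	assert(ASPEED_GPIO(AC, 0) == 224); /* last port starts at 28 * 8 */

	/*
	 * Hypothetical DTS consumer using the flat ID in the first cell:
	 *   gpios = <&gpio ASPEED_GPIO(D, 7) GPIO_ACTIVE_LOW>;
	 */
	return 0;
}
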
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index dd549ff04295..2cc10ae4bbb7 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -29,8 +29,8 @@
#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
-/* Bit 3 express GPIO suspend/resume persistence */
-#define GPIO_SLEEP_MAINTAIN_VALUE 0
-#define GPIO_SLEEP_MAY_LOSE_VALUE 8
+/* Bit 3 express GPIO suspend/resume and reset persistence */
+#define GPIO_PERSISTENT 0
+#define GPIO_TRANSITORY 8
#endif
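
Editor's note: with the rename, bit 3 is still the only persistence bit: GPIO_PERSISTENT is the zero default and GPIO_TRANSITORY marks a line whose state may be lost across suspend or reset. A small sketch of how the second specifier cell composes this with the polarity bit; GPIO_ACTIVE_LOW is assumed from the same gpio.h, and the gpios property in the comment is only an illustration.

/*
 * Flag-cell composition: bit 0 carries polarity, bit 3 carries the
 * (renamed) persistence flag. Values mirror dt-bindings/gpio/gpio.h.
 */
#include <assert.h>

#define GPIO_ACTIVE_LOW  1
#define GPIO_PERSISTENT  0
#define GPIO_TRANSITORY  8

int main(void)
{
	unsigned int flags = GPIO_ACTIVE_LOW | GPIO_TRANSITORY;

	assert(flags == 0x9);  /* bits 0 and 3 set */
	assert(flags & 0x8);   /* transitory: state may be lost */

	/* DTS: gpios = <&gpio 12 (GPIO_ACTIVE_LOW | GPIO_TRANSITORY)>; */
	return 0;
}
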
diff --git a/include/dt-bindings/gpio/meson-axg-gpio.h b/include/dt-bindings/gpio/meson-axg-gpio.h
new file mode 100644
index 000000000000..25bb1fffa97a
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-axg-gpio.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2017 Amlogic, Inc. All rights reserved.
+ * Author: Xingyu Chen <xingyu.chen@amlogic.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef _DT_BINDINGS_MESON_AXG_GPIO_H
+#define _DT_BINDINGS_MESON_AXG_GPIO_H
+
+/* First GPIO chip */
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOAO_12 12
+#define GPIOAO_13 13
+#define GPIO_TEST_N 14
+
+/* Second GPIO chip */
+#define GPIOZ_0 0
+#define GPIOZ_1 1
+#define GPIOZ_2 2
+#define GPIOZ_3 3
+#define GPIOZ_4 4
+#define GPIOZ_5 5
+#define GPIOZ_6 6
+#define GPIOZ_7 7
+#define GPIOZ_8 8
+#define GPIOZ_9 9
+#define GPIOZ_10 10
+#define BOOT_0 11
+#define BOOT_1 12
+#define BOOT_2 13
+#define BOOT_3 14
+#define BOOT_4 15
+#define BOOT_5 16
+#define BOOT_6 17
+#define BOOT_7 18
+#define BOOT_8 19
+#define BOOT_9 20
+#define BOOT_10 21
+#define BOOT_11 22
+#define BOOT_12 23
+#define BOOT_13 24
+#define BOOT_14 25
+#define GPIOA_0 26
+#define GPIOA_1 27
+#define GPIOA_2 28
+#define GPIOA_3 29
+#define GPIOA_4 30
+#define GPIOA_5 31
+#define GPIOA_6 32
+#define GPIOA_7 33
+#define GPIOA_8 34
+#define GPIOA_9 35
+#define GPIOA_10 36
+#define GPIOA_11 37
+#define GPIOA_12 38
+#define GPIOA_13 39
+#define GPIOA_14 40
+#define GPIOA_15 41
+#define GPIOA_16 42
+#define GPIOA_17 43
+#define GPIOA_18 44
+#define GPIOA_19 45
+#define GPIOA_20 46
+#define GPIOX_0 47
+#define GPIOX_1 48
+#define GPIOX_2 49
+#define GPIOX_3 50
+#define GPIOX_4 51
+#define GPIOX_5 52
+#define GPIOX_6 53
+#define GPIOX_7 54
+#define GPIOX_8 55
+#define GPIOX_9 56
+#define GPIOX_10 57
+#define GPIOX_11 58
+#define GPIOX_12 59
+#define GPIOX_13 60
+#define GPIOX_14 61
+#define GPIOX_15 62
+#define GPIOX_16 63
+#define GPIOX_17 64
+#define GPIOX_18 65
+#define GPIOX_19 66
+#define GPIOX_20 67
+#define GPIOX_21 68
+#define GPIOX_22 69
+#define GPIOY_0 70
+#define GPIOY_1 71
+#define GPIOY_2 72
+#define GPIOY_3 73
+#define GPIOY_4 74
+#define GPIOY_5 75
+#define GPIOY_6 76
+#define GPIOY_7 77
+#define GPIOY_8 78
+#define GPIOY_9 79
+#define GPIOY_10 80
+#define GPIOY_11 81
+#define GPIOY_12 82
+#define GPIOY_13 83
+#define GPIOY_14 84
+#define GPIOY_15 85
+
+#endif /* _DT_BINDINGS_MESON_AXG_GPIO_H */
diff --git a/include/dt-bindings/gpio/meson8b-gpio.h b/include/dt-bindings/gpio/meson8b-gpio.h
index c38cb20d7182..bf0d76fa0e7b 100644
--- a/include/dt-bindings/gpio/meson8b-gpio.h
+++ b/include/dt-bindings/gpio/meson8b-gpio.h
@@ -15,18 +15,113 @@
#ifndef _DT_BINDINGS_MESON8B_GPIO_H
#define _DT_BINDINGS_MESON8B_GPIO_H
-#include <dt-bindings/gpio/meson8-gpio.h>
-
-/* GPIO Bank DIF */
-#define DIF_0_P 120
-#define DIF_0_N 121
-#define DIF_1_P 122
-#define DIF_1_N 123
-#define DIF_2_P 124
-#define DIF_2_N 125
-#define DIF_3_P 126
-#define DIF_3_N 127
-#define DIF_4_P 128
-#define DIF_4_N 129
+/* EE (CBUS) GPIO chip */
+#define GPIOX_0 0
+#define GPIOX_1 1
+#define GPIOX_2 2
+#define GPIOX_3 3
+#define GPIOX_4 4
+#define GPIOX_5 5
+#define GPIOX_6 6
+#define GPIOX_7 7
+#define GPIOX_8 8
+#define GPIOX_9 9
+#define GPIOX_10 10
+#define GPIOX_11 11
+#define GPIOX_16 12
+#define GPIOX_17 13
+#define GPIOX_18 14
+#define GPIOX_19 15
+#define GPIOX_20 16
+#define GPIOX_21 17
+
+#define GPIOY_0 18
+#define GPIOY_1 19
+#define GPIOY_3 20
+#define GPIOY_6 21
+#define GPIOY_7 22
+#define GPIOY_8 23
+#define GPIOY_9 24
+#define GPIOY_10 25
+#define GPIOY_11 26
+#define GPIOY_12 27
+#define GPIOY_13 28
+#define GPIOY_14 29
+
+#define GPIODV_9 30
+#define GPIODV_24 31
+#define GPIODV_25 32
+#define GPIODV_26 33
+#define GPIODV_27 34
+#define GPIODV_28 35
+#define GPIODV_29 36
+
+#define GPIOH_0 37
+#define GPIOH_1 38
+#define GPIOH_2 39
+#define GPIOH_3 40
+#define GPIOH_4 41
+#define GPIOH_5 42
+#define GPIOH_6 43
+#define GPIOH_7 44
+#define GPIOH_8 45
+#define GPIOH_9 46
+
+#define CARD_0 47
+#define CARD_1 48
+#define CARD_2 49
+#define CARD_3 50
+#define CARD_4 51
+#define CARD_5 52
+#define CARD_6 53
+
+#define BOOT_0 54
+#define BOOT_1 55
+#define BOOT_2 56
+#define BOOT_3 57
+#define BOOT_4 58
+#define BOOT_5 59
+#define BOOT_6 60
+#define BOOT_7 61
+#define BOOT_8 62
+#define BOOT_9 63
+#define BOOT_10 64
+#define BOOT_11 65
+#define BOOT_12 66
+#define BOOT_13 67
+#define BOOT_14 68
+#define BOOT_15 69
+#define BOOT_16 70
+#define BOOT_17 71
+#define BOOT_18 72
+
+#define DIF_0_P 73
+#define DIF_0_N 74
+#define DIF_1_P 75
+#define DIF_1_N 76
+#define DIF_2_P 77
+#define DIF_2_N 78
+#define DIF_3_P 79
+#define DIF_3_N 80
+#define DIF_4_P 81
+#define DIF_4_N 82
+
+/* AO GPIO chip */
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOAO_12 12
+#define GPIOAO_13 13
+#define GPIO_BSD_EN 14
+#define GPIO_TEST_N 15
#endif /* _DT_BINDINGS_MESON8B_GPIO_H */
diff --git a/include/dt-bindings/gpio/tegra194-gpio.h b/include/dt-bindings/gpio/tegra194-gpio.h
new file mode 100644
index 000000000000..ede860225f6b
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra194-gpio.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for binding nvidia,tegra194-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA194_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA194_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA194_MAIN_GPIO_PORT_A 0
+#define TEGRA194_MAIN_GPIO_PORT_B 1
+#define TEGRA194_MAIN_GPIO_PORT_C 2
+#define TEGRA194_MAIN_GPIO_PORT_D 3
+#define TEGRA194_MAIN_GPIO_PORT_E 4
+#define TEGRA194_MAIN_GPIO_PORT_F 5
+#define TEGRA194_MAIN_GPIO_PORT_G 6
+#define TEGRA194_MAIN_GPIO_PORT_H 7
+#define TEGRA194_MAIN_GPIO_PORT_I 8
+#define TEGRA194_MAIN_GPIO_PORT_J 9
+#define TEGRA194_MAIN_GPIO_PORT_K 10
+#define TEGRA194_MAIN_GPIO_PORT_L 11
+#define TEGRA194_MAIN_GPIO_PORT_M 12
+#define TEGRA194_MAIN_GPIO_PORT_N 13
+#define TEGRA194_MAIN_GPIO_PORT_O 14
+#define TEGRA194_MAIN_GPIO_PORT_P 15
+#define TEGRA194_MAIN_GPIO_PORT_Q 16
+#define TEGRA194_MAIN_GPIO_PORT_R 17
+#define TEGRA194_MAIN_GPIO_PORT_S 18
+#define TEGRA194_MAIN_GPIO_PORT_T 19
+#define TEGRA194_MAIN_GPIO_PORT_U 20
+#define TEGRA194_MAIN_GPIO_PORT_V 21
+#define TEGRA194_MAIN_GPIO_PORT_W 22
+#define TEGRA194_MAIN_GPIO_PORT_X 23
+#define TEGRA194_MAIN_GPIO_PORT_Y 24
+#define TEGRA194_MAIN_GPIO_PORT_Z 25
+#define TEGRA194_MAIN_GPIO_PORT_FF 26
+#define TEGRA194_MAIN_GPIO_PORT_GG 27
+
+#define TEGRA194_MAIN_GPIO(port, offset) \
+ ((TEGRA194_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA194_AON_GPIO_PORT_AA 0
+#define TEGRA194_AON_GPIO_PORT_BB 1
+#define TEGRA194_AON_GPIO_PORT_CC 2
+#define TEGRA194_AON_GPIO_PORT_DD 3
+#define TEGRA194_AON_GPIO_PORT_EE 4
+
+#define TEGRA194_AON_GPIO(port, offset) \
+ ((TEGRA194_AON_GPIO_PORT_##port * 8) + offset)
+
+#endif
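
Editor's note: the Tegra194 macros follow the same port * 8 + offset flattening, but the main and AON controllers number their ports independently, so their IDs form separate spaces. A brief standalone sketch, with the macros copied from the header above.

/* Main and AON ports are separate ID spaces; both use port * 8 + offset. */
#include <assert.h>

#define TEGRA194_MAIN_GPIO_PORT_GG	27
#define TEGRA194_MAIN_GPIO(port, offset) \
	((TEGRA194_MAIN_GPIO_PORT_##port * 8) + (offset))

#define TEGRA194_AON_GPIO_PORT_AA	0
#define TEGRA194_AON_GPIO(port, offset) \
	((TEGRA194_AON_GPIO_PORT_##port * 8) + (offset))

int main(void)
{
	assert(TEGRA194_MAIN_GPIO(GG, 7) == 223); /* 27 * 8 + 7 */
	assert(TEGRA194_AON_GPIO(AA, 0) == 0);    /* AON numbering restarts at 0 */
	return 0;
}
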
diff --git a/include/dt-bindings/input/gpio-keys.h b/include/dt-bindings/input/gpio-keys.h
new file mode 100644
index 000000000000..8962df79e753
--- /dev/null
+++ b/include/dt-bindings/input/gpio-keys.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for gpio keys bindings.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_KEYS_H
+#define _DT_BINDINGS_GPIO_KEYS_H
+
+#define EV_ACT_ANY 0x00 /* asserted or deasserted */
+#define EV_ACT_ASSERTED 0x01 /* asserted */
+#define EV_ACT_DEASSERTED 0x02 /* deasserted */
+
+#endif /* _DT_BINDINGS_GPIO_KEYS_H */
diff --git a/include/dt-bindings/media/tda1997x.h b/include/dt-bindings/media/tda1997x.h
new file mode 100644
index 000000000000..bd9fbd718ec9
--- /dev/null
+++ b/include/dt-bindings/media/tda1997x.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Gateworks Corporation
+ */
+#ifndef _DT_BINDINGS_MEDIA_TDA1997X_H
+#define _DT_BINDINGS_MEDIA_TDA1997X_H
+
+/* TDA19973 36bit Video Port control registers */
+#define TDA1997X_VP36_35_32 0
+#define TDA1997X_VP36_31_28 1
+#define TDA1997X_VP36_27_24 2
+#define TDA1997X_VP36_23_20 3
+#define TDA1997X_VP36_19_16 4
+#define TDA1997X_VP36_15_12 5
+#define TDA1997X_VP36_11_08 6
+#define TDA1997X_VP36_07_04 7
+#define TDA1997X_VP36_03_00 8
+
+/* TDA19971 24bit Video Port control registers */
+#define TDA1997X_VP24_V23_20 0
+#define TDA1997X_VP24_V19_16 1
+#define TDA1997X_VP24_V15_12 3
+#define TDA1997X_VP24_V11_08 4
+#define TDA1997X_VP24_V07_04 6
+#define TDA1997X_VP24_V03_00 7
+
+/* Pin groups */
+#define TDA1997X_VP_OUT_EN 0x80 /* enable output group */
+#define TDA1997X_VP_HIZ 0x40 /* hi-Z output group when not used */
+#define TDA1997X_VP_SWAP 0x10 /* pin-swap output group */
+#define TDA1997X_R_CR_CBCR_3_0 (0 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_R_CR_CBCR_7_4 (1 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_R_CR_CBCR_11_8 (2 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_B_CB_3_0 (3 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_B_CB_7_4 (4 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_B_CB_11_8 (5 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_G_Y_3_0 (6 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_G_Y_7_4 (7 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+#define TDA1997X_G_Y_11_8 (8 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
+/* pinswapped groups */
+#define TDA1997X_R_CR_CBCR_3_0_S (TDA1997X_R_CR_CBCR_3_0 | TDA1997X_VP_SWAP)
+#define TDA1997X_R_CR_CBCR_7_4_S (TDA1997X_R_CR_CBCR_7_4 | TDA1997X_VP_SWAP)
+#define TDA1997X_R_CR_CBCR_11_8_S (TDA1997X_R_CR_CBCR_11_8 | TDA1997X_VP_SWAP)
+#define TDA1997X_B_CB_3_0_S (TDA1997X_B_CB_3_0 | TDA1997X_VP_SWAP)
+#define TDA1997X_B_CB_7_4_S (TDA1997X_B_CB_7_4 | TDA1997X_VP_SWAP)
+#define TDA1997X_B_CB_11_8_S (TDA1997X_B_CB_11_8 | TDA1997X_VP_SWAP)
+#define TDA1997X_G_Y_3_0_S (TDA1997X_G_Y_3_0 | TDA1997X_VP_SWAP)
+#define TDA1997X_G_Y_7_4_S (TDA1997X_G_Y_7_4 | TDA1997X_VP_SWAP)
+#define TDA1997X_G_Y_11_8_S (TDA1997X_G_Y_11_8 | TDA1997X_VP_SWAP)
+
+/* Audio bus DAI format */
+#define TDA1997X_I2S16 1 /* I2S 16bit */
+#define TDA1997X_I2S32 2 /* I2S 32bit */
+#define TDA1997X_SPDIF 3 /* SPDIF */
+#define TDA1997X_OBA 4 /* One Bit Audio */
+#define TDA1997X_DST 5 /* Direct Stream Transfer */
+#define TDA1997X_I2S16_HBR 6 /* HBR straight in I2S 16bit mode */
+#define TDA1997X_I2S16_HBR_DEMUX 7 /* HBR demux in I2S 16bit mode */
+#define TDA1997X_I2S32_HBR_DEMUX 8 /* HBR demux in I2S 32bit mode */
+#define TDA1997X_SPDIF_HBR_DEMUX 9 /* HBR demux in SPDIF mode */
+
+/* Audio bus channel layout */
+#define TDA1997X_LAYOUT0 0 /* 2-channel */
+#define TDA1997X_LAYOUT1 1 /* 8-channel */
+
+/* Audio bus clock */
+#define TDA1997X_ACLK_16FS 0
+#define TDA1997X_ACLK_32FS 1
+#define TDA1997X_ACLK_64FS 2
+#define TDA1997X_ACLK_128FS 3
+#define TDA1997X_ACLK_256FS 4
+#define TDA1997X_ACLK_512FS 5
+
+#endif /* _DT_BINDINGS_MEDIA_TDA1997X_H */
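
Editor's note: each video-port pin-group value above is a group index OR'd with the flag bits: output enable (0x80), hi-Z when unused (0x40) and, for the _S variants, pin swap (0x10). A standalone sketch verifying that composition, with the macros mirrored from the header.

/*
 * Pin-group composition: the low nibble is the group index, the upper
 * bits carry the output-enable, hi-Z and pin-swap flags.
 */
#include <assert.h>

#define TDA1997X_VP_OUT_EN	0x80
#define TDA1997X_VP_HIZ		0x40
#define TDA1997X_VP_SWAP	0x10

#define TDA1997X_G_Y_11_8	(8 | TDA1997X_VP_OUT_EN | TDA1997X_VP_HIZ)
#define TDA1997X_G_Y_11_8_S	(TDA1997X_G_Y_11_8 | TDA1997X_VP_SWAP)

int main(void)
{
	assert(TDA1997X_G_Y_11_8 == 0xc8);         /* group 8, enabled, hi-Z */
	assert(TDA1997X_G_Y_11_8_S == 0xd8);       /* same group, pins swapped */
	assert((TDA1997X_G_Y_11_8_S & 0x0f) == 8); /* group index in low nibble */
	return 0;
}
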
diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h
new file mode 100644
index 000000000000..64813536aec9
--- /dev/null
+++ b/include/dt-bindings/memory/tegra186-mc.h
@@ -0,0 +1,111 @@
+#ifndef DT_BINDINGS_MEMORY_TEGRA186_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA186_MC_H
+
+/* special clients */
+#define TEGRA186_SID_INVALID 0x00
+#define TEGRA186_SID_PASSTHROUGH 0x7f
+
+/* host1x clients */
+#define TEGRA186_SID_HOST1X 0x01
+#define TEGRA186_SID_CSI 0x02
+#define TEGRA186_SID_VIC 0x03
+#define TEGRA186_SID_VI 0x04
+#define TEGRA186_SID_ISP 0x05
+#define TEGRA186_SID_NVDEC 0x06
+#define TEGRA186_SID_NVENC 0x07
+#define TEGRA186_SID_NVJPG 0x08
+#define TEGRA186_SID_NVDISPLAY 0x09
+#define TEGRA186_SID_TSEC 0x0a
+#define TEGRA186_SID_TSECB 0x0b
+#define TEGRA186_SID_SE 0x0c
+#define TEGRA186_SID_SE1 0x0d
+#define TEGRA186_SID_SE2 0x0e
+#define TEGRA186_SID_SE3 0x0f
+
+/* GPU clients */
+#define TEGRA186_SID_GPU 0x10
+
+/* other SoC clients */
+#define TEGRA186_SID_AFI 0x11
+#define TEGRA186_SID_HDA 0x12
+#define TEGRA186_SID_ETR 0x13
+#define TEGRA186_SID_EQOS 0x14
+#define TEGRA186_SID_UFSHC 0x15
+#define TEGRA186_SID_AON 0x16
+#define TEGRA186_SID_SDMMC4 0x17
+#define TEGRA186_SID_SDMMC3 0x18
+#define TEGRA186_SID_SDMMC2 0x19
+#define TEGRA186_SID_SDMMC1 0x1a
+#define TEGRA186_SID_XUSB_HOST 0x1b
+#define TEGRA186_SID_XUSB_DEV 0x1c
+#define TEGRA186_SID_SATA 0x1d
+#define TEGRA186_SID_APE 0x1e
+#define TEGRA186_SID_SCE 0x1f
+
+/* GPC DMA clients */
+#define TEGRA186_SID_GPCDMA_0 0x20
+#define TEGRA186_SID_GPCDMA_1 0x21
+#define TEGRA186_SID_GPCDMA_2 0x22
+#define TEGRA186_SID_GPCDMA_3 0x23
+#define TEGRA186_SID_GPCDMA_4 0x24
+#define TEGRA186_SID_GPCDMA_5 0x25
+#define TEGRA186_SID_GPCDMA_6 0x26
+#define TEGRA186_SID_GPCDMA_7 0x27
+
+/* APE DMA clients */
+#define TEGRA186_SID_APE_1 0x28
+#define TEGRA186_SID_APE_2 0x29
+
+/* camera RTCPU */
+#define TEGRA186_SID_RCE 0x2a
+
+/* camera RTCPU on host1x address space */
+#define TEGRA186_SID_RCE_1X 0x2b
+
+/* APE DMA clients */
+#define TEGRA186_SID_APE_3 0x2c
+
+/* camera RTCPU running on APE */
+#define TEGRA186_SID_APE_CAM 0x2d
+#define TEGRA186_SID_APE_CAM_1X 0x2e
+
+/*
+ * The BPMP has its SID value hardcoded in the firmware. Changing it requires
+ * considerable effort.
+ */
+#define TEGRA186_SID_BPMP 0x32
+
+/* for SMMU tests */
+#define TEGRA186_SID_SMMU_TEST 0x33
+
+/* host1x virtualization channels */
+#define TEGRA186_SID_HOST1X_CTX0 0x38
+#define TEGRA186_SID_HOST1X_CTX1 0x39
+#define TEGRA186_SID_HOST1X_CTX2 0x3a
+#define TEGRA186_SID_HOST1X_CTX3 0x3b
+#define TEGRA186_SID_HOST1X_CTX4 0x3c
+#define TEGRA186_SID_HOST1X_CTX5 0x3d
+#define TEGRA186_SID_HOST1X_CTX6 0x3e
+#define TEGRA186_SID_HOST1X_CTX7 0x3f
+
+/* host1x command buffers */
+#define TEGRA186_SID_HOST1X_VM0 0x40
+#define TEGRA186_SID_HOST1X_VM1 0x41
+#define TEGRA186_SID_HOST1X_VM2 0x42
+#define TEGRA186_SID_HOST1X_VM3 0x43
+#define TEGRA186_SID_HOST1X_VM4 0x44
+#define TEGRA186_SID_HOST1X_VM5 0x45
+#define TEGRA186_SID_HOST1X_VM6 0x46
+#define TEGRA186_SID_HOST1X_VM7 0x47
+
+/* SE data buffers */
+#define TEGRA186_SID_SE_VM0 0x48
+#define TEGRA186_SID_SE_VM1 0x49
+#define TEGRA186_SID_SE_VM2 0x4a
+#define TEGRA186_SID_SE_VM3 0x4b
+#define TEGRA186_SID_SE_VM4 0x4c
+#define TEGRA186_SID_SE_VM5 0x4d
+#define TEGRA186_SID_SE_VM6 0x4e
+#define TEGRA186_SID_SE_VM7 0x4f
+
+#endif
diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h
index 8b7b7197ffd7..a90f3613c584 100644
--- a/include/dt-bindings/mfd/stm32f7-rcc.h
+++ b/include/dt-bindings/mfd/stm32f7-rcc.h
@@ -91,6 +91,7 @@
#define STM32F7_RCC_APB2_TIM8 1
#define STM32F7_RCC_APB2_USART1 4
#define STM32F7_RCC_APB2_USART6 5
+#define STM32F7_RCC_APB2_SDMMC2 7
#define STM32F7_RCC_APB2_ADC1 8
#define STM32F7_RCC_APB2_ADC2 9
#define STM32F7_RCC_APB2_ADC3 10
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
index 172744a72eb7..7b1656427cbe 100644
--- a/include/dt-bindings/net/ti-dp83867.h
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -42,4 +42,18 @@
#define DP83867_RGMIIDCTL_3_75_NS 0xe
#define DP83867_RGMIIDCTL_4_00_NS 0xf
+/* IO_MUX_CFG - Clock output selection */
+#define DP83867_CLK_O_SEL_CHN_A_RCLK 0x0
+#define DP83867_CLK_O_SEL_CHN_B_RCLK 0x1
+#define DP83867_CLK_O_SEL_CHN_C_RCLK 0x2
+#define DP83867_CLK_O_SEL_CHN_D_RCLK 0x3
+#define DP83867_CLK_O_SEL_CHN_A_RCLK_DIV5 0x4
+#define DP83867_CLK_O_SEL_CHN_B_RCLK_DIV5 0x5
+#define DP83867_CLK_O_SEL_CHN_C_RCLK_DIV5 0x6
+#define DP83867_CLK_O_SEL_CHN_D_RCLK_DIV5 0x7
+#define DP83867_CLK_O_SEL_CHN_A_TCLK 0x8
+#define DP83867_CLK_O_SEL_CHN_B_TCLK 0x9
+#define DP83867_CLK_O_SEL_CHN_C_TCLK 0xA
+#define DP83867_CLK_O_SEL_CHN_D_TCLK 0xB
+#define DP83867_CLK_O_SEL_REF_CLK 0xC
#endif
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index a69e310789c5..6ce4a32f77d4 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -25,7 +25,8 @@
#define DS0_FORCE_OFF_MODE (1 << 24)
#define DS0_INPUT (1 << 25)
#define DS0_FORCE_OUT_HIGH (1 << 26)
-#define DS0_PULL_UP_DOWN_EN (1 << 27)
+#define DS0_PULL_UP_DOWN_EN (0 << 27)
+#define DS0_PULL_UP_DOWN_DIS (1 << 27)
#define DS0_PULL_UP_SEL (1 << 28)
#define WAKEUP_ENABLE (1 << 29)
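
Editor's note: the split above makes the polarity of bit 27 explicit: it is a pull up/down disable bit, so the enable macro has to be (0 << 27) rather than (1 << 27). A small sketch of assembling a deep-sleep pad word, assuming DS0_PULL_UP_SEL (bit 28) from the same header.

/*
 * Bit 27 disables the deep-sleep pull when set; keeping it clear
 * (DS0_PULL_UP_DOWN_EN) leaves the pull active.
 */
#include <assert.h>

#define DS0_PULL_UP_DOWN_EN	(0 << 27)
#define DS0_PULL_UP_DOWN_DIS	(1 << 27)
#define DS0_PULL_UP_SEL		(1 << 28)

int main(void)
{
	unsigned int pull_up = DS0_PULL_UP_DOWN_EN | DS0_PULL_UP_SEL;
	unsigned int no_pull = DS0_PULL_UP_DOWN_DIS;

	assert((pull_up & (1u << 27)) == 0); /* pull still enabled */
	assert((no_pull & (1u << 27)) != 0); /* pull disabled in deep sleep */
	return 0;
}
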
diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
index 2d6a7b1d7be2..4878a67a844c 100644
--- a/include/dt-bindings/pinctrl/mt7623-pinfunc.h
+++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
@@ -251,6 +251,12 @@
#define MT7623_PIN_76_SCL0_FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
#define MT7623_PIN_76_SCL0_FUNC_SCL0 (MTK_PIN_NO(76) | 1)
+#define MT7623_PIN_77_SDA2_FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT7623_PIN_77_SDA2_FUNC_SDA2 (MTK_PIN_NO(77) | 1)
+
+#define MT7623_PIN_78_SCL2_FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT7623_PIN_78_SCL2_FUNC_SCL2 (MTK_PIN_NO(78) | 1)
+
#define MT7623_PIN_79_URXD0_FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
#define MT7623_PIN_79_URXD0_FUNC_URXD0 (MTK_PIN_NO(79) | 1)
#define MT7623_PIN_79_URXD0_FUNC_UTXD0 (MTK_PIN_NO(79) | 2)
@@ -291,6 +297,24 @@
#define MT7623_PIN_100_MIPI_TDP0_FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
#define MT7623_PIN_100_MIPI_TDP0_FUNC_TDP0 (MTK_PIN_NO(100) | 1)
+#define MT7623_PIN_101_SPI2_CSN_FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT7623_PIN_101_SPI2_CSN_FUNC_SPI2_CS (MTK_PIN_NO(101) | 1)
+#define MT7623_PIN_101_SPI2_CSN_FUNC_SCL3 (MTK_PIN_NO(101) | 3)
+
+#define MT7623_PIN_102_SPI2_MI_FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MI (MTK_PIN_NO(102) | 1)
+#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MO (MTK_PIN_NO(102) | 2)
+#define MT7623_PIN_102_SPI2_MI_FUNC_SDA3 (MTK_PIN_NO(102) | 3)
+
+#define MT7623_PIN_103_SPI2_MO_FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MO (MTK_PIN_NO(103) | 1)
+#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MI (MTK_PIN_NO(103) | 2)
+#define MT7623_PIN_103_SPI2_MO_FUNC_SCL3 (MTK_PIN_NO(103) | 3)
+
+#define MT7623_PIN_104_SPI2_CK_FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT7623_PIN_104_SPI2_CK_FUNC_SPI2_CK (MTK_PIN_NO(104) | 1)
+#define MT7623_PIN_104_SPI2_CK_FUNC_SDA3 (MTK_PIN_NO(104) | 3)
+
#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1)
#define MT7623_PIN_105_MSDC1_CMD_FUNC_SDA1 (MTK_PIN_NO(105) | 3)
diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h
index b8dfe31821e6..b5a2174a6386 100644
--- a/include/dt-bindings/pinctrl/stm32-pinfunc.h
+++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h
@@ -1,3 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (C) STMicroelectronics 2017 - All Rights Reserved
+ * Author: Torgue Alexandre <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
#ifndef _DT_BINDINGS_STM32_PINFUNC_H
#define _DT_BINDINGS_STM32_PINFUNC_H
diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h
new file mode 100644
index 000000000000..2c147817efc2
--- /dev/null
+++ b/include/dt-bindings/power/mt2712-power.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT2712_POWER_H
+#define _DT_BINDINGS_POWER_MT2712_POWER_H
+
+#define MT2712_POWER_DOMAIN_MM 0
+#define MT2712_POWER_DOMAIN_VDEC 1
+#define MT2712_POWER_DOMAIN_VENC 2
+#define MT2712_POWER_DOMAIN_ISP 3
+#define MT2712_POWER_DOMAIN_AUDIO 4
+#define MT2712_POWER_DOMAIN_USB 5
+#define MT2712_POWER_DOMAIN_USB2 6
+#define MT2712_POWER_DOMAIN_MFG 7
+#define MT2712_POWER_DOMAIN_MFG_SC1 8
+#define MT2712_POWER_DOMAIN_MFG_SC2 9
+#define MT2712_POWER_DOMAIN_MFG_SC3 10
+
+#endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */
diff --git a/include/dt-bindings/power/mt7623a-power.h b/include/dt-bindings/power/mt7623a-power.h
new file mode 100644
index 000000000000..2544822aa76b
--- /dev/null
+++ b/include/dt-bindings/power/mt7623a-power.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DT_BINDINGS_POWER_MT7623A_POWER_H
+#define _DT_BINDINGS_POWER_MT7623A_POWER_H
+
+#define MT7623A_POWER_DOMAIN_CONN 0
+#define MT7623A_POWER_DOMAIN_ETH 1
+#define MT7623A_POWER_DOMAIN_HIF 2
+#define MT7623A_POWER_DOMAIN_IFR_MSC 3
+
+#endif /* _DT_BINDINGS_POWER_MT7623A_POWER_H */
diff --git a/include/dt-bindings/power/owl-s700-powergate.h b/include/dt-bindings/power/owl-s700-powergate.h
new file mode 100644
index 000000000000..4cf1aefbf09c
--- /dev/null
+++ b/include/dt-bindings/power/owl-s700-powergate.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Actions Semi S700 SPS
+ *
+ * Copyright (c) 2017 Andreas Färber
+ */
+#ifndef DT_BINDINGS_POWER_OWL_S700_POWERGATE_H
+#define DT_BINDINGS_POWER_OWL_S700_POWERGATE_H
+
+#define S700_PD_VDE 0
+#define S700_PD_VCE_SI 1
+#define S700_PD_USB2_1 2
+#define S700_PD_HDE 3
+#define S700_PD_DMA 4
+#define S700_PD_DS 5
+#define S700_PD_USB3 6
+#define S700_PD_USB2_0 7
+
+#endif
diff --git a/include/dt-bindings/power/r8a77965-sysc.h b/include/dt-bindings/power/r8a77965-sysc.h
new file mode 100644
index 000000000000..05a4b5917314
--- /dev/null
+++ b/include/dt-bindings/power/r8a77965-sysc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#ifndef __DT_BINDINGS_POWER_R8A77965_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A77965_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A77965_PD_CA57_CPU0 0
+#define R8A77965_PD_CA57_CPU1 1
+#define R8A77965_PD_A3VP 9
+#define R8A77965_PD_CA57_SCU 12
+#define R8A77965_PD_CR7 13
+#define R8A77965_PD_A3VC 14
+#define R8A77965_PD_3DG_A 17
+#define R8A77965_PD_3DG_B 18
+#define R8A77965_PD_A3IR 24
+#define R8A77965_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A77965_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A77965_SYSC_H__ */
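
Editor's note: because the indices above are defined to equal the interrupt bit numbers, the SYSCISR mask for a power area is simply 1 << index; the same convention holds for the R8A77980 header added next. A standalone sketch, with the indices copied from the header and BIT() redefined locally.

/*
 * Power-domain index == bit position in the SYSC interrupt registers,
 * so the status mask for a domain is just BIT(index).
 */
#include <assert.h>
#include <stdint.h>

#define R8A77965_PD_CA57_CPU1	1
#define R8A77965_PD_A3VC	14

#define BIT(n)	(UINT32_C(1) << (n))

int main(void)
{
	assert(BIT(R8A77965_PD_CA57_CPU1) == 0x00000002);
	assert(BIT(R8A77965_PD_A3VC)      == 0x00004000);
	return 0;
}
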
diff --git a/include/dt-bindings/power/r8a77980-sysc.h b/include/dt-bindings/power/r8a77980-sysc.h
new file mode 100644
index 000000000000..2c90c1237725
--- /dev/null
+++ b/include/dt-bindings/power/r8a77980-sysc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A77980_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A77980_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A77980_PD_A2SC2 0
+#define R8A77980_PD_A2SC3 1
+#define R8A77980_PD_A2SC4 2
+#define R8A77980_PD_A2PD0 3
+#define R8A77980_PD_A2PD1 4
+#define R8A77980_PD_CA53_CPU0 5
+#define R8A77980_PD_CA53_CPU1 6
+#define R8A77980_PD_CA53_CPU2 7
+#define R8A77980_PD_CA53_CPU3 8
+#define R8A77980_PD_A2CN 10
+#define R8A77980_PD_A3VIP 11
+#define R8A77980_PD_A2IR5 12
+#define R8A77980_PD_CR7 13
+#define R8A77980_PD_A2IR4 15
+#define R8A77980_PD_CA53_SCU 21
+#define R8A77980_PD_A2IR0 23
+#define R8A77980_PD_A3IR 24
+#define R8A77980_PD_A3VIP1 25
+#define R8A77980_PD_A3VIP2 26
+#define R8A77980_PD_A2IR1 27
+#define R8A77980_PD_A2IR2 28
+#define R8A77980_PD_A2IR3 29
+#define R8A77980_PD_A2SC0 30
+#define R8A77980_PD_A2SC1 31
+
+/* Always-on power area */
+#define R8A77980_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A77980_SYSC_H__ */
diff --git a/include/dt-bindings/power/tegra194-powergate.h b/include/dt-bindings/power/tegra194-powergate.h
new file mode 100644
index 000000000000..82253742a493
--- /dev/null
+++ b/include/dt-bindings/power/tegra194-powergate.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T194_POWERGATE_T194_H_
+#define __ABI_MACH_T194_POWERGATE_T194_H_
+
+#define TEGRA194_POWER_DOMAIN_AUD 1
+#define TEGRA194_POWER_DOMAIN_DISP 2
+#define TEGRA194_POWER_DOMAIN_DISPB 3
+#define TEGRA194_POWER_DOMAIN_DISPC 4
+#define TEGRA194_POWER_DOMAIN_ISPA 5
+#define TEGRA194_POWER_DOMAIN_NVDECA 6
+#define TEGRA194_POWER_DOMAIN_NVJPG 7
+#define TEGRA194_POWER_DOMAIN_NVENCA 8
+#define TEGRA194_POWER_DOMAIN_NVENCB 9
+#define TEGRA194_POWER_DOMAIN_NVDECB 10
+#define TEGRA194_POWER_DOMAIN_SAX 11
+#define TEGRA194_POWER_DOMAIN_VE 12
+#define TEGRA194_POWER_DOMAIN_VIC 13
+#define TEGRA194_POWER_DOMAIN_XUSBA 14
+#define TEGRA194_POWER_DOMAIN_XUSBB 15
+#define TEGRA194_POWER_DOMAIN_XUSBC 16
+#define TEGRA194_POWER_DOMAIN_PCIEX8A 17
+#define TEGRA194_POWER_DOMAIN_PCIEX4A 18
+#define TEGRA194_POWER_DOMAIN_PCIEX1A 19
+#define TEGRA194_POWER_DOMAIN_PCIEX8B 21
+#define TEGRA194_POWER_DOMAIN_PVAA 22
+#define TEGRA194_POWER_DOMAIN_PVAB 23
+#define TEGRA194_POWER_DOMAIN_DLAA 24
+#define TEGRA194_POWER_DOMAIN_DLAB 25
+#define TEGRA194_POWER_DOMAIN_CV 26
+#define TEGRA194_POWER_DOMAIN_GPU 27
+#define TEGRA194_POWER_DOMAIN_MAX 27
+
+#endif
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
new file mode 100644
index 000000000000..ad6f55dabd6d
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
@@ -0,0 +1,124 @@
+/*
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2017 Amlogic, inc.
+ * Author: Yixun Lan <yixun.lan@amlogic.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD)
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
+
+/* RESET0 */
+#define RESET_HIU 0
+#define RESET_PCIE_A 1
+#define RESET_PCIE_B 2
+#define RESET_DDR_TOP 3
+/* 4 */
+#define RESET_VIU 5
+#define RESET_PCIE_PHY 6
+#define RESET_PCIE_APB 7
+/* 8 */
+/* 9 */
+#define RESET_VENC 10
+#define RESET_ASSIST 11
+/* 12 */
+#define RESET_VCBUS 13
+/* 14 */
+/* 15 */
+#define RESET_GIC 16
+#define RESET_CAPB3_DECODE 17
+/* 18-21 */
+#define RESET_SYS_CPU_CAPB3 22
+#define RESET_CBUS_CAPB3 23
+#define RESET_AHB_CNTL 24
+#define RESET_AHB_DATA 25
+#define RESET_VCBUS_CLK81 26
+#define RESET_MMC 27
+/* 28-31 */
+/* RESET1 */
+/* 32 */
+/* 33 */
+#define RESET_USB_OTG 34
+#define RESET_DDR 35
+#define RESET_AO_RESET 36
+/* 37 */
+#define RESET_AHB_SRAM 38
+/* 39 */
+/* 40 */
+#define RESET_DMA 41
+#define RESET_ISA 42
+#define RESET_ETHERNET 43
+/* 44 */
+#define RESET_SD_EMMC_B 45
+#define RESET_SD_EMMC_C 46
+#define RESET_ROM_BOOT 47
+#define RESET_SYS_CPU_0 48
+#define RESET_SYS_CPU_1 49
+#define RESET_SYS_CPU_2 50
+#define RESET_SYS_CPU_3 51
+#define RESET_SYS_CPU_CORE_0 52
+#define RESET_SYS_CPU_CORE_1 53
+#define RESET_SYS_CPU_CORE_2 54
+#define RESET_SYS_CPU_CORE_3 55
+#define RESET_SYS_PLL_DIV 56
+#define RESET_SYS_CPU_AXI 57
+#define RESET_SYS_CPU_L2 58
+#define RESET_SYS_CPU_P 59
+#define RESET_SYS_CPU_MBIST 60
+/* 61-63 */
+/* RESET2 */
+/* 64 */
+/* 65 */
+#define RESET_AUDIO 66
+/* 67 */
+#define RESET_MIPI_HOST 68
+#define RESET_AUDIO_LOCKER 69
+#define RESET_GE2D 70
+/* 71-76 */
+#define RESET_AO_CPU_RESET 77
+/* 78-95 */
+/* RESET3 */
+#define RESET_RING_OSCILLATOR 96
+/* 97-127 */
+/* RESET4 */
+/* 128 */
+/* 129 */
+#define RESET_MIPI_PHY 130
+/* 131-140 */
+#define RESET_VENCL 141
+#define RESET_I2C_MASTER_2 142
+#define RESET_I2C_MASTER_1 143
+/* 144-159 */
+/* RESET5 */
+/* 160-191 */
+/* RESET6 */
+#define RESET_PERIPHS_GENERAL 192
+#define RESET_PERIPHS_SPICC 193
+/* 194 */
+/* 195 */
+#define RESET_PERIPHS_I2C_MASTER_0 196
+/* 197-200 */
+#define RESET_PERIPHS_UART_0 201
+#define RESET_PERIPHS_UART_1 202
+/* 203-204 */
+#define RESET_PERIPHS_SPI_0 205
+#define RESET_PERIPHS_I2C_MASTER_3 206
+/* 207-223 */
+/* RESET7 */
+#define RESET_USB_DDR_0 224
+#define RESET_USB_DDR_1 225
+#define RESET_USB_DDR_2 226
+#define RESET_USB_DDR_3 227
+/* 228 */
+#define RESET_DEVICE_MMC_ARB 229
+/* 230 */
+#define RESET_VID_LOCK 231
+#define RESET_A9_DMC_PIPEL 232
+#define RESET_DMC_VPU_PIPEL 233
+/* 234-255 */
+
+#endif
diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h
new file mode 100644
index 000000000000..f0c3aaef67a0
--- /dev/null
+++ b/include/dt-bindings/reset/stm32mp1-resets.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP1_RESET_H_
+#define _DT_BINDINGS_STM32MP1_RESET_H_
+
+#define LTDC_R 3072
+#define DSI_R 3076
+#define DDRPERFM_R 3080
+#define USBPHY_R 3088
+#define SPI6_R 3136
+#define I2C4_R 3138
+#define I2C6_R 3139
+#define USART1_R 3140
+#define STGEN_R 3156
+#define GPIOZ_R 3200
+#define CRYP1_R 3204
+#define HASH1_R 3205
+#define RNG1_R 3206
+#define AXIM_R 3216
+#define GPU_R 3269
+#define ETHMAC_R 3274
+#define FMC_R 3276
+#define QSPI_R 3278
+#define SDMMC1_R 3280
+#define SDMMC2_R 3281
+#define CRC1_R 3284
+#define USBH_R 3288
+#define MDMA_R 3328
+#define MCU_R 8225
+#define TIM2_R 19456
+#define TIM3_R 19457
+#define TIM4_R 19458
+#define TIM5_R 19459
+#define TIM6_R 19460
+#define TIM7_R 19461
+#define TIM12_R 19462
+#define TIM13_R 19463
+#define TIM14_R 19464
+#define LPTIM1_R 19465
+#define SPI2_R 19467
+#define SPI3_R 19468
+#define USART2_R 19470
+#define USART3_R 19471
+#define UART4_R 19472
+#define UART5_R 19473
+#define UART7_R 19474
+#define UART8_R 19475
+#define I2C1_R 19477
+#define I2C2_R 19478
+#define I2C3_R 19479
+#define I2C5_R 19480
+#define SPDIF_R 19482
+#define CEC_R 19483
+#define DAC12_R 19485
+#define MDIO_R 19847
+#define TIM1_R 19520
+#define TIM8_R 19521
+#define TIM15_R 19522
+#define TIM16_R 19523
+#define TIM17_R 19524
+#define SPI1_R 19528
+#define SPI4_R 19529
+#define SPI5_R 19530
+#define USART6_R 19533
+#define SAI1_R 19536
+#define SAI2_R 19537
+#define SAI3_R 19538
+#define DFSDM_R 19540
+#define FDCAN_R 19544
+#define LPTIM2_R 19584
+#define LPTIM3_R 19585
+#define LPTIM4_R 19586
+#define LPTIM5_R 19587
+#define SAI4_R 19592
+#define SYSCFG_R 19595
+#define VREF_R 19597
+#define TMPSENS_R 19600
+#define PMBCTRL_R 19601
+#define DMA1_R 19648
+#define DMA2_R 19649
+#define DMAMUX_R 19650
+#define ADC12_R 19653
+#define USBO_R 19656
+#define SDMMC3_R 19664
+#define CAMITF_R 19712
+#define CRYP2_R 19716
+#define HASH2_R 19717
+#define RNG2_R 19718
+#define CRC2_R 19719
+#define HSEM_R 19723
+#define MBOX_R 19724
+#define GPIOA_R 19776
+#define GPIOB_R 19777
+#define GPIOC_R 19778
+#define GPIOD_R 19779
+#define GPIOE_R 19780
+#define GPIOF_R 19781
+#define GPIOG_R 19782
+#define GPIOH_R 19783
+#define GPIOI_R 19784
+#define GPIOJ_R 19785
+#define GPIOK_R 19786
+
+#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */
diff --git a/include/dt-bindings/reset/tegra194-reset.h b/include/dt-bindings/reset/tegra194-reset.h
new file mode 100644
index 000000000000..473afaa25bfb
--- /dev/null
+++ b/include/dt-bindings/reset/tegra194-reset.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T194_RESET_H
+#define __ABI_MACH_T194_RESET_H
+
+#define TEGRA194_RESET_ACTMON 1
+#define TEGRA194_RESET_ADSP_ALL 2
+#define TEGRA194_RESET_AFI 3
+#define TEGRA194_RESET_CAN1 4
+#define TEGRA194_RESET_CAN2 5
+#define TEGRA194_RESET_DLA0 6
+#define TEGRA194_RESET_DLA1 7
+#define TEGRA194_RESET_DPAUX 8
+#define TEGRA194_RESET_DPAUX1 9
+#define TEGRA194_RESET_DPAUX2 10
+#define TEGRA194_RESET_DPAUX3 11
+#define TEGRA194_RESET_EQOS 17
+#define TEGRA194_RESET_GPCDMA 18
+#define TEGRA194_RESET_GPU 19
+#define TEGRA194_RESET_HDA 20
+#define TEGRA194_RESET_HDA2CODEC_2X 21
+#define TEGRA194_RESET_HDA2HDMICODEC 22
+#define TEGRA194_RESET_HOST1X 23
+#define TEGRA194_RESET_I2C1 24
+#define TEGRA194_RESET_I2C10 25
+#define TEGRA194_RESET_RSVD_26 26
+#define TEGRA194_RESET_RSVD_27 27
+#define TEGRA194_RESET_RSVD_28 28
+#define TEGRA194_RESET_I2C2 29
+#define TEGRA194_RESET_I2C3 30
+#define TEGRA194_RESET_I2C4 31
+#define TEGRA194_RESET_I2C6 32
+#define TEGRA194_RESET_I2C7 33
+#define TEGRA194_RESET_I2C8 34
+#define TEGRA194_RESET_I2C9 35
+#define TEGRA194_RESET_ISP 36
+#define TEGRA194_RESET_MIPI_CAL 37
+#define TEGRA194_RESET_MPHY_CLK_CTL 38
+#define TEGRA194_RESET_MPHY_L0_RX 39
+#define TEGRA194_RESET_MPHY_L0_TX 40
+#define TEGRA194_RESET_MPHY_L1_RX 41
+#define TEGRA194_RESET_MPHY_L1_TX 42
+#define TEGRA194_RESET_NVCSI 43
+#define TEGRA194_RESET_NVDEC 44
+#define TEGRA194_RESET_NVDISPLAY0_HEAD0 45
+#define TEGRA194_RESET_NVDISPLAY0_HEAD1 46
+#define TEGRA194_RESET_NVDISPLAY0_HEAD2 47
+#define TEGRA194_RESET_NVDISPLAY0_HEAD3 48
+#define TEGRA194_RESET_NVDISPLAY0_MISC 49
+#define TEGRA194_RESET_NVDISPLAY0_WGRP0 50
+#define TEGRA194_RESET_NVDISPLAY0_WGRP1 51
+#define TEGRA194_RESET_NVDISPLAY0_WGRP2 52
+#define TEGRA194_RESET_NVDISPLAY0_WGRP3 53
+#define TEGRA194_RESET_NVDISPLAY0_WGRP4 54
+#define TEGRA194_RESET_NVDISPLAY0_WGRP5 55
+#define TEGRA194_RESET_RSVD_56 56
+#define TEGRA194_RESET_RSVD_57 57
+#define TEGRA194_RESET_RSVD_58 58
+#define TEGRA194_RESET_NVENC 59
+#define TEGRA194_RESET_NVENC1 60
+#define TEGRA194_RESET_NVJPG 61
+#define TEGRA194_RESET_PCIE 62
+#define TEGRA194_RESET_PCIEXCLK 63
+#define TEGRA194_RESET_RSVD_64 64
+#define TEGRA194_RESET_RSVD_65 65
+#define TEGRA194_RESET_PVA0_ALL 66
+#define TEGRA194_RESET_PVA1_ALL 67
+#define TEGRA194_RESET_PWM1 68
+#define TEGRA194_RESET_PWM2 69
+#define TEGRA194_RESET_PWM3 70
+#define TEGRA194_RESET_PWM4 71
+#define TEGRA194_RESET_PWM5 72
+#define TEGRA194_RESET_PWM6 73
+#define TEGRA194_RESET_PWM7 74
+#define TEGRA194_RESET_PWM8 75
+#define TEGRA194_RESET_QSPI0 76
+#define TEGRA194_RESET_QSPI1 77
+#define TEGRA194_RESET_SATA 78
+#define TEGRA194_RESET_SATACOLD 79
+#define TEGRA194_RESET_SCE_ALL 80
+#define TEGRA194_RESET_RCE_ALL 81
+#define TEGRA194_RESET_SDMMC1 82
+#define TEGRA194_RESET_RSVD_83 83
+#define TEGRA194_RESET_SDMMC3 84
+#define TEGRA194_RESET_SDMMC4 85
+#define TEGRA194_RESET_SE 86
+#define TEGRA194_RESET_SOR0 87
+#define TEGRA194_RESET_SOR1 88
+#define TEGRA194_RESET_SOR2 89
+#define TEGRA194_RESET_SOR3 90
+#define TEGRA194_RESET_SPI1 91
+#define TEGRA194_RESET_SPI2 92
+#define TEGRA194_RESET_SPI3 93
+#define TEGRA194_RESET_SPI4 94
+#define TEGRA194_RESET_TACH 95
+#define TEGRA194_RESET_RSVD_96 96
+#define TEGRA194_RESET_TSCTNVI 97
+#define TEGRA194_RESET_TSEC 98
+#define TEGRA194_RESET_TSECB 99
+#define TEGRA194_RESET_UARTA 100
+#define TEGRA194_RESET_UARTB 101
+#define TEGRA194_RESET_UARTC 102
+#define TEGRA194_RESET_UARTD 103
+#define TEGRA194_RESET_UARTE 104
+#define TEGRA194_RESET_UARTF 105
+#define TEGRA194_RESET_UARTG 106
+#define TEGRA194_RESET_UARTH 107
+#define TEGRA194_RESET_UFSHC 108
+#define TEGRA194_RESET_UFSHC_AXI_M 109
+#define TEGRA194_RESET_UFSHC_LP_SEQ 110
+#define TEGRA194_RESET_RSVD_111 111
+#define TEGRA194_RESET_VI 112
+#define TEGRA194_RESET_VIC 113
+#define TEGRA194_RESET_XUSB_PADCTL 114
+#define TEGRA194_RESET_NVDEC1 115
+#define TEGRA194_RESET_PEX0_CORE_0 116
+#define TEGRA194_RESET_PEX0_CORE_1 117
+#define TEGRA194_RESET_PEX0_CORE_2 118
+#define TEGRA194_RESET_PEX0_CORE_3 119
+#define TEGRA194_RESET_PEX0_CORE_4 120
+#define TEGRA194_RESET_PEX0_CORE_0_APB 121
+#define TEGRA194_RESET_PEX0_CORE_1_APB 122
+#define TEGRA194_RESET_PEX0_CORE_2_APB 123
+#define TEGRA194_RESET_PEX0_CORE_3_APB 124
+#define TEGRA194_RESET_PEX0_CORE_4_APB 125
+#define TEGRA194_RESET_PEX0_COMMON_APB 126
+#define TEGRA194_RESET_PEX1_CORE_5 129
+#define TEGRA194_RESET_PEX1_CORE_5_APB 130
+#define TEGRA194_RESET_CVNAS 131
+#define TEGRA194_RESET_CVNAS_FCM 132
+#define TEGRA194_RESET_DMIC5 144
+#define TEGRA194_RESET_APE 145
+#define TEGRA194_RESET_PEX_USB_UPHY 146
+#define TEGRA194_RESET_PEX_USB_UPHY_L0 147
+#define TEGRA194_RESET_PEX_USB_UPHY_L1 148
+#define TEGRA194_RESET_PEX_USB_UPHY_L2 149
+#define TEGRA194_RESET_PEX_USB_UPHY_L3 150
+#define TEGRA194_RESET_PEX_USB_UPHY_L4 151
+#define TEGRA194_RESET_PEX_USB_UPHY_L5 152
+#define TEGRA194_RESET_PEX_USB_UPHY_L6 153
+#define TEGRA194_RESET_PEX_USB_UPHY_L7 154
+#define TEGRA194_RESET_PEX_USB_UPHY_L8 155
+#define TEGRA194_RESET_PEX_USB_UPHY_L9 156
+#define TEGRA194_RESET_PEX_USB_UPHY_L10 157
+#define TEGRA194_RESET_PEX_USB_UPHY_L11 158
+#define TEGRA194_RESET_PEX_USB_UPHY_PLL0 159
+#define TEGRA194_RESET_PEX_USB_UPHY_PLL1 160
+#define TEGRA194_RESET_PEX_USB_UPHY_PLL2 161
+#define TEGRA194_RESET_PEX_USB_UPHY_PLL3 162
+
+#endif
diff --git a/include/dt-bindings/sound/rt5651.h b/include/dt-bindings/sound/rt5651.h
new file mode 100644
index 000000000000..2f2dac915168
--- /dev/null
+++ b/include/dt-bindings/sound/rt5651.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_RT5651_H
+#define __DT_RT5651_H
+
+#define RT5651_JD_NULL 0
+#define RT5651_JD1_1 1
+#define RT5651_JD1_2 2
+#define RT5651_JD2 3
+
+#define RT5651_OVCD_SF_0P5 0
+#define RT5651_OVCD_SF_0P75 1
+#define RT5651_OVCD_SF_1P0 2
+#define RT5651_OVCD_SF_1P5 3
+
+#endif /* __DT_RT5651_H */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 9da6ce22803f..6502feb9524b 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -90,6 +90,8 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
void kvm_timer_init_vhe(void);
+bool kvm_arch_timer_get_input_level(int vintid);
+
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
new file mode 100644
index 000000000000..e518e4e3dfb5
--- /dev/null
+++ b/include/kvm/arm_psci.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __KVM_ARM_PSCI_H__
+#define __KVM_ARM_PSCI_H__
+
+#include <linux/kvm_host.h>
+#include <uapi/linux/psci.h>
+
+#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
+#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
+#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
+
+#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+
+/*
+ * We need the KVM pointer independently of the vcpu, as we can call
+ * this from HYP, and need to apply kern_hyp_va on it...
+ */
+static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+{
+ /*
+ * Our PSCI implementation stays the same across versions from
+ * v0.2 onward, only adding the few mandatory functions (such
+ * as FEATURES with 1.0) that are required by newer
+ * revisions. It is thus safe to return the latest.
+ */
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+ return KVM_ARM_PSCI_LATEST;
+
+ return KVM_ARM_PSCI_0_1;
+}
+
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_ARM_PSCI_H__ */
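Callers typically use kvm_psci_version() to gate features that only exist from PSCI 0.2/1.0 onward. A minimal sketch, assuming the caller already holds valid vcpu and kvm pointers (the helper name is illustrative, not part of this header):

#include <kvm/arm_psci.h>

/* Sketch: gate SMCCC-based features on the guest's negotiated PSCI version. */
static bool example_guest_has_psci_1_0(struct kvm_vcpu *vcpu, struct kvm *kvm)
{
	return kvm_psci_version(vcpu, kvm) >= KVM_ARM_PSCI_1_0;
}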
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 8c896540a72c..24f03941ada8 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -57,11 +57,15 @@ struct vgic_global {
/* Physical address of vgic virtual cpu interface */
phys_addr_t vcpu_base;
- /* GICV mapping */
+ /* GICV mapping, kernel VA */
void __iomem *vcpu_base_va;
+ /* GICV mapping, HYP VA */
+ void __iomem *vcpu_hyp_va;
- /* virtual control interface mapping */
+ /* virtual control interface mapping, kernel VA */
void __iomem *vctrl_base;
+ /* virtual control interface mapping, HYP VA */
+ void __iomem *vctrl_hyp;
/* Number of implemented list registers */
int nr_lr;
@@ -130,6 +134,17 @@ struct vgic_irq {
u8 priority;
enum vgic_irq_config config; /* Level or edge */
+ /*
+ * Callback function pointer to in-kernel devices that can tell us the
+	 * state of the input level of a mapped level-triggered IRQ faster than
+	 * peeking into the physical GIC.
+	 *
+	 * Always called in a non-preemptible section; the function can use
+ * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
+ * IRQs.
+ */
+ bool (*get_input_level)(int vintid);
+
void *owner; /* Opaque pointer to reserve an interrupt
for in-kernel devices. */
};
@@ -198,10 +213,6 @@ struct vgic_dist {
int nr_spis;
- /* TODO: Consider moving to global state */
- /* Virtual control interface mapping */
- void __iomem *vctrl_base;
-
/* base addresses in guest physical address space: */
gpa_t vgic_dist_base; /* distributor */
union {
@@ -252,7 +263,6 @@ struct vgic_dist {
struct vgic_v2_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
- u64 vgic_elrsr; /* Saved only */
u32 vgic_apr;
u32 vgic_lr[VGIC_V2_MAX_LRS];
};
@@ -261,7 +271,6 @@ struct vgic_v3_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_sre; /* Restored only, change ignored */
- u32 vgic_elrsr; /* Saved only */
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
u64 vgic_lr[VGIC_V3_MAX_LRS];
@@ -331,7 +340,7 @@ void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid);
+			  u32 vintid, bool (*get_input_level)(int vintid));
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
@@ -349,6 +358,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
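The new get_input_level parameter lets kvm_vgic_map_phys_irq() query a mapped level-triggered interrupt's line state without reading the physical GIC; the arch timer's kvm_arch_timer_get_input_level() (declared earlier in this series) is the intended callback. A minimal sketch of the call, with an illustrative wrapper name:

#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

/* Sketch: map the vtimer PPI and let the timer code report its line state. */
static int example_map_vtimer(struct kvm_vcpu *vcpu, unsigned int host_irq,
			      u32 vintid)
{
	return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid,
				     kvm_arch_timer_get_input_level);
}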
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index dc1ebfeeb5ec..15bfb15c2fa5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -56,6 +56,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+#define ACPI_HANDLE_FWNODE(fwnode) \
+ acpi_device_handle(to_acpi_device_node(fwnode))
static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
{
@@ -451,6 +453,7 @@ void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_nvs_nosave(void);
void __init acpi_nvs_nosave_s3(void);
+void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
@@ -584,6 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
+const void *acpi_device_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -619,6 +623,13 @@ bool acpi_gtdt_c3stop(int type);
int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
#endif
+#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER
+static inline u64 acpi_arch_get_root_pointer(void)
+{
+ return 0;
+}
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -626,6 +637,7 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count)
#define ACPI_COMPANION(dev) (NULL)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
+#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
struct fwnode_handle;
@@ -640,6 +652,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
+static inline const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ return NULL;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -755,6 +773,11 @@ static inline const struct acpi_device_id *acpi_match_device(
return NULL;
}
+static inline const void *acpi_device_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
static inline bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
@@ -978,6 +1001,11 @@ struct acpi_gpio_mapping {
const char *name;
const struct acpi_gpio_params *data;
unsigned int size;
+
+/* Ignore IoRestriction field */
+#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0)
+
+ unsigned int quirks;
};
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
@@ -1242,9 +1270,12 @@ static inline bool acpi_has_watchdog(void) { return false; }
#ifdef CONFIG_ACPI_SPCR_TABLE
extern bool qdf2400_e44_present;
-int parse_spcr(bool earlycon);
+int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
#else
-static inline int parse_spcr(bool earlycon) { return 0; }
+static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
+{
+ return 0;
+}
#endif
#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
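acpi_device_get_match_data() returns the driver_data of the matching acpi_device_id entry, so drivers no longer need to call acpi_match_device() and dereference the result themselves. A minimal probe-path sketch; struct example_cfg and example_probe() are hypothetical:

#include <linux/acpi.h>
#include <linux/errno.h>

struct example_cfg {			/* hypothetical per-ID configuration */
	unsigned int clk_rate_hz;
};

/* Sketch: fetch per-device-ID data during probe. */
static int example_probe(struct device *dev)
{
	const struct example_cfg *cfg = acpi_device_get_match_data(dev);

	if (!cfg)
		return -ENODEV;
	/* ... program the device using cfg->clk_rate_hz ... */
	return 0;
}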
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 2f7a29242b87..38cd77b39a64 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -26,7 +26,8 @@
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
-int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
+int iort_register_domain_token(int trans_id, phys_addr_t base,
+ struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
@@ -38,6 +39,7 @@ int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
#else
static inline void acpi_iort_init(void) { }
static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
@@ -52,6 +54,9 @@ static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
static inline const struct iommu_ops *iort_iommu_configure(
struct device *dev)
{ return NULL; }
+static inline
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+{ return 0; }
#endif
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 304511267c82..2b709416de05 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, freq_scale);
static inline
-unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
+unsigned long topology_get_freq_scale(int cpu)
{
return per_cpu(freq_scale, cpu);
}
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 4c5bca38c653..a031897fca76 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,14 +14,16 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <uapi/linux/const.h>
+
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
* http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
*/
-#define ARM_SMCCC_STD_CALL 0
-#define ARM_SMCCC_FAST_CALL 1
+#define ARM_SMCCC_STD_CALL _AC(0,U)
+#define ARM_SMCCC_FAST_CALL _AC(1,U)
#define ARM_SMCCC_TYPE_SHIFT 31
#define ARM_SMCCC_SMC_32 0
@@ -60,6 +62,24 @@
#define ARM_SMCCC_QUIRK_NONE 0
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+#define ARM_SMCCC_VERSION_1_0 0x10000
+#define ARM_SMCCC_VERSION_1_1 0x10001
+
+#define ARM_SMCCC_VERSION_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 1)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST "smc #0"
+#define SMCCC_HVC_INST "hvc #0"
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST __SMC(0)
+#define SMCCC_HVC_INST __HVC(0)
+
+#endif
+
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...) \
+ ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __constraint_write_0 \
+ "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_1 \
+ "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_2 \
+ "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
+#define __constraint_write_3 \
+ "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
+#define __constraint_write_4 __constraint_write_3
+#define __constraint_write_5 __constraint_write_4
+#define __constraint_write_6 __constraint_write_5
+#define __constraint_write_7 __constraint_write_6
+
+#define __constraint_read_0
+#define __constraint_read_1
+#define __constraint_read_2
+#define __constraint_read_3
+#define __constraint_read_4 "r" (r4)
+#define __constraint_read_5 __constraint_read_4, "r" (r5)
+#define __constraint_read_6 __constraint_read_5, "r" (r6)
+#define __constraint_read_7 __constraint_read_6, "r" (r7)
+
+#define __declare_arg_0(a0, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register unsigned long r1 asm("r1"); \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_1(a0, a1, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_2(a0, a1, a2, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register typeof(a2) r2 asm("r2") = a2; \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_3(a0, a1, a2, a3, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register typeof(a2) r2 asm("r2") = a2; \
+ register typeof(a3) r3 asm("r3") = a3
+
+#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ __declare_arg_3(a0, a1, a2, a3, res); \
+ register typeof(a4) r4 asm("r4") = a4
+
+#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+ __declare_arg_4(a0, a1, a2, a3, a4, res); \
+ register typeof(a5) r5 asm("r5") = a5
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+ register typeof(a6) r6 asm("r6") = a6
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+ register typeof(a7) r7 asm("r7") = a7
+
+#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
+
+#define ___constraints(count) \
+ : __constraint_write_ ## count \
+ : __constraint_read_ ## count \
+ : "memory"
+#define __constraints(count) ___constraints(count)
+
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...) \
+ do { \
+ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ asm volatile(inst "\n" \
+ __constraints(__count_args(__VA_ARGS__))); \
+ if (___res) \
+ *___res = (typeof(*___res)){r0, r1, r2, r3}; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7 prior
+ * to the SMC instruction. If @res is not NULL, it is updated with the contents
+ * of registers 0 to 3 on return from the SMC instruction.
+ */
+#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7 prior
+ * to the HVC instruction. If @res is not NULL, it is updated with the contents
+ * of registers 0 to 3 on return from the HVC instruction.
+ */
+#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
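A minimal sketch of the new SMCCC v1.1 interface, querying whether firmware implements ARCH_WORKAROUND_1. It assumes the firmware conduit is SMC (real callers pick SMC or HVC based on the PSCI conduit), and the wrapper name is illustrative:

#include <linux/arm-smccc.h>

/* Sketch: ask firmware whether ARM_SMCCC_ARCH_WORKAROUND_1 is implemented. */
static bool example_has_arch_workaround_1(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	/* A negative value in a0 means "not implemented". */
	return (int)res.a0 >= 0;
}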
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
new file mode 100644
index 000000000000..942afbd544b7
--- /dev/null
+++ b/include/linux/arm_sdei.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __LINUX_ARM_SDEI_H
+#define __LINUX_ARM_SDEI_H
+
+#include <uapi/linux/arm_sdei.h>
+
+enum sdei_conduit_types {
+ CONDUIT_INVALID = 0,
+ CONDUIT_SMC,
+ CONDUIT_HVC,
+};
+
+#include <asm/sdei.h>
+
+/* Arch code should override this to set the entry point from firmware... */
+#ifndef sdei_arch_get_entry_point
+#define sdei_arch_get_entry_point(conduit) (0)
+#endif
+
+/*
+ * When an event occurs sdei_event_handler() will call a user-provided callback
+ * like this in NMI context on the CPU that received the event.
+ */
+typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg);
+
+/*
+ * Register your callback to claim an event. The event must be described
+ * by firmware.
+ */
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg);
+
+/*
+ * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling
+ * it until it succeeds.
+ */
+int sdei_event_unregister(u32 event_num);
+
+int sdei_event_enable(u32 event_num);
+int sdei_event_disable(u32 event_num);
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+/* For use by arch code when CPU hotplug notifiers are not appropriate. */
+int sdei_mask_local_cpu(void);
+int sdei_unmask_local_cpu(void);
+#else
+static inline int sdei_mask_local_cpu(void) { return 0; }
+static inline int sdei_unmask_local_cpu(void) { return 0; }
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+
+
+/*
+ * This struct represents an event that has been registered. The driver
+ * maintains a list of all events, and which ones are registered. (Private
+ * events have one entry in the list, but are registered on each CPU).
+ * A pointer to this struct is passed to firmware, and back to the event
+ * handler. The event handler can then use this to invoke the registered
+ * callback, without having to walk the list.
+ *
+ * For CPU private events, this structure is per-cpu.
+ */
+struct sdei_registered_event {
+ /* For use by arch code: */
+ struct pt_regs interrupted_regs;
+
+ sdei_event_callback *callback;
+ void *callback_arg;
+ u32 event_num;
+ u8 priority;
+};
+
+/* The arch code entry point should then call this when an event arrives. */
+int notrace sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
+/* arch code may use this to retrieve the extra registers. */
+int sdei_api_event_context(u32 query, u64 *result);
+
+#endif /* __LINUX_ARM_SDEI_H */
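A minimal sketch of the registration flow described in the comments above; EXAMPLE_SDEI_EVENT is a hypothetical firmware-assigned event number and the function names are illustrative:

#include <linux/arm_sdei.h>

#define EXAMPLE_SDEI_EVENT	0x40000000	/* hypothetical event number */

/* Runs in NMI context on the CPU that took the event; keep it minimal. */
static int example_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
{
	return 0;
}

/* Sketch: claim the event, then enable delivery. */
static int example_sdei_setup(void)
{
	int err;

	err = sdei_event_register(EXAMPLE_SDEI_EVENT, example_sdei_cb, NULL);
	if (err)
		return err;

	return sdei_event_enable(EXAMPLE_SDEI_EVENT);
}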
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c7a353825450..40d150ad7e07 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -448,6 +448,8 @@ enum {
ATA_SET_MAX_LOCK = 0x02,
ATA_SET_MAX_UNLOCK = 0x03,
ATA_SET_MAX_FREEZE_LOCK = 0x04,
+ ATA_SET_MAX_PASSWD_DMA = 0x05,
+ ATA_SET_MAX_UNLOCK_DMA = 0x06,
/* feature values for DEVICE CONFIGURATION OVERLAY */
ATA_DCO_RESTORE = 0xC0,
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 4d356e168692..40373920ea58 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -113,10 +113,12 @@ extern void aarp_proto_init(void);
/* Inter module exports */
/* Give a device find its atif control structure */
+#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
{
return dev->atalk_ptr;
}
+#endif
extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev);
extern struct net_device *atrtr_get_dev(struct atalk_addr *sa);
diff --git a/include/linux/audit.h b/include/linux/audit.h
index af410d9fbf2d..75d5b031e802 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -146,8 +146,7 @@ extern void audit_log_d_path(struct audit_buffer *ab,
const struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
char *key);
-extern void audit_log_link_denied(const char *operation,
- const struct path *link);
+extern void audit_log_link_denied(const char *operation);
extern void audit_log_lost(const char *message);
extern int audit_log_task_context(struct audit_buffer *ab);
@@ -194,8 +193,7 @@ static inline void audit_log_d_path(struct audit_buffer *ab,
{ }
static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
-static inline void audit_log_link_denied(const char *string,
- const struct path *link)
+static inline void audit_log_link_denied(const char *string)
{ }
static inline int audit_log_task_context(struct audit_buffer *ab)
{
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 3ce61342fa31..b0a7f315bfbe 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -136,15 +136,21 @@ enum virtchnl_ops {
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
};
-/* This macro is used to generate a compilation error if a structure
+/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
@@ -244,6 +250,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -496,6 +503,81 @@ struct virtchnl_rss_hena {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+ enum virtchnl_flow_type flow_type;
+ enum virtchnl_action action;
+ u32 action_meta;
+ __u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
@@ -711,6 +793,25 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_REQUEST_QUEUES:
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = sizeof(struct virtchnl_tc_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+ valid_len += vti->num_tc *
+ sizeof(struct virtchnl_channel_info);
+ if (vti->num_tc == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
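The size-check macros rely on a compile-time divide by zero, so a layout mismatch fails the build rather than silently changing the wire format. A small illustration with a hypothetical 8-byte message:

#include <linux/avf/virtchnl.h>

/* Sketch: an 8-byte wire structure verified at compile time. */
struct example_msg {
	u32 field_a;
	u32 field_b;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, example_msg);	/* builds */
/* VIRTCHNL_CHECK_STRUCT_LEN(12, example_msg); would divide by zero. */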
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0033eb..09da0f124699 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -175,7 +175,7 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
}
long congestion_wait(int sync, long timeout);
-long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
+long wait_iff_congested(int sync, long timeout);
static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
@@ -329,15 +329,15 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
* @inode: inode of interest
*
* Returns the wb @inode is currently associated with. The caller must be
- * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
+ * holding either @inode->i_lock, the i_pages lock, or the
* associated wb's list_lock.
*/
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
(!lockdep_is_held(&inode->i_lock) &&
- !lockdep_is_held(&inode->i_mapping->tree_lock) &&
+ !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
!lockdep_is_held(&inode->i_wb->list_lock)));
#endif
return inode->i_wb;
@@ -349,7 +349,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
* @lockedp: temp bool output param, to be passed to the end function
*
* The caller wants to access the wb associated with @inode but isn't
- * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
+ * holding inode->i_lock, the i_pages lock or wb->list_lock. This
* function determines the wb associated with @inode and ensures that the
* association doesn't change until the transaction is finished with
* unlocked_inode_to_wb_end().
@@ -370,11 +370,11 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
if (unlikely(*lockedp))
- spin_lock_irq(&inode->i_mapping->tree_lock);
+ xa_lock_irq(&inode->i_mapping->i_pages);
/*
- * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
- * inode_to_wb() will bark. Deref directly.
+ * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
+ * lock. inode_to_wb() will bark. Deref directly.
*/
return inode->i_wb;
}
@@ -387,7 +387,7 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
if (unlikely(locked))
- spin_unlock_irq(&inode->i_mapping->tree_lock);
+ xa_unlock_irq(&inode->i_mapping->i_pages);
rcu_read_unlock();
}
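The begin/end pair above is how callers are expected to dereference inode->i_wb when they hold none of i_lock, the i_pages lock, or wb->list_lock. A minimal sketch of the pattern; the wrapper name is illustrative and the body only hints at what a real caller (e.g. writeback accounting) would do:

#include <linux/backing-dev.h>

/* Sketch: pin the inode-to-wb association for a short, lock-free access. */
static void example_inspect_wb(struct inode *inode)
{
	struct bdi_writeback *wb;
	bool locked;

	wb = unlocked_inode_to_wb_begin(inode, &locked);
	/* ... read or update per-wb state through wb ... */
	unlocked_inode_to_wb_end(inode, locked);
}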
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index af7003548593..2baab6f3861d 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -130,6 +130,48 @@ static inline int backlight_update_status(struct backlight_device *bd)
return ret;
}
+/**
+ * backlight_enable - Enable backlight
+ * @bd: the backlight device to enable
+ */
+static inline int backlight_enable(struct backlight_device *bd)
+{
+ if (!bd)
+ return 0;
+
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.fb_blank = FB_BLANK_UNBLANK;
+ bd->props.state &= ~BL_CORE_FBBLANK;
+
+ return backlight_update_status(bd);
+}
+
+/**
+ * backlight_disable - Disable backlight
+ * @bd: the backlight device to disable
+ */
+static inline int backlight_disable(struct backlight_device *bd)
+{
+ if (!bd)
+ return 0;
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.fb_blank = FB_BLANK_POWERDOWN;
+ bd->props.state |= BL_CORE_FBBLANK;
+
+ return backlight_update_status(bd);
+}
+
+/**
+ * backlight_put - Drop backlight reference
+ * @bd: the backlight device to put
+ */
+static inline void backlight_put(struct backlight_device *bd)
+{
+ if (bd)
+ put_device(&bd->dev);
+}
+
extern struct backlight_device *backlight_device_register(const char *name,
struct device *dev, void *devdata, const struct backlight_ops *ops,
const struct backlight_properties *props);
@@ -173,4 +215,20 @@ of_find_backlight_by_node(struct device_node *node)
}
#endif
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+struct backlight_device *of_find_backlight(struct device *dev);
+struct backlight_device *devm_of_find_backlight(struct device *dev);
+#else
+static inline struct backlight_device *of_find_backlight(struct device *dev)
+{
+ return NULL;
+}
+
+static inline struct backlight_device *
+devm_of_find_backlight(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
#endif
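Together, devm_of_find_backlight() and backlight_enable()/backlight_disable() let a panel driver manage an optional DT-described backlight with almost no boilerplate. A minimal sketch; the function name is illustrative and assumes the usual "backlight" phandle in the panel node:

#include <linux/backlight.h>
#include <linux/err.h>

/* Sketch: resolve an optional backlight at probe time and switch it on. */
static int example_panel_enable(struct device *dev)
{
	struct backlight_device *bl;

	bl = devm_of_find_backlight(dev);	/* NULL when none is described */
	if (IS_ERR(bl))
		return PTR_ERR(bl);		/* e.g. -EPROBE_DEFER */

	return backlight_enable(bl);		/* no-op for a NULL backlight */
}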
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
deleted file mode 100644
index a69554ef8476..000000000000
--- a/include/linux/bfin_mac.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Blackfin On-Chip MAC Driver
- *
- * Copyright 2004-2010 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _LINUX_BFIN_MAC_H_
-#define _LINUX_BFIN_MAC_H_
-
-#include <linux/phy.h>
-
-struct bfin_phydev_platform_data {
- unsigned short addr;
- int irq;
-};
-
-struct bfin_mii_bus_platform_data {
- int phydev_number;
- struct bfin_phydev_platform_data *phydev_data;
- const unsigned short *mac_peripherals;
- int phy_mode;
- unsigned int phy_mask;
- unsigned short vlan1_mask, vlan2_mask;
-};
-
-#endif
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index b0abe21d6cc9..4955e0863b83 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -61,6 +61,8 @@ struct linux_binprm {
unsigned interp_flags;
unsigned interp_data;
unsigned long loader, exec;
+
+ struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
} __randomize_layout;
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
@@ -118,6 +120,7 @@ extern int __must_check remove_arg_zero(struct linux_binprm *);
extern int search_binary_handler(struct linux_binprm *);
extern int flush_old_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
+extern void finalize_exec(struct linux_binprm *bprm);
extern void would_dump(struct linux_binprm *, struct file *);
extern int suid_dumpable;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 23d29b39f71e..ce547a25e8ae 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -300,6 +300,29 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
bv->bv_len = iter.bi_bvec_done;
}
+static inline unsigned bio_pages_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_vcnt;
+}
+
+static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_io_vec;
+}
+
+static inline struct page *bio_first_page_all(struct bio *bio)
+{
+ return bio_first_bvec_all(bio)->bv_page;
+}
+
+static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return &bio->bi_io_vec[bio->bi_vcnt - 1];
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -477,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
#endif
extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);
extern struct bio *bio_copy_user_iov(struct request_queue *,
@@ -489,6 +511,7 @@ void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
+extern const char *bio_devname(struct bio *bio, char *buffer);
#define bio_set_dev(bio, bdev) \
do { \
@@ -507,9 +530,6 @@ do { \
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
-#define bio_devname(bio, buf) \
- __bdevname(bio_dev(bio), (buf))
-
#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
void bio_disassociate_task(struct bio *bio);
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 1030651f8309..cf2588d81148 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -16,6 +16,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <asm/byteorder.h>
/*
* Bitfield access macros
@@ -103,4 +104,49 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
+extern void __compiletime_warning("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+ if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+ __bad_mask();
+ return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+ return field / field_multiplier(field);
+}
+#define ____MAKE_OP(type,base,to,from) \
+static __always_inline __##type type##_encode_bits(base v, base field) \
+{ \
+ if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \
+ __field_overflow(); \
+ return to((v & field_mask(field)) * field_multiplier(field)); \
+} \
+static __always_inline __##type type##_replace_bits(__##type old, \
+ base val, base field) \
+{ \
+ return (old & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline void type##p_replace_bits(__##type *p, \
+ base val, base field) \
+{ \
+ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline base type##_get_bits(__##type v, base field) \
+{ \
+ return (from(v) & field)/field_multiplier(field); \
+}
+#define __MAKE_OP(size) \
+ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
+ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
+ ____MAKE_OP(u##size,u##size,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
#endif
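The generated helpers take the mask itself (a contiguous run of bits) and derive the shift from its lowest set bit, so fixed-endian register words can be packed and unpacked without manual shifting. A small sketch with a hypothetical little-endian 16-bit word whose ID field lives in bits 7:4:

#include <linux/bitfield.h>

#define EXAMPLE_ID_MASK		0x00f0		/* hypothetical field, bits 7:4 */

/* Sketch: pack and unpack the ID field of a little-endian 16-bit word. */
static __le16 example_pack_id(u8 id)
{
	return le16_encode_bits(id, EXAMPLE_ID_MASK);
}

static u8 example_unpack_id(__le16 word)
{
	return le16_get_bits(word, EXAMPLE_ID_MASK);
}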
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 3489253e38fc..1ee46f492267 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -64,9 +64,14 @@
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
- * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
- * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
+ * bitmap_to_arr32(buf, src, nbits)            Copy nbits from src to u32[] buf
*
+ * Note: bitmap_zero() and bitmap_fill() operate over whole unsigned longs,
+ * that is, bits beyond nbits, up to the next unsigned long boundary, will
+ * be zeroed or filled as well. Consider using bitmap_clear() or
+ * bitmap_set() instead when explicit zeroing or filling of exactly nbits
+ * bits is needed.
*/
/**
@@ -83,8 +88,12 @@
* test_and_change_bit(bit, addr) Change bit and return old value
* find_first_zero_bit(addr, nbits) Position first zero bit in *addr
* find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_zero_bit(addr, nbits, bit)
+ * Position next zero bit in *addr >= bit
* find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ * find_next_and_bit(addr1, addr2, nbits, bit)
+ * Same as find_next_bit, but in
+ * (*addr1 & *addr2)
*
*/
@@ -174,14 +183,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
- unsigned int nbits,
- const u32 *buf,
- unsigned int nwords);
-extern unsigned int bitmap_to_u32array(u32 *buf,
- unsigned int nwords,
- const unsigned long *bitmap,
- unsigned int nbits);
+
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
@@ -209,12 +211,12 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- unsigned int nlongs = BITS_TO_LONGS(nbits);
- if (!small_const_nbits(nbits)) {
- unsigned int len = (nlongs - 1) * sizeof(unsigned long);
- memset(dst, 0xff, len);
+ if (small_const_nbits(nbits))
+ *dst = ~0UL;
+ else {
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
}
- dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
@@ -228,6 +230,35 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
}
}
+/*
+ * Copy bitmap and clear tail bits in last word.
+ */
+static inline void bitmap_copy_clear_tail(unsigned long *dst,
+ const unsigned long *src, unsigned int nbits)
+{
+ bitmap_copy(dst, src, nbits);
+ if (nbits % BITS_PER_LONG)
+ dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+
+/*
+ * On 32-bit systems bitmaps are represented as u32 arrays internally, and
+ * therefore conversion is not needed when copying data from/to arrays of u32.
+ */
+#if BITS_PER_LONG == 64
+extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+ unsigned int nbits);
+extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
+ unsigned int nbits);
+#else
+#define bitmap_from_arr32(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *) (bitmap), \
+ (const unsigned long *) (buf), (nbits))
+#define bitmap_to_arr32(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *) (buf), \
+ (const unsigned long *) (bitmap), (nbits))
+#endif
+
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
@@ -271,12 +302,20 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
__bitmap_complement(dst, src, nbits);
}
+#ifdef __LITTLE_ENDIAN
+#define BITMAP_MEM_ALIGNMENT 8
+#else
+#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+#endif
+#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+
static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
- if (__builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
return !memcmp(src1, src2, nbits / 8);
return __bitmap_equal(src1, src2, nbits);
}
@@ -327,8 +366,10 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
- else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) &&
- __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+ __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
memset((char *)map + start / 8, 0xff, nbits / 8);
else
__bitmap_set(map, start, nbits);
@@ -339,8 +380,10 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
- else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) &&
- __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+ __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
memset((char *)map + start / 8, 0, nbits / 8);
else
__bitmap_clear(map, start, nbits);
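bitmap_from_arr32()/bitmap_to_arr32() replace the old bitmap_from_u32array()/bitmap_to_u32array() helpers and take a bit count instead of a word count; on 32-bit kernels they degenerate into a plain copy. A short sketch round-tripping a 40-bit map through a u32 buffer (sizes are illustrative):

#include <linux/bitmap.h>

/* Sketch: exchange a 40-bit bitmap with user-visible u32 words. */
static void example_roundtrip(unsigned long *bits)
{
	u32 buf[2];

	bitmap_to_arr32(buf, bits, 40);		/* unused tail bits are cleared */
	bitmap_from_arr32(bits, buf, 40);
}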
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index e9825ff57b15..6c666fd7de3c 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -88,6 +88,7 @@ struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
+ bool offline;
};
/*
@@ -660,12 +661,14 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
struct blkg_rwstat *from)
{
- struct blkg_rwstat v = blkg_rwstat_read(from);
+ u64 sum[BLKG_RWSTAT_NR];
int i;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(atomic64_read(&v.aux_cnt[i]) +
- atomic64_read(&from->aux_cnt[i]),
+ sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
&to->aux_cnt[i]);
}
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 6338551e0fb9..9f4c17f0d2d8 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -5,6 +5,7 @@
struct blk_mq_tag_set;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+ int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 95c9a5c862e2..8efcf49796a3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
unsigned int queue_num;
atomic_t nr_active;
+ unsigned int nr_expired;
struct hlist_node cpuhp_dead;
struct kobject kobj;
@@ -65,7 +66,7 @@ struct blk_mq_hw_ctx {
#endif
/* Must be the last member - see also blk_mq_hw_ctx_size(). */
- struct srcu_struct queue_rq_srcu[0];
+ struct srcu_struct srcu[0];
};
struct blk_mq_tag_set {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 9e7d8bd776d2..17b18b91ebac 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -20,8 +20,13 @@ typedef void (bio_end_io_t) (struct bio *);
/*
* Block error status values. See block/blk-core:blk_errors for the details.
+ * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
*/
+#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
+typedef u32 __bitwise blk_status_t;
+#else
typedef u8 __bitwise blk_status_t;
+#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT ((__force blk_status_t)2)
@@ -39,6 +44,52 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_AGAIN ((__force blk_status_t)12)
+/*
+ * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
+ * device related resources are unavailable, but the driver can guarantee
+ * that the queue will be rerun in the future once resources become
+ * available again. This is typically the case for device specific
+ * resources that are consumed for IO. If the driver fails allocating these
+ * resources, we know that inflight (or pending) IO will free these
+ * resource upon completion.
+ * resources upon completion.
+ * This is different from BLK_STS_RESOURCE in that it explicitly references
+ * a device specific resource. For resources of wider scope, allocation
+ * failure can happen without having pending IO. This means that we can't
+ * rely on request completions freeing these resources, as IO may not be in
+ * flight. Examples of that are kernel memory allocations, DMA mappings, or
+ * any other system wide resources.
+ */
+#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
+
+/**
+ * blk_path_error - returns true if error may be path related
+ * @error: status the request was completed with
+ *
+ * Description:
+ * This classifies block error status into non-retryable errors and ones
+ * that may be successful if retried on a failover path.
+ *
+ * Return:
+ * %false - retrying failover path will not help
+ * %true - may succeed if retried
+ */
+static inline bool blk_path_error(blk_status_t error)
+{
+ switch (error) {
+ case BLK_STS_NOTSUPP:
+ case BLK_STS_NOSPC:
+ case BLK_STS_TARGET:
+ case BLK_STS_NEXUS:
+ case BLK_STS_MEDIUM:
+ case BLK_STS_PROTECTION:
+ return false;
+ }
+
+ /* Anything else could be a path failure, so should be retried */
+ return true;
+}
+
struct blk_issue_stat {
u64 stat;
};
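blk_path_error() is aimed at multipath-style consumers that must decide whether a failed request could still succeed on another path. A minimal sketch of that decision; the helper name is illustrative:

#include <linux/blk_types.h>

/* Sketch: only errors that may be path related are worth a failover retry. */
static bool example_should_failover(blk_status_t error)
{
	return error && blk_path_error(error);
}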
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0ce8a372d506..9af3e0f430bc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,6 +27,8 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/seqlock.h>
+#include <linux/u64_stats_sync.h>
struct module;
struct scsi_ioctl_command;
@@ -47,7 +49,7 @@ struct blk_stat_callback;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
-/* Must be consisitent with blk_mq_poll_stats_bkt() */
+/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16
/*
@@ -121,6 +123,12 @@ typedef __u32 __bitwise req_flags_t;
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+/* The per-zone write lock is held for this request */
+#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* timeout is expired */
+#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
+/* already slept for hybrid poll */
+#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
@@ -133,12 +141,6 @@ typedef __u32 __bitwise req_flags_t;
* especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
struct request {
- struct list_head queuelist;
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
-
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
@@ -148,8 +150,6 @@ struct request {
int internal_tag;
- unsigned long atomic_flags;
-
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
int tag;
@@ -158,6 +158,8 @@ struct request {
struct bio *bio;
struct bio *biotail;
+ struct list_head queuelist;
+
/*
* The hash is used inside the scheduler, and killed once the
* request reaches the dispatch list. The ipi_list is only used
@@ -205,19 +207,16 @@ struct request {
struct hd_struct *part;
unsigned long start_time;
struct blk_issue_stat issue_stat;
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
- unsigned long long start_time_ns;
- unsigned long long io_start_time_ns; /* when passed to hardware */
-#endif
/* Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
unsigned short nr_integrity_segments;
#endif
+ unsigned short write_hint;
unsigned short ioprio;
unsigned int timeout;
@@ -226,11 +225,37 @@ struct request {
unsigned int extra_len; /* length of alignment and padding */
- unsigned short write_hint;
+ /*
+ * On blk-mq, the lower bits of ->gstate (generation number and
+ * state) carry the MQ_RQ_* state value and the upper bits the
+ * generation number which is monotonically incremented and used to
+ * distinguish the reuse instances.
+ *
+ * ->gstate_seq allows updates to ->gstate and other fields
+ * (currently ->deadline) during request start to be read
+ * atomically from the timeout path, so that it can operate on a
+ * coherent set of information.
+ */
+ seqcount_t gstate_seq;
+ u64 gstate;
+
+ /*
+ * ->aborted_gstate is used by the timeout to claim a specific
+ * recycle instance of this request. See blk_mq_timeout_work().
+ */
+ struct u64_stats_sync aborted_gstate_sync;
+ u64 aborted_gstate;
+
+ /* access through blk_rq_set_deadline, blk_rq_deadline */
+ unsigned long __deadline;
- unsigned long deadline;
struct list_head timeout_list;
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+
/*
* completion callback.
*/
@@ -239,6 +264,12 @@ struct request {
/* for bidi */
struct request *next_rq;
+
+#ifdef CONFIG_BLK_CGROUP
+ struct request_list *rl; /* rl this rq is alloced from */
+ unsigned long long start_time_ns;
+ unsigned long long io_start_time_ns; /* when passed to hardware */
+#endif
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -564,6 +595,22 @@ struct request_queue {
struct queue_limits limits;
/*
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit clear) or
+ * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched. All three fields are
+ * initialized by the low level device driver (e.g. scsi/sd.c).
+ * Stacking drivers (device mappers) may or may not initialize
+ * these fields.
+ */
+ unsigned int nr_zones;
+ unsigned long *seq_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+
+ /*
* sg stuff
*/
unsigned int sg_timeout;
@@ -660,73 +707,10 @@ struct request_queue {
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_POLL))
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
-{
- if (q->queue_lock)
- lockdep_assert_held(q->queue_lock);
-}
-
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (test_bit(flag, &q->queue_flags)) {
- __clear_bit(flag, &q->queue_flags);
- return 1;
- }
-
- return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (!test_bit(flag, &q->queue_flags)) {
- __set_bit(flag, &q->queue_flags);
- return 0;
- }
-
- return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- __clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_in_flight(struct request_queue *q)
-{
- return q->in_flight[0] + q->in_flight[1];
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __clear_bit(flag, &q->queue_flags);
-}
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
@@ -757,6 +741,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);
+static inline int queue_in_flight(struct request_queue *q)
+{
+ return q->in_flight[0] + q->in_flight[1];
+}
+
static inline bool blk_account_rq(struct request *rq)
{
return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
@@ -807,6 +796,27 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
+static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+{
+ return q->nr_zones;
+}
+
+static inline unsigned int blk_queue_zone_no(struct request_queue *q,
+ sector_t sector)
+{
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return sector >> ilog2(q->limits.chunk_sectors);
+}
+
+static inline bool blk_queue_zone_is_seq(struct request_queue *q,
+ sector_t sector)
+{
+ if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
+ return false;
+ return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
+}
+
static inline bool rq_is_sync(struct request *rq)
{
return op_is_sync(rq->cmd_flags);
@@ -1012,6 +1022,19 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
}
/*
+ * The basic unit of block I/O is a sector. It is used in a number of contexts
+ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
+ * bytes. Variables of type sector_t represent an offset or size that is a
+ * multiple of 512 bytes. Hence these two constants.
+ */
+#ifndef SECTOR_SHIFT
+#define SECTOR_SHIFT 9
+#endif
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#endif
+
+/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
* blk_rq_cur_bytes() : bytes left in the current segment
@@ -1038,12 +1061,22 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq);
static inline unsigned int blk_rq_sectors(const struct request *rq)
{
- return blk_rq_bytes(rq) >> 9;
+ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}
static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
- return blk_rq_cur_bytes(rq) >> 9;
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_zone_no(struct request *rq)
+{
+ return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+}
+
+static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+{
+ return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
/*
@@ -1063,7 +1096,8 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
- return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ return min(q->limits.max_discard_sectors,
+ UINT_MAX >> SECTOR_SHIFT);
if (unlikely(op == REQ_OP_WRITE_SAME))
return q->limits.max_write_same_sectors;
@@ -1243,7 +1277,8 @@ extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t, int);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
+ spinlock_t *lock);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
@@ -1374,16 +1409,21 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
- return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
+ return blkdev_issue_discard(sb->s_bdev,
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
{
return blkdev_issue_zeroout(sb->s_bdev,
- block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
gfp_mask, 0);
}
@@ -1490,7 +1530,8 @@ static inline int queue_alignment_offset(struct request_queue *q)
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
+ unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
+ << SECTOR_SHIFT;
return (granularity + lim->alignment_offset - alignment) % granularity;
}
@@ -1524,8 +1565,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
return 0;
/* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> 9;
- granularity = lim->discard_granularity >> 9;
+ alignment = lim->discard_alignment >> SECTOR_SHIFT;
+ granularity = lim->discard_granularity >> SECTOR_SHIFT;
if (!granularity)
return 0;
@@ -1536,7 +1577,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
offset = (granularity + alignment - offset) % granularity;
/* Turn it back into bytes, gaah */
- return offset << 9;
+ return offset << SECTOR_SHIFT;
}
static inline int bdev_discard_alignment(struct block_device *bdev)
@@ -1595,7 +1636,15 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
if (q)
return blk_queue_zone_sectors(q);
+ return 0;
+}
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ if (q)
+ return blk_queue_nr_zones(q);
return 0;
}
@@ -1731,8 +1780,6 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
@@ -1971,6 +2018,60 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+bool blk_req_needs_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_unlock(struct request *rq);
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+ if (blk_req_needs_zone_write_lock(rq))
+ __blk_req_zone_write_lock(rq);
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
+ __blk_req_zone_write_unlock(rq);
+}
+
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return rq->q->seq_zones_wlock &&
+ test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ if (!blk_req_needs_zone_write_lock(rq))
+ return true;
+ return !blk_req_zone_is_write_locked(rq);
+}
+#else
+static inline bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ return false;
+}
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+}
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ return true;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
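A hedged sketch of the intended calling pattern for the zone write lock (illustrative only, not taken from the patch): a dispatch path checks and takes the lock before issuing a request, and completion releases it.

static bool example_dispatch(struct request *rq)
{
	/* another request already owns the target zone's write lock */
	if (!blk_req_can_dispatch_to_zone(rq))
		return false;

	blk_req_zone_write_lock(rq);	/* no-op for non-zoned requests */
	/* ... hand rq to the hardware ... */
	return true;
}

static void example_complete(struct request *rq)
{
	blk_req_zone_write_unlock(rq);	/* safe even if never locked */
}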
#else /* CONFIG_BLOCK */
struct block_device;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index a53063e9d7d8..7942a96b1a9d 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -364,15 +364,6 @@ static inline void __init memblock_free_late(
}
#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
-#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
-extern void *alloc_remap(int nid, unsigned long size);
-#else
-static inline void *alloc_remap(int nid, unsigned long size)
-{
- return NULL;
-}
-#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
-
extern void *alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
unsigned long numentries,
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a7f16e0f8d68..30d15e64b993 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -6,6 +6,7 @@
#include <uapi/linux/bpf.h>
struct sock;
+struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;
@@ -63,6 +64,10 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
int __cgroup_bpf_run_filter_sk(struct sock *sk,
enum bpf_attach_type type);
+int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ struct sockaddr *uaddr,
+ enum bpf_attach_type type);
+
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
enum bpf_attach_type type);
@@ -93,16 +98,64 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
__ret; \
})
+#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, type); \
+ } \
+ __ret; \
+})
+
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+
+#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+
+#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, \
- BPF_CGROUP_INET_SOCK_CREATE); \
+ if (cgroup_bpf_enabled) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type); \
+ release_sock(sk); \
} \
__ret; \
})
+#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
+
+#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
+
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
+ sk->sk_prot->pre_connect)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+
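A hedged sketch (editor's illustration, not from the patch) of how an address-family bind path is expected to consult the new sockaddr hooks:

static int example_inet4_bind(struct sock *sk, struct sockaddr *uaddr)
{
	/* let an attached cgroup program inspect or rewrite the address */
	int err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);

	if (err)
		return err;
	/* ... normal bind processing ... */
	return 0;
}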
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
@@ -132,9 +185,18 @@ struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0b25cf87b6d6..95a7abd0ee92 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,13 +17,16 @@
#include <linux/numa.h>
#include <linux/wait.h>
+struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
+struct sock;
/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
+ int (*map_alloc_check)(union bpf_attr *attr);
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
void (*map_release)(struct bpf_map *map, struct file *map_file);
void (*map_free)(struct bpf_map *map);
@@ -72,6 +75,33 @@ struct bpf_map {
char name[BPF_OBJ_NAME_LEN];
};
+struct bpf_offloaded_map;
+
+struct bpf_map_dev_ops {
+ int (*map_get_next_key)(struct bpf_offloaded_map *map,
+ void *key, void *next_key);
+ int (*map_lookup_elem)(struct bpf_offloaded_map *map,
+ void *key, void *value);
+ int (*map_update_elem)(struct bpf_offloaded_map *map,
+ void *key, void *value, u64 flags);
+ int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
+};
+
+struct bpf_offloaded_map {
+ struct bpf_map map;
+ struct net_device *netdev;
+ const struct bpf_map_dev_ops *dev_ops;
+ void *dev_priv;
+ struct list_head offloads;
+};
+
+static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
+{
+ return container_of(map, struct bpf_offloaded_map, map);
+}
+
+extern const struct bpf_map_ops bpf_map_offload_ops;
+
/* function argument constraints */
enum bpf_arg_type {
ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -178,12 +208,15 @@ struct bpf_prog_ops {
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
- const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
+ const struct bpf_func_proto *
+ (*get_func_proto)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
@@ -193,14 +226,20 @@ struct bpf_verifier_ops {
struct bpf_prog *prog, u32 *target_size);
};
-struct bpf_dev_offload {
+struct bpf_prog_offload_ops {
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+};
+
+struct bpf_prog_offload {
struct bpf_prog *prog;
struct net_device *netdev;
void *dev_priv;
struct list_head offloads;
bool dev_state;
- bool verifier_running;
- wait_queue_head_t verifier_done;
+ const struct bpf_prog_offload_ops *dev_ops;
+ void *jited_image;
+ u32 jited_len;
};
struct bpf_prog_aux {
@@ -209,6 +248,10 @@ struct bpf_prog_aux {
u32 max_ctx_offset;
u32 stack_depth;
u32 id;
+ u32 func_cnt;
+ bool offload_requested;
+ struct bpf_prog **func;
+ void *jit_data; /* JIT specific data. arch dependent */
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
const struct bpf_prog_ops *ops;
@@ -220,7 +263,7 @@ struct bpf_prog_aux {
#ifdef CONFIG_SECURITY
void *security;
#endif
- struct bpf_dev_offload *offload;
+ struct bpf_prog_offload *offload;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -295,6 +338,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
struct bpf_prog *old_prog);
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+ __u32 __user *prog_ids, u32 request_cnt,
+ __u32 __user *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
@@ -355,6 +401,9 @@ void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
@@ -363,6 +412,7 @@ void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
extern int sysctl_unprivileged_bpf_disabled;
@@ -409,6 +459,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
@@ -536,14 +587,35 @@ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+ struct bpf_prog *prog);
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
+int bpf_map_offload_update_elem(struct bpf_map *map,
+ void *key, void *value, u64 flags);
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
+int bpf_map_offload_get_next_key(struct bpf_map *map,
+ void *key, void *next_key);
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
- return aux->offload;
+ return aux->offload_requested;
}
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+ return unlikely(map->ops == &bpf_map_offload_ops);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
+void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
@@ -555,9 +627,23 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
return false;
}
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+ return false;
+}
+
+static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_map_offload_map_free(struct bpf_map *map)
+{
+}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
#else
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 978c1d9c9383..2b28fcf6f6ae 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -8,16 +8,19 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act)
BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
@@ -42,7 +45,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-#ifdef CONFIG_STREAM_PARSER
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1632bb13ad8a..7e61c395fddf 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -76,6 +76,14 @@ struct bpf_reg_state {
s64 smax_value; /* maximum possible (s64)value */
u64 umin_value; /* minimum possible (u64)value */
u64 umax_value; /* maximum possible (u64)value */
+ /* Inside the callee two registers can be both PTR_TO_STACK like
+ * R1=fp-8 and R2=fp-8, but one of them points to this function stack
+ * while another to the caller's stack. To differentiate them 'frameno'
+ * is used which is an index in bpf_verifier_state->frame[] array
+ * pointing to bpf_func_state.
+ * This field must be second to last, for states_equal() reasons.
+ */
+ u32 frameno;
/* This field must be last, for states_equal() reasons. */
enum bpf_reg_liveness live;
};
@@ -83,7 +91,8 @@ struct bpf_reg_state {
enum bpf_stack_slot_type {
STACK_INVALID, /* nothing was stored in this stack slot */
STACK_SPILL, /* register spilled into stack */
- STACK_MISC /* BPF program wrote some data into this slot */
+ STACK_MISC, /* BPF program wrote some data into this slot */
+ STACK_ZERO, /* BPF program wrote constant zero */
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@@ -96,13 +105,34 @@ struct bpf_stack_state {
/* state of the program:
* type of all registers and stack info
*/
-struct bpf_verifier_state {
+struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
struct bpf_verifier_state *parent;
+ /* index of call instruction that called into this func */
+ int callsite;
+ /* stack frame number of this function state from pov of
+ * enclosing bpf_verifier_state.
+ * 0 = main function, 1 = first callee.
+ */
+ u32 frameno;
+ /* subprog number == index within subprog_stack_depth
+ * zero == main subprog
+ */
+ u32 subprogno;
+
+ /* should be second to last. See copy_func_state() */
int allocated_stack;
struct bpf_stack_state *stack;
};
+#define MAX_CALL_FRAMES 8
+struct bpf_verifier_state {
+ /* call stack tracking */
+ struct bpf_func_state *frame[MAX_CALL_FRAMES];
+ struct bpf_verifier_state *parent;
+ u32 curframe;
+};
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
@@ -113,6 +143,7 @@ struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ s32 call_imm; /* saved imm field of call insn */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
bool seen; /* this insn was processed by the verifier */
@@ -122,7 +153,7 @@ struct bpf_insn_aux_data {
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
-struct bpf_verifer_log {
+struct bpf_verifier_log {
u32 level;
char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
char __user *ubuf;
@@ -130,16 +161,17 @@ struct bpf_verifer_log {
u32 len_total;
};
-static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
+static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
return log->len_used >= log->len_total - 1;
}
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
- int (*insn_hook)(struct bpf_verifier_env *env,
- int insn_idx, int prev_insn_idx);
-};
+static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
+{
+ return log->level && log->ubuf && !bpf_verifier_log_full(log);
+}
+
+#define BPF_MAX_SUBPROGS 256
/* single container for all structs
* one verifier_env per bpf_check() call
@@ -152,29 +184,33 @@ struct bpf_verifier_env {
bool strict_alignment; /* perform strict pointer alignment checks */
struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
- const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
u32 used_map_cnt; /* number of used maps */
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
-
- struct bpf_verifer_log log;
+ struct bpf_verifier_log log;
+ u32 subprog_starts[BPF_MAX_SUBPROGS];
+ /* computes the stack depth of each bpf function */
+ u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
+ u32 subprog_cnt;
};
+void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
+ va_list args);
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+ const char *fmt, ...);
+
static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
- return env->cur_state->regs;
+ struct bpf_verifier_state *cur = env->cur_state;
+
+ return cur->frame[cur->curframe]->regs;
}
-#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
-#else
-static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
- return -EOPNOTSUPP;
-}
-#endif
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 8ff86b4c1b8a..d3339dd48b1a 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -14,6 +14,7 @@
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM5395 0x0143bcf0
#define PHY_ID_BCM54810 0x03625d00
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index b1be0233ce35..28a7ccc55c89 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -38,12 +38,12 @@ struct bsg_buffer {
};
struct bsg_job {
- struct scsi_request sreq;
struct device *dev;
- struct request *req;
struct kref kref;
+ unsigned int timeout;
+
/* Transport/driver specific request/reply structs */
void *request;
void *reply;
@@ -63,6 +63,9 @@ struct bsg_job {
struct bsg_buffer request_payload;
struct bsg_buffer reply_payload;
+ int result;
+ unsigned int reply_payload_rcv_len;
+
void *dd_data; /* Used for driver-specific storage */
};
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index 2a202e41a3af..0c7dd9ceb139 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -1,34 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BSG_H
-#define BSG_H
+#ifndef _LINUX_BSG_H
+#define _LINUX_BSG_H
#include <uapi/linux/bsg.h>
+struct request;
+
+#ifdef CONFIG_BLK_DEV_BSG
+struct bsg_ops {
+ int (*check_proto)(struct sg_io_v4 *hdr);
+ int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
+ fmode_t mode);
+ int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
+ void (*free_rq)(struct request *rq);
+};
-#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device {
struct device *class_dev;
struct device *parent;
int minor;
struct request_queue *queue;
struct kref ref;
+ const struct bsg_ops *ops;
void (*release)(struct device *);
};
-extern int bsg_register_queue(struct request_queue *q,
- struct device *parent, const char *name,
- void (*release)(struct device *));
-extern void bsg_unregister_queue(struct request_queue *);
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+ const char *name, const struct bsg_ops *ops,
+ void (*release)(struct device *));
+int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
+void bsg_unregister_queue(struct request_queue *q);
#else
-static inline int bsg_register_queue(struct request_queue *q,
- struct device *parent, const char *name,
- void (*release)(struct device *))
+static inline int bsg_scsi_register_queue(struct request_queue *q,
+ struct device *parent)
{
return 0;
}
static inline void bsg_unregister_queue(struct request_queue *q)
{
}
-#endif
-
-#endif
+#endif /* CONFIG_BLK_DEV_BSG */
+#endif /* _LINUX_BSG_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 8b1bf8d3d4a2..894e5d125de6 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -81,11 +81,14 @@ struct buffer_head {
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
+ * To avoid resetting buffer flags that are already set (which would cause
+ * a costly cache line transition), check the flag first.
*/
#define BUFFER_FNS(bit, name) \
static __always_inline void set_buffer_##name(struct buffer_head *bh) \
{ \
- set_bit(BH_##bit, &(bh)->b_state); \
+ if (!test_bit(BH_##bit, &(bh)->b_state)) \
+ set_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{ \
@@ -151,7 +154,6 @@ void buffer_check_dirty_writeback(struct page *page,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
-void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 3efed0d742a0..43d1fd50d433 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -8,7 +8,6 @@
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
-#define BUILD_BUG_ON_NULL(e) ((void *)0)
#define BUILD_BUG_ON_INVALID(e) (0)
#define BUILD_BUG_ON_MSG(cond, msg) (0)
#define BUILD_BUG_ON(condition) (0)
@@ -28,7 +27,6 @@
* aren't permitted).
*/
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))
-#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); }))
/*
* BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ec8a4d7af6bd..fe7a22dd133b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -125,4 +125,13 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+/* for iterating one bio from start to end */
+#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
+{ \
+ .bi_sector = 0, \
+ .bi_size = UINT_MAX, \
+ .bi_idx = 0, \
+ .bi_bvec_done = 0, \
+}
+
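A hedged sketch of how the new initializer might be used; the bound on bi_size is supplied by the caller here, since BVEC_ITER_ALL_INIT itself starts with bi_size = UINT_MAX:

static void example_walk_bvecs(struct bio_vec *bvecs, unsigned int total_bytes)
{
	struct bvec_iter start = BVEC_ITER_ALL_INIT;
	struct bvec_iter iter;
	struct bio_vec bv;

	start.bi_size = total_bytes;	/* bound the walk to the known length */
	for_each_bvec(bv, bvecs, iter, start) {
		/* process bv.bv_page, bv.bv_offset, bv.bv_len */
	}
}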
#endif /* __LINUX_BVEC_ITER_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 451aaa0786ae..4b13e0a3e15b 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -156,6 +156,23 @@ static inline void le64_add_cpu(__le64 *var, u64 val)
*var = cpu_to_le64(le64_to_cpu(*var) + val);
}
+/* XXX: this stuff can be optimized */
+static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __le32_to_cpus(buf);
+ buf++;
+ }
+}
+
+static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __cpu_to_le32s(buf);
+ buf++;
+ }
+}
+
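A hedged usage sketch for the new array helpers: convert a buffer of 32-bit words to little-endian for export and back again (the surrounding I/O is elided, and the function name is made up).

static void example_export_words(u32 *words, unsigned int nwords)
{
	cpu_to_le32_array(words, nwords);	/* buffer is now little-endian */
	/* ... write the buffer to disk or the wire ... */
	le32_to_cpu_array(words, nwords);	/* restore CPU byte order */
}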
static inline void be16_add_cpu(__be16 *var, u16 val)
{
*var = cpu_to_be16(be16_to_cpu(*var) + val);
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 61f1cf2d9f44..055aaf5ed9af 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -46,6 +46,7 @@ struct can_priv {
unsigned int bitrate_const_cnt;
const u32 *data_bitrate_const;
unsigned int data_bitrate_const_cnt;
+ u32 bitrate_max;
struct can_clock clock;
enum can_state state;
@@ -166,6 +167,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
+#ifdef CONFIG_OF
+void of_can_transceiver(struct net_device *dev);
+#else
+static inline void of_can_transceiver(struct net_device *dev) { }
+#endif
+
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
struct sk_buff *alloc_canfd_skb(struct net_device *dev,
struct canfd_frame **cfd);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 59042d5ac520..3901927cf6a0 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -204,6 +204,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_OSD_POOLRESEND | \
+ CEPH_FEATURE_MDS_QUOTA | \
CEPH_FEATURE_CRUSH_V4 | \
CEPH_FEATURE_NEW_OSDOP_ENCODING | \
CEPH_FEATURE_SERVER_JEWEL | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 88dd51381aaf..7ecfc88314d8 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -134,6 +134,7 @@ struct ceph_dir_layout {
#define CEPH_MSG_CLIENT_LEASE 0x311
#define CEPH_MSG_CLIENT_SNAP 0x312
#define CEPH_MSG_CLIENT_CAPRELEASE 0x313
+#define CEPH_MSG_CLIENT_QUOTA 0x314
/* pool ops */
#define CEPH_MSG_POOLOP_REPLY 48
@@ -807,4 +808,20 @@ struct ceph_mds_snap_realm {
} __attribute__ ((packed));
/* followed by my snap list, then prior parent snap list */
+/*
+ * quotas
+ */
+struct ceph_mds_quota {
+ __le64 ino; /* ino */
+ struct ceph_timespec rctime;
+ __le64 rbytes; /* dir stats */
+ __le64 rfiles;
+ __le64 rsubdirs;
+ __u8 struct_v; /* compat */
+ __u8 struct_compat;
+ __le32 struct_len;
+ __le64 max_bytes; /* quota max. bytes */
+ __le64 max_files; /* quota max. files */
+} __attribute__ ((packed));
+
#endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index c2ec44cf5098..49c93b9308d7 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -262,6 +262,7 @@ extern struct kmem_cache *ceph_cap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
+extern struct kmem_cache *ceph_dir_file_cachep;
/* ceph_common.c */
extern bool libceph_compatible(void *data);
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index ead9d85f1c11..c7dfcb8a1fb2 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -76,6 +76,7 @@ enum ceph_msg_data_type {
#ifdef CONFIG_BLOCK
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
+ CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
};
static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
@@ -87,22 +88,106 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
#endif /* CONFIG_BLOCK */
+ case CEPH_MSG_DATA_BVECS:
return true;
default:
return false;
}
}
+#ifdef CONFIG_BLOCK
+
+struct ceph_bio_iter {
+ struct bio *bio;
+ struct bvec_iter iter;
+};
+
+#define __ceph_bio_iter_advance_step(it, n, STEP) do { \
+ unsigned int __n = (n), __cur_n; \
+ \
+ while (__n) { \
+ BUG_ON(!(it)->iter.bi_size); \
+ __cur_n = min((it)->iter.bi_size, __n); \
+ (void)(STEP); \
+ bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \
+ if (!(it)->iter.bi_size && (it)->bio->bi_next) { \
+ dout("__ceph_bio_iter_advance_step next bio\n"); \
+ (it)->bio = (it)->bio->bi_next; \
+ (it)->iter = (it)->bio->bi_iter; \
+ } \
+ __n -= __cur_n; \
+ } \
+} while (0)
+
+/*
+ * Advance @it by @n bytes.
+ */
+#define ceph_bio_iter_advance(it, n) \
+ __ceph_bio_iter_advance_step(it, n, 0)
+
+/*
+ * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec.
+ */
+#define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \
+ __ceph_bio_iter_advance_step(it, n, ({ \
+ struct bio_vec bv; \
+ struct bvec_iter __cur_iter; \
+ \
+ __cur_iter = (it)->iter; \
+ __cur_iter.bi_size = __cur_n; \
+ __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \
+ (void)(BVEC_STEP); \
+ }))
+
+#endif /* CONFIG_BLOCK */
+
+struct ceph_bvec_iter {
+ struct bio_vec *bvecs;
+ struct bvec_iter iter;
+};
+
+#define __ceph_bvec_iter_advance_step(it, n, STEP) do { \
+ BUG_ON((n) > (it)->iter.bi_size); \
+ (void)(STEP); \
+ bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \
+} while (0)
+
+/*
+ * Advance @it by @n bytes.
+ */
+#define ceph_bvec_iter_advance(it, n) \
+ __ceph_bvec_iter_advance_step(it, n, 0)
+
+/*
+ * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec.
+ */
+#define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \
+ __ceph_bvec_iter_advance_step(it, n, ({ \
+ struct bio_vec bv; \
+ struct bvec_iter __cur_iter; \
+ \
+ __cur_iter = (it)->iter; \
+ __cur_iter.bi_size = (n); \
+ for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter) \
+ (void)(BVEC_STEP); \
+ }))
+
+#define ceph_bvec_iter_shorten(it, n) do { \
+ BUG_ON((n) > (it)->iter.bi_size); \
+ (it)->iter.bi_size = (n); \
+} while (0)
+
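A hedged sketch of the STEP-hook pattern these macros enable: advance an iterator over @len bytes while accounting each bio_vec it covers; note that 'bv' is declared by the macro itself.

static u64 example_account(struct ceph_bvec_iter *it, u32 len)
{
	u64 bytes = 0;

	ceph_bvec_iter_advance_step(it, len, ({
		bytes += bv.bv_len;	/* bv is provided by the macro */
	}));
	return bytes;
}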
struct ceph_msg_data {
struct list_head links; /* ceph_msg->data */
enum ceph_msg_data_type type;
union {
#ifdef CONFIG_BLOCK
struct {
- struct bio *bio;
- size_t bio_length;
+ struct ceph_bio_iter bio_pos;
+ u32 bio_length;
};
#endif /* CONFIG_BLOCK */
+ struct ceph_bvec_iter bvec_pos;
struct {
struct page **pages; /* NOT OWNER. */
size_t length; /* total # bytes */
@@ -122,11 +207,9 @@ struct ceph_msg_data_cursor {
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
- struct { /* bio */
- struct bio *bio; /* bio from list */
- struct bvec_iter bvec_iter;
- };
+ struct ceph_bio_iter bio_iter;
#endif /* CONFIG_BLOCK */
+ struct bvec_iter bvec_iter;
struct { /* pages */
unsigned int page_offset; /* offset in page */
unsigned short page_index; /* index in array */
@@ -290,9 +373,11 @@ extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
-extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
- size_t length);
+void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
+ u32 length);
#endif /* CONFIG_BLOCK */
+void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
+ struct ceph_bvec_iter *bvec_pos);
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 52fb37d1c2a5..528ccc943cee 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -57,6 +57,7 @@ enum ceph_osd_data_type {
#ifdef CONFIG_BLOCK
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
+ CEPH_OSD_DATA_TYPE_BVECS,
};
struct ceph_osd_data {
@@ -72,10 +73,11 @@ struct ceph_osd_data {
struct ceph_pagelist *pagelist;
#ifdef CONFIG_BLOCK
struct {
- struct bio *bio; /* list of bios */
- size_t bio_length; /* total in list */
+ struct ceph_bio_iter bio_pos;
+ u32 bio_length;
};
#endif /* CONFIG_BLOCK */
+ struct ceph_bvec_iter bvec_pos;
};
};
@@ -405,10 +407,14 @@ extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
unsigned int which,
struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
-extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *,
- unsigned int which,
- struct bio *bio, size_t bio_length);
+void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct ceph_bio_iter *bio_pos,
+ u32 bio_length);
#endif /* CONFIG_BLOCK */
+void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct ceph_bvec_iter *bvec_pos);
extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
unsigned int which,
@@ -418,6 +424,9 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
+void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct bio_vec *bvecs, u32 bytes);
extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
unsigned int which,
struct page **pages, u64 length,
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index d41fad99c0fa..e71fb222c7c3 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -5,7 +5,6 @@
#include <linux/rbtree.h>
#include <linux/ceph/types.h>
#include <linux/ceph/decode.h>
-#include <linux/ceph/ceph_fs.h>
#include <linux/crush/crush.h>
/*
@@ -280,11 +279,6 @@ bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
bool any_change);
-/* calculate mapping of a file extent to an object */
-extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
- u64 off, u64 len,
- u64 *bno, u64 *oxoff, u64 *oxlen);
-
int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
const struct ceph_object_id *oid,
const struct ceph_object_locator *oloc,
diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h
new file mode 100644
index 000000000000..cbd0d24b7148
--- /dev/null
+++ b/include/linux/ceph/striper.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CEPH_STRIPER_H
+#define _LINUX_CEPH_STRIPER_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct ceph_file_layout;
+
+void ceph_calc_file_object_mapping(struct ceph_file_layout *l,
+ u64 off, u64 len,
+ u64 *objno, u64 *objoff, u32 *xlen);
+
+struct ceph_object_extent {
+ struct list_head oe_item;
+ u64 oe_objno;
+ u64 oe_off;
+ u64 oe_len;
+};
+
+static inline void ceph_object_extent_init(struct ceph_object_extent *ex)
+{
+ INIT_LIST_HEAD(&ex->oe_item);
+}
+
+/*
+ * Called for each mapped stripe unit.
+ *
+ * @bytes: number of bytes mapped, i.e. the smaller of the full length
+ * requested (the file extent length) and the remainder of the stripe
+ * unit within an object
+ */
+typedef void (*ceph_object_extent_fn_t)(struct ceph_object_extent *ex,
+ u32 bytes, void *arg);
+
+int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len,
+ struct list_head *object_extents,
+ struct ceph_object_extent *alloc_fn(void *arg),
+ void *alloc_arg,
+ ceph_object_extent_fn_t action_fn,
+ void *action_arg);
+int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len,
+ struct list_head *object_extents,
+ ceph_object_extent_fn_t action_fn,
+ void *action_arg);
+
+struct ceph_file_extent {
+ u64 fe_off;
+ u64 fe_len;
+};
+
+static inline u64 ceph_file_extents_bytes(struct ceph_file_extent *file_extents,
+ u32 num_file_extents)
+{
+ u64 bytes = 0;
+ u32 i;
+
+ for (i = 0; i < num_file_extents; i++)
+ bytes += file_extents[i].fe_len;
+
+ return bytes;
+}
+
+int ceph_extent_to_file(struct ceph_file_layout *l,
+ u64 objno, u64 objoff, u64 objlen,
+ struct ceph_file_extent **file_extents,
+ u32 *num_file_extents);
+
+#endif
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8b7fd8eeccee..dc5b70449dc6 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -151,8 +151,8 @@ struct cgroup_subsys_state {
atomic_t online_cnt;
/* percpu_ref killing and RCU release */
- struct rcu_head rcu_head;
struct work_struct destroy_work;
+ struct rcu_work destroy_rwork;
/*
* PI: the parent css. Placed here for cache proximity to following
@@ -561,7 +561,7 @@ struct cftype {
/*
* Control Group subsystem type.
- * See Documentation/cgroups/cgroups.txt for details
+ * See Documentation/cgroup-v1/cgroups.txt for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
@@ -755,13 +755,13 @@ struct sock_cgroup_data {
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 7c925e6211f1..f711be6e8c44 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -20,6 +20,8 @@
* flags used across common struct clk. these flags should only affect the
* top-level framework. custom flags for dealing with hardware specifics
* belong in struct clk_foo
+ *
+ * Please update clk_flags[] in drivers/clk/clk.c when making changes here!
*/
#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */
#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
@@ -412,7 +414,7 @@ extern const struct clk_ops clk_divider_ro_ops;
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val, const struct clk_div_table *table,
- unsigned long flags);
+ unsigned long flags, unsigned long width);
long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table,
@@ -744,6 +746,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
bool clk_hw_is_prepared(const struct clk_hw *hw);
+bool clk_hw_rate_is_protected(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
@@ -806,6 +809,44 @@ extern struct of_device_id __clk_of_table;
} \
OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver)
+#define CLK_HW_INIT(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = (const char *[]) { _parent }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = NULL, \
+ .num_parents = 0, \
+ .ops = _ops, \
+ })
+
+#define CLK_FIXED_FACTOR(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
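A hedged example of the new declaration helpers (the clock names are made up): a divide-by-two child of a 24 MHz oscillator, first with CLK_FIXED_FACTOR and then open-coded with CLK_HW_INIT.

static CLK_FIXED_FACTOR(example_osc12m, "osc12m", "osc24m", 2, 1, 0);

static struct clk_fixed_factor example_osc12m_alt = {
	.div = 2,
	.mult = 1,
	.hw.init = CLK_HW_INIT("osc12m", "osc24m", &clk_fixed_factor_ops, 0),
};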
#ifdef CONFIG_OF
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *args,
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 12c96d94d1fa..4c4ef9f34db3 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -331,6 +331,38 @@ struct clk *devm_clk_get(struct device *dev, const char *id);
*/
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer from executing, even indirectly,
+ * an operation which could alter the rate of the provider or cause glitches.
+ *
+ * If exclusivity is claimed more than once on a clock, even by the same driver,
+ * the rate effectively gets locked as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to release the exclusivity they previously got
+ * from clk_rate_exclusive_get().
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
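A hedged sketch of the expected calling discipline: every successful clk_rate_exclusive_get() is balanced by a clk_rate_exclusive_put() once the rate constraint no longer applies.

static int example_pin_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	ret = clk_set_rate(clk, rate);
	if (ret)
		clk_rate_exclusive_put(clk);	/* drop the claim on failure */

	return ret;
}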
/**
* clk_enable - inform the system when the clock source should be running.
@@ -473,6 +505,23 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
int clk_set_rate(struct clk *clk, unsigned long rate);
/**
+ * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
+ * clock source
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * This helper function allows drivers to atomically set the rate of a producer
+ * and claim exclusivity over the rate control of the producer.
+ *
+ * It is essentially a combination of clk_set_rate() and
+ * clk_rate_exclusive_get(). The caller must balance this call with a call to
+ * clk_rate_exclusive_put()
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
+
+/**
* clk_has_parent - check if a clock is a possible parent for another
* @clk: clock source
* @parent: parent clock source
@@ -583,6 +632,14 @@ static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
+
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
static inline int clk_enable(struct clk *clk)
{
return 0;
@@ -609,6 +666,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate)
return 0;
}
+static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
return 0;
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index d18da839b810..7e3bceee3489 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -203,6 +203,7 @@ enum {
TI_CLKM_PRM,
TI_CLKM_SCRM,
TI_CLKM_CTRL,
+ TI_CLKM_CTRL_AUX,
TI_CLKM_PLLSS,
CLK_MAX_MEMMAPS
};
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 2eabc862abdb..4890ff033220 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -12,7 +12,7 @@
#ifndef __CLKDEV_H
#define __CLKDEV_H
-#include <asm/clkdev.h>
+#include <linux/slab.h>
struct clk;
struct clk_hw;
@@ -52,9 +52,4 @@ int clk_add_alias(const char *, const char *, const char *, struct device *);
int clk_register_clkdev(struct clk *, const char *, const char *);
int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
-#ifdef CONFIG_COMMON_CLK
-int __clk_get(struct clk *clk);
-void __clk_put(struct clk *clk);
-#endif
-
#endif
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 0fc36406f32c..f188eab10570 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -17,6 +17,7 @@
#include <linux/if.h>
#include <linux/fs.h>
#include <linux/aio_abi.h> /* for aio_context_t */
+#include <linux/uaccess.h>
#include <linux/unistd.h>
#include <asm/compat.h>
@@ -32,6 +33,8 @@
#endif
#define COMPAT_SYSCALL_DEFINE0(name) \
+ asmlinkage long compat_sys_##name(void); \
+ ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \
asmlinkage long compat_sys_##name(void)
#define COMPAT_SYSCALL_DEFINE1(name, ...) \
@@ -48,8 +51,10 @@
COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
__attribute__((alias(__stringify(compat_SyS##name)))); \
+ ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \
static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
@@ -157,6 +162,106 @@ struct compat_sigaction {
compat_sigset_t sa_mask __packed;
};
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+ int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
+ int si_errno;
+ int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
+
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+#ifdef CONFIG_X86_X32_ABI
+ /* SIGCHLD (x32 version) */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+#endif
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
+ struct {
+ compat_uptr_t _addr; /* faulting insn/memory ref. */
+#ifdef __ARCH_SI_TRAPNO
+ int _trapno; /* TRAP # which caused the signal */
+#endif
+#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
+ sizeof(short) : __alignof__(compat_uptr_t))
+ union {
+ /*
+ * used when si_code=BUS_MCEERR_AR or BUS_MCEERR_AO
+ */
+ short int _addr_lsb; /* Valid LSB of the reported address. */
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD];
+ compat_uptr_t _lower;
+ compat_uptr_t _upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ struct {
+ char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD];
+ u32 _pkey;
+ } _addr_pkey;
+ };
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ struct {
+ compat_uptr_t _call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+ } _sifields;
+} compat_siginfo_t;
+
/*
* These functions operate on 32- or 64-bit specs depending on
* COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
@@ -206,10 +311,6 @@ extern int put_compat_rusage(const struct rusage *,
struct compat_siginfo;
-extern asmlinkage long compat_sys_waitid(int, compat_pid_t,
- struct compat_siginfo __user *, int,
- struct compat_rusage __user *);
-
struct compat_dirent {
u32 d_ino;
compat_off_t d_off;
@@ -323,87 +424,6 @@ struct compat_msgbuf;
extern void compat_exit_robust_list(struct task_struct *curr);
-asmlinkage long
-compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
- compat_size_t len);
-asmlinkage long
-compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
- compat_size_t __user *len_ptr);
-
-asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
-asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
- compat_ssize_t msgsz, int msgflg);
-asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
- compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
-long compat_sys_msgctl(int first, int second, void __user *uptr);
-long compat_sys_shmctl(int first, int second, void __user *uptr);
-long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsems, const struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_keyctl(u32 option,
- u32 arg2, u32 arg3, u32 arg4, u32 arg5);
-asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
-
-asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
-asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
-asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
-asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
-asmlinkage long compat_sys_preadv64(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos);
-#endif
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
-asmlinkage long compat_sys_pwritev64(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos);
-#endif
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
-asmlinkage long compat_sys_readv64v2(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos, rwf_t flags);
-#endif
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
-asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos, rwf_t flags);
-#endif
-
-asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
-
-asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
-asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp, int flags);
-
-asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
- compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp);
-
-asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
-
-asmlinkage long compat_sys_wait4(compat_pid_t pid,
- compat_uint_t __user *stat_addr, int options,
- struct compat_rusage __user *ru);
-
#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
@@ -412,17 +432,10 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
+int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from);
int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
-long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
- struct compat_siginfo __user *uinfo);
-#ifdef CONFIG_COMPAT_OLD_SIGACTION
-asmlinkage long compat_sys_sigaction(int sig,
- const struct compat_old_sigaction __user *act,
- struct compat_old_sigaction __user *oact);
-#endif
static inline int compat_timeval_compare(struct compat_timeval *lhs,
struct compat_timeval *rhs)
@@ -444,20 +457,30 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs,
return lhs->tv_nsec - rhs->tv_nsec;
}
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
- struct timezone __user *tz);
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
- struct timezone __user *tz);
-
-asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
-
extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
-extern int put_compat_sigset(compat_sigset_t __user *compat,
- const sigset_t *set, unsigned int size);
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
- compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
- const compat_ulong_t __user *new_nodes);
+/*
+ * Defined inline so that the size can be a compile-time constant, which avoids
+ * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct
+ */
+static inline int
+put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
+ unsigned int size)
+{
+ /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
+#ifdef __BIG_ENDIAN
+ compat_sigset_t v;
+ switch (_NSIG_WORDS) {
+ case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+ case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+ case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+ case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+ }
+ return copy_to_user(compat, &v, size) ? -EFAULT : 0;
+#else
+ return copy_to_user(compat, set, size) ? -EFAULT : 0;
+#endif
+}
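A minimal, hypothetical caller sketch (the example_* name is not from this patch): because the helper is inline, the size argument is a compile-time constant at each call site, which is what keeps CONFIG_HARDENED_USERCOPY from warning about copies out of task_struct.

static long example_get_blocked(compat_sigset_t __user *uset)
{
	/* sizeof() is resolved at compile time inside the inlined copy_to_user() */
	return put_compat_sigset(uset, &current->blocked,
				 sizeof(compat_sigset_t));
}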
extern int compat_ptrace_request(struct task_struct *child,
compat_long_t request,
@@ -465,100 +488,126 @@ extern int compat_ptrace_request(struct task_struct *child,
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
-asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
- compat_long_t addr, compat_long_t data);
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
+struct epoll_event; /* fortunately, this one is fixed-layout */
+
+extern ssize_t compat_rw_copy_check_uvector(int type,
+ const struct compat_iovec __user *uvector,
+ unsigned long nr_segs,
+ unsigned long fast_segs, struct iovec *fast_pointer,
+ struct iovec **ret_pointer);
+
+extern void __user *compat_alloc_user_space(unsigned long len);
+
+int compat_restore_altstack(const compat_stack_t __user *uss);
+int __compat_save_altstack(compat_stack_t __user *, unsigned long);
+#define compat_save_altstack_ex(uss, sp) do { \
+ compat_stack_t __user *__uss = uss; \
+ struct task_struct *t = current; \
+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
+ put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
+ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+ if (t->sas_ss_flags & SS_AUTODISARM) \
+ sas_ss_reset(t); \
+} while (0);
+
/*
- * epoll (fs/eventpoll.c) compat bits follow ...
+ * These syscall function prototypes are kept in the same order as
+ * include/uapi/asm-generic/unistd.h. Deprecated or obsolete system calls
+ * go below.
+ *
+ * Please note that these prototypes are provided only for information
+ * purposes, for static analysis, and for linking from the syscall table.
+ * These functions should not be called elsewhere from kernel code.
*/
-struct epoll_event; /* fortunately, this one is fixed-layout */
+asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
+asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
+ u32 __user *iocb);
+asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct compat_timespec __user *timeout);
+
+/* fs/cookies.c */
+asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
+
+/* fs/eventpoll.c */
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct epoll_event __user *events,
int maxevents, int timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-asmlinkage long compat_sys_utime(const char __user *filename,
- struct compat_utimbuf __user *t);
-asmlinkage long compat_sys_utimensat(unsigned int dfd,
- const char __user *filename,
- struct compat_timespec __user *t,
- int flags);
+/* fs/fcntl.c */
+asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
+asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
-asmlinkage long compat_sys_time(compat_time_t __user *tloc);
-asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
-asmlinkage long compat_sys_signalfd(int ufd,
- const compat_sigset_t __user *sigmask,
- compat_size_t sigsetsize);
-asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
- const struct compat_itimerspec __user *utmr,
- struct compat_itimerspec __user *otmr);
-asmlinkage long compat_sys_timerfd_gettime(int ufd,
- struct compat_itimerspec __user *otmr);
+/* fs/ioctl.c */
+asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
-asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
- __u32 __user *pages,
- const int __user *nodes,
- int __user *status,
- int flags);
-asmlinkage long compat_sys_futimesat(unsigned int dfd,
- const char __user *filename,
- struct compat_timeval __user *t);
-asmlinkage long compat_sys_utimes(const char __user *filename,
- struct compat_timeval __user *t);
-asmlinkage long compat_sys_newstat(const char __user *filename,
- struct compat_stat __user *statbuf);
-asmlinkage long compat_sys_newlstat(const char __user *filename,
- struct compat_stat __user *statbuf);
-asmlinkage long compat_sys_newfstatat(unsigned int dfd,
- const char __user *filename,
- struct compat_stat __user *statbuf,
- int flag);
-asmlinkage long compat_sys_newfstat(unsigned int fd,
- struct compat_stat __user *statbuf);
+/* fs/namespace.c */
+asmlinkage long compat_sys_mount(const char __user *dev_name,
+ const char __user *dir_name,
+ const char __user *type, compat_ulong_t flags,
+ const void __user *data);
+
+/* fs/open.c */
asmlinkage long compat_sys_statfs(const char __user *pathname,
struct compat_statfs __user *buf);
-asmlinkage long compat_sys_fstatfs(unsigned int fd,
- struct compat_statfs __user *buf);
asmlinkage long compat_sys_statfs64(const char __user *pathname,
compat_size_t sz,
struct compat_statfs64 __user *buf);
+asmlinkage long compat_sys_fstatfs(unsigned int fd,
+ struct compat_statfs __user *buf);
asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user *buf);
-asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
-asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
- compat_long_t min_nr,
- compat_long_t nr,
- struct io_event __user *events,
- struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
- u32 __user *iocb);
-asmlinkage long compat_sys_mount(const char __user *dev_name,
- const char __user *dir_name,
- const char __user *type, compat_ulong_t flags,
- const void __user *data);
-asmlinkage long compat_sys_old_readdir(unsigned int fd,
- struct compat_old_linux_dirent __user *,
- unsigned int count);
+asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
+/* No generic prototype for truncate64, ftruncate64, fallocate */
+asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
+ int flags, umode_t mode);
+
+/* fs/readdir.c */
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
- unsigned int nr_segs, unsigned int flags);
-asmlinkage long compat_sys_open(const char __user *filename, int flags,
- umode_t mode);
-asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
- int flags, umode_t mode);
-asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
- struct file_handle __user *handle,
- int flags);
-asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
+
+/* fs/read_write.c */
+asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
+asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
+ const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
+ const struct compat_iovec __user *vec, compat_ulong_t vlen);
+/* No generic prototype for pread64 and pwrite64 */
+asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
+asmlinkage long compat_sys_preadv64(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
+asmlinkage long compat_sys_pwritev64(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
+/* fs/sendfile.c */
+asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
+ compat_off_t __user *offset, compat_size_t count);
+asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
+ compat_loff_t __user *offset, compat_size_t count);
+
+/* fs/select.c */
asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
@@ -569,110 +618,149 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
struct compat_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
+
+/* fs/signalfd.c */
asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr,
- compat_ulong_t flags);
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode);
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags);
-asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
- char __user *optval, unsigned int optlen);
-asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
- unsigned flags);
-asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned vlen, unsigned int flags);
-asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
- unsigned int flags);
-asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
- unsigned flags);
-asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
- unsigned flags, struct sockaddr __user *addr,
- int __user *addrlen);
-asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned vlen, unsigned int flags,
- struct compat_timespec __user *timeout);
+/* fs/splice.c */
+asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
+ unsigned int nr_segs, unsigned int flags);
+
+/* fs/stat.c */
+asmlinkage long compat_sys_newfstatat(unsigned int dfd,
+ const char __user *filename,
+ struct compat_stat __user *statbuf,
+ int flag);
+asmlinkage long compat_sys_newfstat(unsigned int fd,
+ struct compat_stat __user *statbuf);
+
+/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */
+
+/* fs/timerfd.c */
+asmlinkage long compat_sys_timerfd_gettime(int ufd,
+ struct compat_itimerspec __user *otmr);
+asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
+ const struct compat_itimerspec __user *utmr,
+ struct compat_itimerspec __user *otmr);
+
+/* fs/utimes.c */
+asmlinkage long compat_sys_utimensat(unsigned int dfd,
+ const char __user *filename,
+ struct compat_timespec __user *t,
+ int flags);
+
+/* kernel/exit.c */
+asmlinkage long compat_sys_waitid(int, compat_pid_t,
+ struct compat_siginfo __user *, int,
+ struct compat_rusage __user *);
+
+
+
+/* kernel/futex.c */
+asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
+ struct compat_timespec __user *utime, u32 __user *uaddr2,
+ u32 val3);
+asmlinkage long
+compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+ compat_size_t len);
+asmlinkage long
+compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+ compat_size_t __user *len_ptr);
+
+/* kernel/hrtimer.c */
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
struct compat_timespec __user *rmtp);
+
+/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
struct compat_itimerval __user *it);
asmlinkage long compat_sys_setitimer(int which,
struct compat_itimerval __user *in,
struct compat_itimerval __user *out);
-asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
-asmlinkage long compat_sys_setrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim);
-asmlinkage long compat_sys_getrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim);
-asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
-asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
- unsigned int len,
- compat_ulong_t __user *user_mask_ptr);
-asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
- unsigned int len,
- compat_ulong_t __user *user_mask_ptr);
+
+/* kernel/kexec.c */
+asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
+ compat_ulong_t nr_segments,
+ struct compat_kexec_segment __user *,
+ compat_ulong_t flags);
+
+/* kernel/posix-timers.c */
asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
+asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
+ struct compat_itimerspec __user *setting);
asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
struct compat_itimerspec __user *new,
struct compat_itimerspec __user *old);
-asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
- struct compat_itimerspec __user *setting);
asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
struct compat_timespec __user *tp);
asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
struct compat_timespec __user *tp);
-asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock,
- struct compat_timex __user *tp);
asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
struct compat_timespec __user *tp);
asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
struct compat_timespec __user *rqtp,
struct compat_timespec __user *rmtp);
-asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
- struct compat_siginfo __user *uinfo,
- struct compat_timespec __user *uts, compat_size_t sigsetsize);
+
+/* kernel/ptrace.c */
+asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ compat_long_t addr, compat_long_t data);
+
+/* kernel/sched/core.c */
+asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
+ unsigned int len,
+ compat_ulong_t __user *user_mask_ptr);
+asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
+ unsigned int len,
+ compat_ulong_t __user *user_mask_ptr);
+asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
+ struct compat_timespec __user *interval);
+
+/* kernel/signal.c */
+asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
+ compat_stack_t __user *uoss_ptr);
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
compat_size_t sigsetsize);
-asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
- compat_sigset_t __user *oset,
- compat_size_t sigsetsize);
-asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
- compat_size_t sigsetsize);
#ifndef CONFIG_ODD_RT_SIGACTION
asmlinkage long compat_sys_rt_sigaction(int,
const struct compat_sigaction __user *,
struct compat_sigaction __user *,
compat_size_t);
#endif
+asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ compat_sigset_t __user *oset,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct compat_timespec __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
+/* No generic prototype for rt_sigreturn */
+
+/* kernel/sys.c */
+asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
+asmlinkage long compat_sys_getrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+asmlinkage long compat_sys_setrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
+
+/* kernel/time.c */
+asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz);
+asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz);
+asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
+
+/* kernel/timer.c */
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
-asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
- struct compat_timespec __user *utime, u32 __user *uaddr2,
- u32 val3);
-asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen);
-asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
- compat_ulong_t nr_segments,
- struct compat_kexec_segment __user *,
- compat_ulong_t flags);
-asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
- const struct compat_mq_attr __user *u_mqstat,
- struct compat_mq_attr __user *u_omqstat);
-asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
- const struct compat_sigevent __user *u_notification);
+
+/* ipc/mqueue.c */
asmlinkage long compat_sys_mq_open(const char __user *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr __user *u_attr);
@@ -684,17 +772,92 @@ asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int __user *u_msg_prio,
const struct compat_timespec __user *u_abs_timeout);
-asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
+asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
+ const struct compat_sigevent __user *u_notification);
+asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
+ const struct compat_mq_attr __user *u_mqstat,
+ struct compat_mq_attr __user *u_omqstat);
-extern ssize_t compat_rw_copy_check_uvector(int type,
- const struct compat_iovec __user *uvector,
- unsigned long nr_segs,
- unsigned long fast_segs, struct iovec *fast_pointer,
- struct iovec **ret_pointer);
+/* ipc/msg.c */
+asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr);
+asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
+asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, int msgflg);
-extern void __user *compat_alloc_user_space(unsigned long len);
+/* ipc/sem.c */
+asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
+asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+ unsigned nsems, const struct compat_timespec __user *timeout);
+
+/* ipc/shm.c */
+asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
+
+/* net/socket.c */
+asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
+ unsigned flags, struct sockaddr __user *addr,
+ int __user *addrlen);
+asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
+ char __user *optval, unsigned int optlen);
+asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
+ char __user *optval, int __user *optlen);
+asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
+ unsigned flags);
+asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
+ unsigned int flags);
+
+/* mm/filemap.c: No generic prototype for readahead */
+
+/* security/keys/keyctl.c */
+asmlinkage long compat_sys_keyctl(u32 option,
+ u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+
+/* arch/example/kernel/sys_example.c */
+asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
+
+/* mm/fadvise.c: No generic prototype for fadvise64_64 */
+
+/* mm/, CONFIG_MMU only */
+asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
+ compat_ulong_t mode,
+ compat_ulong_t __user *nmask,
+ compat_ulong_t maxnode, compat_ulong_t flags);
+asmlinkage long compat_sys_get_mempolicy(int __user *policy,
+ compat_ulong_t __user *nmask,
+ compat_ulong_t maxnode,
+ compat_ulong_t addr,
+ compat_ulong_t flags);
+asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
+ compat_ulong_t maxnode);
+asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
+ compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
+ const compat_ulong_t __user *new_nodes);
+asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
+ __u32 __user *pages,
+ const int __user *nodes,
+ int __user *status,
+ int flags);
+asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
+ compat_pid_t pid, int sig,
+ struct compat_siginfo __user *uinfo);
+asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct compat_timespec __user *timeout);
+asmlinkage long compat_sys_wait4(compat_pid_t pid,
+ compat_uint_t __user *stat_addr, int options,
+ struct compat_rusage __user *ru);
+asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
+ int, const char __user *);
+asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
+ struct file_handle __user *handle,
+ int flags);
+asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock,
+ struct compat_timex __user *tp);
+asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags);
asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
const struct compat_iovec __user *lvec,
compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
@@ -703,14 +866,77 @@ asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
const struct compat_iovec __user *lvec,
compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
compat_ulong_t riovcnt, compat_ulong_t flags);
+asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp, int flags);
+asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
+asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+asmlinkage long compat_sys_preadv64v2(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
-asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
- compat_off_t __user *offset, compat_size_t count);
-asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
- compat_loff_t __user *offset, compat_size_t count);
-asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
- compat_stack_t __user *uoss_ptr);
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
+
+/*
+ * Deprecated system calls which are still defined in
+ * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch
+ */
+
+/* __ARCH_WANT_SYSCALL_NO_AT */
+asmlinkage long compat_sys_open(const char __user *filename, int flags,
+ umode_t mode);
+asmlinkage long compat_sys_utimes(const char __user *filename,
+ struct compat_timeval __user *t);
+
+/* __ARCH_WANT_SYSCALL_NO_FLAGS */
+asmlinkage long compat_sys_signalfd(int ufd,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+
+/* __ARCH_WANT_SYSCALL_OFF_T */
+asmlinkage long compat_sys_newstat(const char __user *filename,
+ struct compat_stat __user *statbuf);
+asmlinkage long compat_sys_newlstat(const char __user *filename,
+ struct compat_stat __user *statbuf);
+
+/* __ARCH_WANT_SYSCALL_DEPRECATED */
+asmlinkage long compat_sys_time(compat_time_t __user *tloc);
+asmlinkage long compat_sys_utime(const char __user *filename,
+ struct compat_utimbuf __user *t);
+asmlinkage long compat_sys_futimesat(unsigned int dfd,
+ const char __user *filename,
+ struct compat_timeval __user *t);
+asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+ struct compat_timeval __user *tvp);
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
+ unsigned flags);
+asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
+
+/* obsolete: fs/readdir.c */
+asmlinkage long compat_sys_old_readdir(unsigned int fd,
+ struct compat_old_linux_dirent __user *,
+ unsigned int count);
+/* obsolete: fs/select.c */
+asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
+
+/* obsolete: ipc */
+asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
+
+/* obsolete: kernel/signal.c */
#ifdef __ARCH_WANT_SYS_SIGPENDING
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
#endif
@@ -719,26 +945,18 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
compat_old_sigset_t __user *oset);
#endif
+#ifdef CONFIG_COMPAT_OLD_SIGACTION
+asmlinkage long compat_sys_sigaction(int sig,
+ const struct compat_old_sigaction __user *act,
+ struct compat_old_sigaction __user *oact);
+#endif
-int compat_restore_altstack(const compat_stack_t __user *uss);
-int __compat_save_altstack(compat_stack_t __user *, unsigned long);
-#define compat_save_altstack_ex(uss, sp) do { \
- compat_stack_t __user *__uss = uss; \
- struct task_struct *t = current; \
- put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
- put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
- put_user_ex(t->sas_ss_size, &__uss->ss_size); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
-} while (0);
-
-asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval);
+/* obsolete: kernel/time/time.c */
+asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
-asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
- int, const char __user *);
+/* obsolete: net/socket.c */
+asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-asmlinkage long compat_sys_arch_prctl(int option, unsigned long arg2);
/*
* For most but not all architectures, "am I in a compat syscall?" and
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 3b609edffa8f..ceb96ecab96e 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -17,5 +17,15 @@
*/
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-#define randomized_struct_fields_start struct {
-#define randomized_struct_fields_end };
+/* all clang versions usable with the kernel support KASAN ABI version 5 */
+#define KASAN_ABI_VERSION 5
+
+/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+#if __has_feature(address_sanitizer)
+#define __SANITIZE_ADDRESS__
+#endif
+
+/* Clang doesn't have a way to turn it off per-function, yet. */
+#ifdef __noretpoline
+#undef __noretpoline
+#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 631354acfa72..b4bf73f5e38f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -93,6 +93,10 @@
#define __weak __attribute__((weak))
#define __alias(symbol) __attribute__((alias(#symbol)))
+#ifdef RETPOLINE
+#define __noretpoline __attribute__((indirect_branch("keep")))
+#endif
+
/*
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
@@ -167,8 +171,6 @@
#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
-#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
#endif
#if GCC_VERSION >= 40300
@@ -196,6 +198,11 @@
#endif /* __CHECKER__ */
#endif /* GCC_VERSION >= 40300 */
+#if GCC_VERSION >= 40400
+#define __optimize(level) __attribute__((__optimize__(level)))
+#define __nostackprotector __optimize("no-stack-protector")
+#endif /* GCC_VERSION >= 40400 */
+
#if GCC_VERSION >= 40500
#ifndef __CHECKER__
@@ -205,6 +212,15 @@
#endif
/*
+ * Calling noreturn functions, __builtin_unreachable() and __builtin_trap()
+ * confuses the stack allocation in gcc, leading to overly large stack
+ * frames; see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * Adding an empty inline assembly statement before such a call works
+ * around the problem.
+ */
+#define barrier_before_unreachable() asm volatile("")
+
+/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
@@ -214,7 +230,11 @@
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() \
- do { annotate_unreachable(); __builtin_unreachable(); } while (0)
+ do { \
+ annotate_unreachable(); \
+ barrier_before_unreachable(); \
+ __builtin_unreachable(); \
+ } while (0)
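A hypothetical usage sketch (example_decode() is not from this patch): every existing unreachable() user now emits the empty asm first, which is what stops gcc from building the oversized stack frames described in the comment above.

static int example_decode(int op)
{
	switch (op) {
	case 0:
		return 1;
	case 1:
		return 2;
	default:
		/* expands to annotate_unreachable(); asm(""); __builtin_unreachable(); */
		unreachable();
	}
}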
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
@@ -222,6 +242,9 @@
#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
#define __randomize_layout __attribute__((randomize_layout))
#define __no_randomize_layout __attribute__((no_randomize_layout))
+/* This anon struct can add padding, so only enable it under randstruct. */
+#define randomized_struct_fields_start struct {
+#define randomized_struct_fields_end } __randomize_layout;
#endif
#endif /* GCC_VERSION >= 40500 */
@@ -236,15 +259,6 @@
*/
#define __visible __attribute__((externally_visible))
-/*
- * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only
- * possible since GCC 4.6. To provide as much build testing coverage
- * as possible, this is used for all GCC 4.6+ builds, and not just on
- * RANDSTRUCT_PLUGIN builds.
- */
-#define randomized_struct_fields_start struct {
-#define randomized_struct_fields_end } __randomize_layout;
-
#endif /* GCC_VERSION >= 40600 */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 52e611ab9a6c..ab4711c63601 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define barrier_data(ptr) barrier()
#endif
+/* workaround for GCC PR82365 if needed */
+#ifndef barrier_before_unreachable
+# define barrier_before_unreachable() do { } while (0)
+#endif
+
/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
@@ -185,23 +190,21 @@ void __read_once_size(const volatile void *p, void *res, int size)
#ifdef CONFIG_KASAN
/*
- * This function is not 'inline' because __no_sanitize_address confilcts
+ * We can't declare this function 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-static __no_sanitize_address __maybe_unused
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
+# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
-static __always_inline
+# define __no_kasan_or_inline __always_inline
+#endif
+
+static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
-#endif
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
@@ -240,6 +243,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* required ordering.
*/
#include <asm/barrier.h>
+#include <linux/kasan-checks.h>
#define __READ_ONCE(x, check) \
({ \
@@ -259,6 +263,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*/
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+ kasan_check_read(addr, 1);
+ return *(unsigned long *)addr;
+}
+
#define WRITE_ONCE(x, val) \
({ \
union { typeof(x) __val; char __c[1]; } __u = \
@@ -271,6 +282,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#endif /* __ASSEMBLY__ */
+#ifndef __optimize
+# define __optimize(level)
+#endif
+
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
diff --git a/include/linux/console.h b/include/linux/console.h
index b8920a031a3e..dfd6b0e97855 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -46,46 +46,52 @@ enum con_scroll {
struct consw {
struct module *owner;
const char *(*con_startup)(void);
- void (*con_init)(struct vc_data *, int);
- void (*con_deinit)(struct vc_data *);
- void (*con_clear)(struct vc_data *, int, int, int, int);
- void (*con_putc)(struct vc_data *, int, int, int);
- void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
- void (*con_cursor)(struct vc_data *, int);
- bool (*con_scroll)(struct vc_data *, unsigned int top,
+ void (*con_init)(struct vc_data *vc, int init);
+ void (*con_deinit)(struct vc_data *vc);
+ void (*con_clear)(struct vc_data *vc, int sy, int sx, int height,
+ int width);
+ void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos);
+ void (*con_putcs)(struct vc_data *vc, const unsigned short *s,
+ int count, int ypos, int xpos);
+ void (*con_cursor)(struct vc_data *vc, int mode);
+ bool (*con_scroll)(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
unsigned int lines);
- int (*con_switch)(struct vc_data *);
- int (*con_blank)(struct vc_data *, int, int);
- int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
- int (*con_font_get)(struct vc_data *, struct console_font *);
- int (*con_font_default)(struct vc_data *, struct console_font *, char *);
- int (*con_font_copy)(struct vc_data *, int);
- int (*con_resize)(struct vc_data *, unsigned int, unsigned int,
- unsigned int);
- void (*con_set_palette)(struct vc_data *,
+ int (*con_switch)(struct vc_data *vc);
+ int (*con_blank)(struct vc_data *vc, int blank, int mode_switch);
+ int (*con_font_set)(struct vc_data *vc, struct console_font *font,
+ unsigned int flags);
+ int (*con_font_get)(struct vc_data *vc, struct console_font *font);
+ int (*con_font_default)(struct vc_data *vc,
+ struct console_font *font, char *name);
+ int (*con_font_copy)(struct vc_data *vc, int con);
+ int (*con_resize)(struct vc_data *vc, unsigned int width,
+ unsigned int height, unsigned int user);
+ void (*con_set_palette)(struct vc_data *vc,
const unsigned char *table);
- void (*con_scrolldelta)(struct vc_data *, int lines);
- int (*con_set_origin)(struct vc_data *);
- void (*con_save_screen)(struct vc_data *);
- u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
- void (*con_invert_region)(struct vc_data *, u16 *, int);
- u16 *(*con_screen_pos)(struct vc_data *, int);
- unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
+ void (*con_scrolldelta)(struct vc_data *vc, int lines);
+ int (*con_set_origin)(struct vc_data *vc);
+ void (*con_save_screen)(struct vc_data *vc);
+ u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity,
+ u8 blink, u8 underline, u8 reverse, u8 italic);
+ void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
+ u16 *(*con_screen_pos)(struct vc_data *vc, int offset);
+ unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position,
+ int *px, int *py);
/*
* Flush the video console driver's scrollback buffer
*/
- void (*con_flush_scrollback)(struct vc_data *);
+ void (*con_flush_scrollback)(struct vc_data *vc);
/*
* Prepare the console for the debugger. This includes, but is not
* limited to, unblanking the console, loading an appropriate
* palette, and allowing debugger generated output.
*/
- int (*con_debug_enter)(struct vc_data *);
+ int (*con_debug_enter)(struct vc_data *vc);
/*
* Restore the console to its pre-debug state as closely as possible.
*/
- int (*con_debug_leave)(struct vc_data *);
+ int (*con_debug_leave)(struct vc_data *vc);
};
extern const struct consw *conswitchp;
diff --git a/include/linux/const.h b/include/linux/const.h
new file mode 100644
index 000000000000..7b55a55f5911
--- /dev/null
+++ b/include/linux/const.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_CONST_H
+#define _LINUX_CONST_H
+
+#include <uapi/linux/const.h>
+
+#define UL(x) (_UL(x))
+#define ULL(x) (_ULL(x))
+
+#endif /* _LINUX_CONST_H */
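A hypothetical usage sketch: the underlying _UL()/_ULL() macros append the integer suffix only when compiling C, so a constant written this way can be shared between C and assembly sources.

#define EXAMPLE_REGION_SIZE	(UL(1) << 21)	/* UL suffix in C, bare constant under __ASSEMBLY__ */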
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 723e952fde0d..d14ef4e77c8a 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -275,6 +275,50 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
+#define CPER_ARM_CACHE_ERROR 0
+#define CPER_ARM_TLB_ERROR 1
+#define CPER_ARM_BUS_ERROR 2
+#define CPER_ARM_VENDOR_ERROR 3
+#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
+
+#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
+#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
+#define CPER_ARM_ERR_VALID_LEVEL BIT(2)
+#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3)
+#define CPER_ARM_ERR_VALID_CORRECTED BIT(4)
+#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5)
+#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6)
+#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7)
+#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8)
+#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9)
+#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10)
+#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11)
+
+#define CPER_ARM_ERR_TRANSACTION_SHIFT 16
+#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_OPERATION_SHIFT 18
+#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0)
+#define CPER_ARM_ERR_LEVEL_SHIFT 22
+#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0)
+#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25
+#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_CORRECTED_SHIFT 26
+#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27
+#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28
+#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_TIME_OUT_SHIFT 31
+#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0)
+#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32
+#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0)
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0)
+#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43
+#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0)
+
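A hypothetical decode sketch (not part of this patch): the *_MASK values above are defined relative to the already-shifted value, so a consumer extracts a field by shifting first and masking second.

static inline u64 example_cper_arm_field(u64 error_info, u32 shift, u64 mask)
{
	return (error_info >> shift) & mask;
}

/* e.g. example_cper_arm_field(info, CPER_ARM_ERR_LEVEL_SHIFT, CPER_ARM_ERR_LEVEL_MASK) */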
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS
@@ -494,6 +538,8 @@ struct cper_sec_pcie {
/* Reset to default packing */
#pragma pack()
+extern const char * const cper_proc_error_type_strs[4];
+
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
@@ -503,5 +549,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc);
#endif
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index d4292ebc5c8b..de0dafb9399d 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -30,9 +30,6 @@
struct cpufreq_policy;
-typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
- unsigned long voltage, u32 *power);
-
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -41,43 +38,6 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy);
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func);
-
-/**
- * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
- * @np: a valid struct device_node to the cooling device device tree node.
- * @policy: cpufreq policy.
- */
-#ifdef CONFIG_THERMAL_OF
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy);
-
-struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func);
-#else
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
-{
- return NULL;
-}
-#endif
-
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
@@ -90,34 +50,27 @@ cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func)
-{
- return NULL;
-}
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
- return ERR_PTR(-ENOSYS);
+ return;
}
+#endif /* CONFIG_CPU_THERMAL */
+#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL)
+/**
+ * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
+ * @policy: cpufreq policy.
+ */
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct cpufreq_policy *policy);
+#else
static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
-
-static inline
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
-{
- return;
-}
-#endif /* CONFIG_CPU_THERMAL */
+#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
#endif /* __CPU_COOLING_H__ */
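A hypothetical cpufreq-driver sketch (the example_* names are placeholders): with the device-node argument gone, a DT-based driver registers the cooling device from the policy alone, typically from its ->ready() callback, and unregisters it on exit.

static struct thermal_cooling_device *example_cdev;

static void example_cpufreq_ready(struct cpufreq_policy *policy)
{
	/* Registration may fail; a real driver would just remember the result. */
	example_cdev = of_cpufreq_cooling_register(policy);
	if (IS_ERR(example_cdev))
		example_cdev = NULL;
}

static int example_cpufreq_exit(struct cpufreq_policy *policy)
{
	if (example_cdev)
		cpufreq_cooling_unregister(example_cdev);
	return 0;
}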
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 065f3a8eb486..87f48dd932eb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -629,6 +629,18 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
/*
+ * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
+ * with index
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ * @idx: the table entry currently being processed
+ */
+
+#define cpufreq_for_each_entry_idx(pos, table, idx) \
+ for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
+ pos++, idx++)
+
+/*
* cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
* excluding CPUFREQ_ENTRY_INVALID frequencies.
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -641,6 +653,21 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
continue; \
else
+/*
+ * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
+ * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ * @idx: the table entry currently being processed
+ */
+
+#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ cpufreq_for_each_entry_idx(pos, table, idx) \
+ if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
+ continue; \
+ else
+
+
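A hypothetical driver-side sketch: the new _idx iterators hand back the array index directly, replacing the old `pos - table` pointer arithmetic that the reworked cpufreq_table_find_index_*() helpers below used to rely on.

static void example_dump_table(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *pos;
	int idx;

	/* skips CPUFREQ_ENTRY_INVALID entries while still counting their slots */
	cpufreq_for_each_valid_entry_idx(pos, policy->freq_table, idx)
		pr_debug("entry %d: %u kHz\n", idx, pos->frequency);
}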
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
@@ -667,19 +694,20 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq >= target_freq)
- return pos - table;
+ return idx;
- best = pos;
+ best = idx;
}
- return best - table;
+ return best;
}
/* Find lowest freq at or above target in a table in descending order */
@@ -687,28 +715,29 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq > target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found above target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
@@ -728,28 +757,29 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq < target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found below target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Find highest freq at or below target in a table in descending order */
@@ -757,19 +787,20 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq <= target_freq)
- return pos - table;
+ return idx;
- best = pos;
+ best = idx;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
@@ -789,32 +820,33 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq < target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found below target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
/* Choose the closest freq */
- if (target_freq - best->frequency > freq - target_freq)
- return pos - table;
+ if (target_freq - table[best].frequency > freq - target_freq)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Find closest freq to target in a table in descending order */
@@ -822,32 +854,33 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq > target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found above target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
/* Choose the closest freq */
- if (best->frequency - target_freq > target_freq - freq)
- return pos - table;
+ if (table[best].frequency - target_freq > target_freq - freq)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
@@ -927,8 +960,7 @@ extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
-int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
+int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
int cpufreq_generic_init(struct cpufreq_policy *policy,
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 1a32e558eb11..8796ba387152 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -29,7 +29,6 @@ enum cpuhp_state {
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
- CPUHP_PERF_BFIN,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
@@ -59,6 +58,7 @@ enum cpuhp_state {
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -107,8 +107,8 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CQM_STARTING,
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_PERF_METAG_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
@@ -120,7 +120,6 @@ enum cpuhp_state {
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
- CPUHP_AP_METAG_TIMER_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
CPUHP_AP_MARCO_TIMER_STARTING,
@@ -137,6 +136,7 @@ enum cpuhp_state {
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
CPUHP_AP_ONLINE_IDLE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f7788d23b57..1eefabf1621f 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -33,6 +33,10 @@ struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
unsigned long long time; /* in US */
+#ifdef CONFIG_SUSPEND
+ unsigned long long s2idle_usage;
+ unsigned long long s2idle_time; /* in US */
+#endif
};
struct cpuidle_state {
@@ -131,7 +135,8 @@ extern bool cpuidle_not_available(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_select(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
@@ -163,7 +168,7 @@ static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
@@ -225,7 +230,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
}
#endif
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
@@ -246,7 +251,8 @@ struct cpuidle_governor {
struct cpuidle_device *dev);
int (*select) (struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ bool *stop_tick);
void (*reflect) (struct cpuidle_device *dev, int index);
};
@@ -257,22 +263,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif
-#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
-({ \
- int __ret; \
- \
- if (!idx) { \
- cpu_do_idle(); \
- return idx; \
- } \
- \
- __ret = cpu_pm_enter(); \
- if (!__ret) { \
- __ret = low_level_idle_enter(idx); \
- cpu_pm_exit(); \
- } \
- \
- __ret ? -1 : idx; \
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
+({ \
+ int __ret = 0; \
+ \
+ if (!idx) { \
+ cpu_do_idle(); \
+ return idx; \
+ } \
+ \
+ if (!is_retention) \
+ __ret = cpu_pm_enter(); \
+ if (!__ret) { \
+ __ret = low_level_idle_enter(idx); \
+ if (!is_retention) \
+ cpu_pm_exit(); \
+ } \
+ \
+ __ret ? -1 : idx; \
})
+#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
+
#endif /* _LINUX_CPUIDLE_H */
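A hypothetical ARM-style driver sketch (the example_* names are placeholders): a retention state keeps core context, so the CPU PM notifiers are skipped; a state that loses context would use CPU_PM_CPU_IDLE_ENTER() instead.

static int example_lpi_suspend(int idx)
{
	return 0;	/* a real driver would call into firmware/PSCI here */
}

static int example_enter_retention(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int idx)
{
	return CPU_PM_CPU_IDLE_ENTER_RETENTION(example_lpi_suspend, idx);
}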
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 75b565194437..bf53d893ad02 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -170,6 +170,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
#define for_each_cpu_and(cpu, mask, and) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
#else
@@ -640,7 +642,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
/**
* cpumask_size - size to allocate for a 'struct cpumask' in bytes
*/
-static inline size_t cpumask_size(void)
+static inline unsigned int cpumask_size(void)
{
return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
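As a usage note (not part of this patch), for_each_cpu_wrap() walks a mask starting at an arbitrary CPU and wrapping around; the UP stub added above makes the pattern below compile on NR_CPUS=1 builds as well. The demo_* name is hypothetical.

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Pick a CPU from 'mask', starting the scan after 'prev' so repeated
 * calls spread out instead of always returning the first set bit. */
static int demo_next_cpu(int prev, const struct cpumask *mask)
{
	int cpu;

	for_each_cpu_wrap(cpu, mask, prev + 1)
		return cpu;		/* first set CPU found, wrapping around */

	return -ENODEV;			/* mask was empty */
}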
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 1b8e41597ef5..934633a05d20 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -112,7 +112,7 @@ static inline int cpuset_do_slab_mem_spread(void)
return task_spread_slab(current);
}
-extern int current_cpuset_is_being_rebound(void);
+extern bool current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
@@ -247,9 +247,9 @@ static inline int cpuset_do_slab_mem_spread(void)
return 0;
}
-static inline int current_cpuset_is_being_rebound(void)
+static inline bool current_cpuset_is_being_rebound(void)
{
- return 0;
+ return false;
}
static inline void rebuild_sched_domains(void)
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index a992e6ca2f1c..f7ac2aa93269 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -2,13 +2,13 @@
#ifndef LINUX_CRASH_DUMP_H
#define LINUX_CRASH_DUMP_H
-#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
#include <asm/pgtable.h> /* for pgprot_t */
+#ifdef CONFIG_CRASH_DUMP
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
@@ -52,13 +52,13 @@ void vmcore_cleanup(void);
* has passed the elf core header address on command line.
*
* This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will
- * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of
- * previous kernel.
+ * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic
+ * of previous kernel.
*/
-static inline int is_kdump_kernel(void)
+static inline bool is_kdump_kernel(void)
{
- return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
/* is_vmcore_usable() checks if the kernel is booting after a panic and
@@ -89,7 +89,7 @@ extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
extern void unregister_oldmem_pfn_is_ram(void);
#else /* !CONFIG_CRASH_DUMP */
-static inline int is_kdump_kernel(void) { return 0; }
+static inline bool is_kdump_kernel(void) { return 0; }
#endif /* CONFIG_CRASH_DUMP */
extern unsigned long saved_max_pfn;
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index cd4f420231ba..72c92c396bb8 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,12 +5,19 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
+extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
+extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
+static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
+{
+ return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
+}
+
#endif /* _LINUX_CRC_CCITT_H */
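For reference (not part of this patch), the new crc_ccitt_false() computes the MSB-first CRC-16/CCITT-FALSE variant alongside the existing LSB-first crc_ccitt(). A minimal sketch; the 0xffff seeds and the demo_* name are illustrative:

#include <linux/crc-ccitt.h>
#include <linux/printk.h>

static void demo_crc(const u8 *buf, size_t len)
{
	u16 lsb_first = crc_ccitt(0xffff, buf, len);	   /* existing table */
	u16 msb_first = crc_ccitt_false(0xffff, buf, len); /* new table */

	pr_info("crc_ccitt=0x%04x crc_ccitt_false=0x%04x\n",
		lsb_first, msb_first);
}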
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index 357ae4611a45..bd21af828ff6 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
extern u32 crc32c(u32 crc, const void *address, unsigned int length);
+extern const char *crc32c_impl(void);
/* This macro exists for backwards-compatibility. */
#define crc32c_le crc32c
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 78508ca4b108..6eb06101089f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -107,8 +107,16 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
+ * Set if the algorithm has a ->setkey() method but can be used without
+ * calling it first, i.e. there is a default key.
+ */
+#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
+
+/*
* Transform masks and values (for crt_flags).
*/
+#define CRYPTO_TFM_NEED_KEY 0x00000001
+
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000
@@ -427,6 +435,14 @@ struct compress_alg {
* @cra_exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @cra_init, used to remove various changes set in
* @cra_init.
+ * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
+ * definition. See @struct @ablkcipher_alg.
+ * @cra_u.blkcipher: Union member which contains a synchronous block cipher
+ * definition. See @struct @blkcipher_alg.

+ * @cra_u.cipher: Union member which contains a single-block symmetric cipher
+ * definition. See @struct @cipher_alg.
+ * @cra_u.compress: Union member which contains a (de)compression algorithm.
+ * See @struct @compress_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
* @cra_list: internally used
* @cra_users: internally used
@@ -447,7 +463,7 @@ struct crypto_alg {
unsigned int cra_alignmask;
int cra_priority;
- atomic_t cra_refcnt;
+ refcount_t cra_refcnt;
char cra_name[CRYPTO_MAX_ALG_NAME];
char cra_driver_name[CRYPTO_MAX_ALG_NAME];
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 5258346c558c..f9eb22ad341e 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -26,18 +26,42 @@ extern struct attribute_group dax_attribute_group;
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
+struct dax_device *alloc_dax(void *private, const char *host,
+ const struct dax_operations *ops);
void put_dax(struct dax_device *dax_dev);
+void kill_dax(struct dax_device *dax_dev);
+void dax_write_cache(struct dax_device *dax_dev, bool wc);
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
return NULL;
}
-
+static inline struct dax_device *alloc_dax(void *private, const char *host,
+ const struct dax_operations *ops)
+{
+ /*
+ * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
+ * NULL is an error or expected.
+ */
+ return NULL;
+}
static inline void put_dax(struct dax_device *dax_dev)
{
}
+static inline void kill_dax(struct dax_device *dax_dev)
+{
+}
+static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
+{
+}
+static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+ return false;
+}
#endif
+struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
int __bdev_dax_supported(struct super_block *sb, int blocksize);
@@ -57,6 +81,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
}
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct block_device *bdev, struct writeback_control *wbc);
#else
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
@@ -76,27 +102,28 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
return NULL;
}
+
+static inline int dax_writeback_mapping_range(struct address_space *mapping,
+ struct block_device *bdev, struct writeback_control *wbc)
+{
+ return -EOPNOTSUPP;
+}
#endif
int dax_read_lock(void);
void dax_read_unlock(int id);
-struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops);
bool dax_alive(struct dax_device *dax_dev);
-void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
-void dax_write_cache(struct dax_device *dax_dev, bool wc);
-bool dax_write_cache_enabled(struct dax_device *dax_dev);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- pfn_t *pfnp, const struct iomap_ops *ops);
+ pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
@@ -121,7 +148,4 @@ static inline bool dax_mapping(struct address_space *mapping)
return mapping->host && IS_DAX(mapping->host);
}
-struct writeback_control;
-int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc);
#endif
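As a usage note (not part of this patch), moving the dax_writeback_mapping_range() declaration under CONFIG_FS_DAX with an -EOPNOTSUPP stub lets writeback code call it unconditionally. A hedged sketch of such a caller; the demo_* name is hypothetical:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/writeback.h>

static int demo_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc,
			       struct block_device *bdev)
{
	if (!IS_DAX(mapping->host))
		return 0;

	/* Flush dirty DAX entries; stubbed to -EOPNOTSUPP without FS_DAX. */
	return dax_writeback_mapping_range(mapping, bdev, wbc);
}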
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 65cd8ab60b7a..94acbde17bb1 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -56,9 +56,7 @@ struct qstr {
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-extern const char empty_string[];
extern const struct qstr empty_name;
-extern const char slash_string[];
extern const struct qstr slash_name;
struct dentry_stat_t {
@@ -227,6 +225,7 @@ extern seqlock_t rename_lock;
*/
extern void d_instantiate(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
+extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
@@ -235,6 +234,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
+extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
@@ -359,7 +359,7 @@ static inline void dont_mount(struct dentry *dentry)
extern void __d_lookup_done(struct dentry *);
-static inline int d_in_lookup(struct dentry *dentry)
+static inline int d_in_lookup(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_PAR_LOOKUP;
}
@@ -487,7 +487,7 @@ static inline bool d_really_is_positive(const struct dentry *dentry)
return dentry->d_inode != NULL;
}
-static inline int simple_positive(struct dentry *dentry)
+static inline int simple_positive(const struct dentry *dentry)
{
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a5538433c927..31fef7c34185 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -28,6 +28,7 @@ enum dm_queue_mode {
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_MQ_REQUEST_BASED = 3,
DM_TYPE_DAX_BIO_BASED = 4,
+ DM_TYPE_NVME_BIO_BASED = 5,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -86,10 +87,10 @@ typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
unsigned status_flags, char *result, unsigned maxlen);
-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
+typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
+ char *result, unsigned maxlen);
-typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
- struct block_device **bdev, fmode_t *mode);
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
/*
* These iteration functions are typically used to check (and combine)
@@ -221,14 +222,6 @@ struct target_type {
#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
/*
- * Some targets need to be sent the same WRITE bio severals times so
- * that they can send copies of it to different devices. This function
- * examines any supplied bio and returns the number of copies of it the
- * target requires.
- */
-typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
-
-/*
* A target implements own bio data integrity.
*/
#define DM_TARGET_INTEGRITY 0x00000010
@@ -274,6 +267,12 @@ struct dm_target {
unsigned num_discard_bios;
/*
+ * The number of secure erase bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ */
+ unsigned num_secure_erase_bios;
+
+ /*
* The number of WRITE SAME bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
@@ -291,13 +290,6 @@ struct dm_target {
*/
unsigned per_io_data_size;
- /*
- * If defined, this function is called to find out how many
- * duplicate bios should be sent to the target when writing
- * data.
- */
- dm_num_write_bios_fn num_write_bios;
-
/* target specific data */
void *private;
@@ -329,35 +321,9 @@ struct dm_target_callbacks {
int (*congested_fn) (struct dm_target_callbacks *, int);
};
-/*
- * For bio-based dm.
- * One of these is allocated for each bio.
- * This structure shouldn't be touched directly by target drivers.
- * It is here so that we can inline dm_per_bio_data and
- * dm_bio_from_per_bio_data
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- unsigned target_bio_nr;
- unsigned *len_ptr;
- struct bio clone;
-};
-
-static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
-{
- return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
-}
-
-static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
-{
- return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
-}
-
-static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
-{
- return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
-}
+void *dm_per_bio_data(struct bio *bio, size_t data_size);
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -500,6 +466,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
int dm_table_complete(struct dm_table *t);
/*
+ * Destroy the table when finished.
+ */
+void dm_table_destroy(struct dm_table *t);
+
+/*
* Target may require that it is never sent I/O larger than len.
*/
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
@@ -577,14 +548,13 @@ do { \
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))
-#define SECTOR_SHIFT 9
-
/*
* Definitions of return values from target end_io function.
*/
#define DM_ENDIO_DONE 0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
+#define DM_ENDIO_DELAY_REQUEUE 3
/*
* Definitions of return values from target map function.
@@ -592,7 +562,7 @@ do { \
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
-#define DM_MAPIO_DELAY_REQUEUE 3
+#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL 4
#define dm_sector_div64(x, y)( \
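For context (not part of this patch), dm_per_bio_data() and friends keep the same calling convention for targets even though they are now out of line. A minimal sketch of the usual per-bio data pattern; the demo_* names are hypothetical and the remapping step is left as a placeholder:

#include <linux/device-mapper.h>
#include <linux/jiffies.h>

/* Hypothetical per-bio context for an illustrative target. */
struct demo_io {
	unsigned long start_jiffies;
};

static int demo_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* Ask dm core to reserve this much data in front of each clone. */
	ti->per_io_data_size = sizeof(struct demo_io);
	return 0;
}

static int demo_map(struct dm_target *ti, struct bio *bio)
{
	struct demo_io *io = dm_per_bio_data(bio, sizeof(struct demo_io));

	io->start_jiffies = jiffies;
	/* ...remap bio to the underlying device here (omitted)... */
	return DM_MAPIO_REMAPPED;
}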
diff --git a/include/linux/device.h b/include/linux/device.h
index 9d32000725da..0059b99e1f25 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* device.h - generic, centralized driver model
*
@@ -5,8 +6,6 @@
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
- * This file is released under the GPLv2
- *
* See Documentation/driver-model/ for more information.
*/
@@ -21,7 +20,6 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
-#include <linux/pinctrl/devinfo.h>
#include <linux/pm.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
@@ -42,6 +40,7 @@ struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
struct iommu_fwspec;
+struct dev_pin_info;
struct bus_attribute {
struct attribute attr;
@@ -257,6 +256,7 @@ enum probe_type {
* automatically.
* @pm: Power management operations of the device which matched
* this driver.
+ * @coredump: Called through sysfs to initiate a device coredump.
* @p: Driver core's private data, no one other than the driver
* core can touch this.
*
@@ -288,6 +288,7 @@ struct device_driver {
const struct attribute_group **groups;
const struct dev_pm_ops *pm;
+ int (*coredump) (struct device *dev);
struct driver_private *p;
};
@@ -301,7 +302,6 @@ extern struct device_driver *driver_find(const char *name,
extern int driver_probe_done(void);
extern void wait_for_device_probe(void);
-
/* sysfs interface for exporting driver attributes */
struct driver_attribute {
@@ -575,6 +575,9 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_RO(_name) \
@@ -728,6 +731,28 @@ struct device_dma_parameters {
};
/**
+ * struct device_connection - Device Connection Descriptor
+ * @endpoint: The names of the two devices connected together
+ * @id: Unique identifier for the connection
+ * @list: List head, private, for internal use only
+ */
+struct device_connection {
+ const char *endpoint[2];
+ const char *id;
+ struct list_head list;
+};
+
+void *device_connection_find_match(struct device *dev, const char *con_id,
+ void *data,
+ void *(*match)(struct device_connection *con,
+ int ep, void *data));
+
+struct device *device_connection_find(struct device *dev, const char *con_id);
+
+void device_connection_add(struct device_connection *con);
+void device_connection_remove(struct device_connection *con);
+
+/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
@@ -767,6 +792,7 @@ enum device_link_state {
* @status: The state of the link (with respect to the presence of drivers).
* @flags: Link flags.
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @kref: Count repeated addition of the same link.
* @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
*/
struct device_link {
@@ -777,6 +803,7 @@ struct device_link {
enum device_link_state status;
u32 flags;
bool rpm_active;
+ struct kref kref;
#ifdef CONFIG_SRCU
struct rcu_head rcu_head;
#endif
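For context (not part of this patch), a minimal sketch of how a board or glue driver might describe a connection with the new struct device_connection; the endpoint and id strings here are purely illustrative:

#include <linux/device.h>

static struct device_connection demo_conn = {
	.endpoint = { "i2c-typec-ctrl", "usb-role-switch-dev" },
	.id       = "usb-role-switch",
};

static void demo_add_connections(void)
{
	device_connection_add(&demo_conn);	/* published for lookups */
}

static void demo_remove_connections(void)
{
	device_connection_remove(&demo_conn);
}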
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
new file mode 100644
index 000000000000..3c8b7d274bd9
--- /dev/null
+++ b/include/linux/dm-bufio.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2009-2011 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_BUFIO_H
+#define _LINUX_DM_BUFIO_H
+
+#include <linux/blkdev.h>
+#include <linux/types.h>
+
+/*----------------------------------------------------------------*/
+
+struct dm_bufio_client;
+struct dm_buffer;
+
+/*
+ * Create a buffered IO cache on a given device
+ */
+struct dm_bufio_client *
+dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
+ unsigned reserved_buffers, unsigned aux_size,
+ void (*alloc_callback)(struct dm_buffer *),
+ void (*write_callback)(struct dm_buffer *));
+
+/*
+ * Release a buffered IO cache.
+ */
+void dm_bufio_client_destroy(struct dm_bufio_client *c);
+
+/*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+ * client.
+ */
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
+
+/*
+ * WARNING: to avoid deadlocks, these conditions are observed:
+ *
+ * - At most one thread can hold up to "reserved_buffers" buffers simultaneously.
+ * - Every other thread can hold at most one buffer.
+ * - Threads which call only dm_bufio_get can hold an unlimited number of
+ *   buffers.
+ */
+
+/*
+ * Read a given block from disk. Returns pointer to data. Returns a
+ * pointer to dm_buffer that can be used to release the buffer or to make
+ * it dirty.
+ */
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but return buffer from cache, don't read
+ * it. If the buffer is not in the cache, return NULL.
+ */
+void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but don't read anything from the disk. It is
+ * expected that the caller initializes the buffer and marks it dirty.
+ */
+void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Prefetch the specified blocks to the cache.
+ * The function starts to read the blocks and returns without waiting for
+ * I/O to finish.
+ */
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+ sector_t block, unsigned n_blocks);
+
+/*
+ * Release a reference obtained with dm_bufio_{read,get,new}. The data
+ * pointer and dm_buffer pointer is no longer valid after this call.
+ */
+void dm_bufio_release(struct dm_buffer *b);
+
+/*
+ * Mark a buffer dirty. It should be called after the buffer is modified.
+ *
+ * In case of memory pressure, the buffer may be written after
+ * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So
+ * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but
+ * the actual writing may occur earlier.
+ */
+void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
+
+/*
+ * Mark a part of the buffer dirty.
+ *
+ * The specified part of the buffer is scheduled to be written. dm-bufio may
+ * write the specified part of the buffer or it may write a larger superset.
+ */
+void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+ unsigned start, unsigned end);
+
+/*
+ * Initiate writing of dirty buffers, without waiting for completion.
+ */
+void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
+
+/*
+ * Write all dirty buffers. Guarantees that all dirty buffers created prior
+ * to this call are on disk when this call exits.
+ */
+int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
+
+/*
+ * Send an empty write barrier to the device to flush hardware disk cache.
+ */
+int dm_bufio_issue_flush(struct dm_bufio_client *c);
+
+/*
+ * Like dm_bufio_release but also move the buffer to the new
+ * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
+ */
+void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
+
+/*
+ * Free the given buffer.
+ * This is just a hint, if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
+
+/*
+ * Set the minimum number of buffers before cleanup happens.
+ */
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
+
+unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
+sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+sector_t dm_bufio_get_block_number(struct dm_buffer *b);
+void *dm_bufio_get_block_data(struct dm_buffer *b);
+void *dm_bufio_get_aux_data(struct dm_buffer *b);
+struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
+
+/*----------------------------------------------------------------*/
+
+#endif
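For context (not part of this patch), a hedged sketch of the read-modify-write cycle the comments above describe; the bdev, the 4 KiB block size, and the demo_* name are illustrative:

#include <linux/dm-bufio.h>
#include <linux/err.h>

static int demo_update_block(struct block_device *bdev, sector_t block)
{
	struct dm_bufio_client *c;
	struct dm_buffer *b;
	u8 *data;
	int r;

	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	data = dm_bufio_read(c, block, &b);
	if (IS_ERR(data)) {
		r = PTR_ERR(data);
		goto out;
	}

	data[0] ^= 0xff;			/* modify the cached block */
	dm_bufio_mark_buffer_dirty(b);		/* schedule it for writeback */
	dm_bufio_release(b);			/* drop our reference */

	r = dm_bufio_write_dirty_buffers(c);	/* wait until it is on disk */
out:
	dm_bufio_client_destroy(c);
	return r;
}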
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 79f27d60ec66..085db2fee2d7 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -301,7 +301,7 @@ struct dma_buf {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
- unsigned long active;
+ __poll_t active;
} cb_excl, cb_shared;
};
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
new file mode 100644
index 000000000000..53ad6a47f513
--- /dev/null
+++ b/include/linux/dma-direct.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_DMA_DIRECT_H
+#define _LINUX_DMA_DIRECT_H 1
+
+#include <linux/dma-mapping.h>
+#include <linux/mem_encrypt.h>
+
+#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
+#include <asm/dma-direct.h>
+#else
+static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ dma_addr_t dev_addr = (dma_addr_t)paddr;
+
+ return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+}
+
+static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ phys_addr_t paddr = (phys_addr_t)dev_addr;
+
+ return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+
+/*
+ * If memory encryption is supported, phys_to_dma will set the memory encryption
+ * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
+ * and __dma_to_phys versions should only be used on non-encrypted memory for
+ * special occasions like DMA coherent buffers.
+ */
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return __sme_set(__phys_to_dma(dev, paddr));
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return __sme_clr(__dma_to_phys(dev, daddr));
+}
+
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void dma_mark_clean(void *addr, size_t size);
+#else
+static inline void dma_mark_clean(void *addr, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+int dma_direct_supported(struct device *dev, u64 mask);
+
+#endif /* _LINUX_DMA_DIRECT_H */
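As an illustration (not part of this patch) of the generic fallbacks above: phys_to_dma() applies the device's pfn offset plus the SME encryption bit, and dma_capable() checks that the resulting bus range fits under the DMA mask. The demo_* name and the example offset are hypothetical:

#include <linux/dma-direct.h>

/* E.g. with a hypothetical dev->dma_pfn_offset of 0x80000 (2 GiB at
 * 4 KiB pages), CPU physical 0x1_0000_0000 becomes bus 0x8000_0000. */
static bool demo_mapping_ok(struct device *dev, phys_addr_t paddr, size_t size)
{
	dma_addr_t bus = phys_to_dma(dev, paddr);

	return dma_capable(dev, bus, size);
}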
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 332a5420243c..bc8940ca280d 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -21,6 +21,7 @@
#define __LINUX_DMA_FENCE_ARRAY_H
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
/**
* struct dma_fence_array_cb - callback helper for fence array
@@ -47,6 +48,8 @@ struct dma_fence_array {
unsigned num_fences;
atomic_t num_pending;
struct dma_fence **fences;
+
+ struct irq_work work;
};
extern const struct dma_fence_ops dma_fence_array_ops;
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index efdabbb64e3c..4c008170fe65 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -242,7 +242,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
* The caller is required to hold the RCU read lock.
*/
static inline struct dma_fence *
-dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
do {
struct dma_fence *fence;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 81ed9b2d84dc..f8ab1c0f589e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -136,7 +136,7 @@ struct dma_map_ops {
int is_phys;
};
-extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -212,14 +212,14 @@ static inline void set_dma_ops(struct device *dev,
}
#else
/*
- * Define the dma api to allow compilation but not linking of
- * dma dependent code. Code that depends on the dma-mapping
- * API needs to set 'depends on HAS_DMA' in its Kconfig
+ * Define the dma api to allow compilation of dma dependent code.
+ * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
+ * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
+ * where <something> guarantees the availability of the dma-mapping API.
*/
-extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- return &bad_dma_ops;
+ return NULL;
}
#endif
@@ -513,10 +513,14 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
void *cpu_addr;
BUG_ON(!ops);
+ WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
return cpu_addr;
+ /* let the implementation decide on the zone to allocate from: */
+ flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
if (!arch_dma_alloc_attrs(&dev, &flag))
return NULL;
if (!ops->alloc)
@@ -568,6 +572,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+/*
+ * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
+ * don't use this in new code.
+ */
+#ifndef arch_dma_supported
+#define arch_dma_supported(dev, mask) (1)
+#endif
+
static inline void dma_check_mask(struct device *dev, u64 mask)
{
if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
@@ -580,6 +592,9 @@ static inline int dma_supported(struct device *dev, u64 mask)
if (!ops)
return 0;
+ if (!arch_dma_supported(dev, mask))
+ return 0;
+
if (!ops->dma_supported)
return 1;
return ops->dma_supported(dev, mask);
@@ -692,7 +707,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
- return *dev->dma_mask >> PAGE_SHIFT;
+ return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif
@@ -757,10 +772,19 @@ static inline void dma_deconfigure(struct device *dev) {}
/*
* Managed DMA API
*/
+#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
+#else /* !CONFIG_HAS_DMA */
+static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{ return NULL; }
+static inline void dmam_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle) { }
+#endif /* !CONFIG_HAS_DMA */
+
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs);
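As a usage note (not part of this patch), the managed helpers now have !CONFIG_HAS_DMA stubs that simply return NULL, so a probe path like the hedged sketch below still compiles and fails cleanly; the demo_* name and the 32-bit mask are illustrative:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int demo_probe_dma(struct device *dev, size_t len,
			  void **cpu, dma_addr_t *bus)
{
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (ret)
		return ret;

	*cpu = dmam_alloc_coherent(dev, len, bus, GFP_KERNEL);
	if (!*cpu)
		return -ENOMEM;

	return 0;	/* buffer is released automatically on driver detach */
}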
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f838764993eb..861be5cab1df 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -470,7 +470,11 @@ typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
const struct dmaengine_result *result);
struct dmaengine_unmap_data {
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ u16 map_cnt;
+#else
u8 map_cnt;
+#endif
u8 to_cnt;
u8 from_cnt;
u8 bidi_cnt;
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 53ba737505df..f632ecfb4238 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -16,6 +16,8 @@
struct device;
+#ifdef CONFIG_HAS_DMA
+
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
@@ -23,13 +25,6 @@ void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
-
-static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
- dma_addr_t *handle)
-{
- return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
-}
-
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
/*
@@ -39,5 +34,26 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
void dmam_pool_destroy(struct dma_pool *pool);
+#else /* !CONFIG_HAS_DMA */
+static inline struct dma_pool *dma_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t allocation)
+{ return NULL; }
+static inline void dma_pool_destroy(struct dma_pool *pool) { }
+static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle) { return NULL; }
+static inline void dma_pool_free(struct dma_pool *pool, void *vaddr,
+ dma_addr_t addr) { }
+static inline struct dma_pool *dmam_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t allocation)
+{ return NULL; }
+static inline void dmam_pool_destroy(struct dma_pool *pool) { }
+#endif /* !CONFIG_HAS_DMA */
+
+static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
+}
+
#endif
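As a usage note (not part of this patch), dma_pool_zalloc() now sits outside the CONFIG_HAS_DMA guard, so it degrades to the NULL-returning stubs on !HAS_DMA builds. A hedged sketch; the pool name, size and alignment are illustrative:

#include <linux/dmapool.h>
#include <linux/gfp.h>

static void *demo_alloc_desc(struct device *dev, struct dma_pool **poolp,
			     dma_addr_t *bus)
{
	struct dma_pool *pool = dma_pool_create("demo-desc", dev, 64, 64, 0);

	if (!pool)
		return NULL;

	*poolp = pool;
	return dma_pool_zalloc(pool, GFP_KERNEL, bus);	/* zeroed descriptor */
}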
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 46e151172d95..c46fdb36700b 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -106,6 +106,7 @@ extern void dmi_scan_machine(void);
extern void dmi_memdev_walk(void);
extern void dmi_set_dump_stack_arch_desc(void);
extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
+extern int dmi_get_bios_year(void);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
@@ -113,6 +114,7 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
+extern u64 dmi_memdev_size(u16 handle);
#else
@@ -133,6 +135,7 @@ static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
*dayp = 0;
return false;
}
+static inline int dmi_get_bios_year(void) { return -ENXIO; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
@@ -142,6 +145,7 @@ static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
static inline void dmi_memdev_name(u16 handle, const char **bank,
const char **device) { }
+static inline u64 dmi_memdev_size(u16 handle) { return ~0ul; }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
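For context (not part of this patch), a minimal sketch of the new dmi_get_bios_year() helper; the 2015 cut-off and the demo_* name are illustrative. Without CONFIG_DMI it returns -ENXIO, so the quirk is simply skipped:

#include <linux/dmi.h>

static bool demo_needs_legacy_quirk(void)
{
	int year = dmi_get_bios_year();

	return year > 0 && year < 2015;	/* only trust a positive, parsed year */
}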
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
index f48a85c377de..b4f22112ba75 100644
--- a/include/linux/dsa/lan9303.h
+++ b/include/linux/dsa/lan9303.h
@@ -23,9 +23,10 @@ struct lan9303 {
struct regmap_irq_chip_data *irq_data;
struct gpio_desc *reset_gpio;
u32 reset_duration; /* in [ms] */
- bool phy_addr_sel_strap;
+ int phy_addr_base;
struct dsa_switch *ds;
struct mutex indirect_mutex; /* protect indexed register access */
+ struct mutex alr_mutex; /* protect ALR access */
const struct lan9303_phy_ops *ops;
bool is_bridged; /* true if port 1 and 2 are bridged */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index cd75c173fd00..bffb97828ed6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -186,6 +186,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
+ * @MEM_NVDIMM: Non-volatile RAM
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -209,6 +210,7 @@ enum mem_type {
MEM_DDR4,
MEM_RDDR4,
MEM_LRDDR4,
+ MEM_NVDIMM,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -231,6 +233,7 @@ enum mem_type {
#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
/**
* enum edac-type - Error Detection and Correction capabilities and mode
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 29fdf8029cf6..f1b7d68ac460 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -475,6 +475,39 @@ typedef struct {
u64 get_all;
} apple_properties_protocol_64_t;
+typedef struct {
+ u32 get_capability;
+ u32 get_event_log;
+ u32 hash_log_extend_event;
+ u32 submit_command;
+ u32 get_active_pcr_banks;
+ u32 set_active_pcr_banks;
+ u32 get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_32_t;
+
+typedef struct {
+ u64 get_capability;
+ u64 get_event_log;
+ u64 hash_log_extend_event;
+ u64 submit_command;
+ u64 get_active_pcr_banks;
+ u64 set_active_pcr_banks;
+ u64 get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_64_t;
+
+typedef u32 efi_tcg2_event_log_format;
+
+typedef struct {
+ void *get_capability;
+ efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
+ efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *);
+ void *hash_log_extend_event;
+ void *submit_command;
+ void *get_active_pcr_banks;
+ void *set_active_pcr_banks;
+ void *get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_t;
+
/*
* Types and defines for EFI ResetSystem
*/
@@ -625,6 +658,7 @@ void efi_native_runtime_setup(void);
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -637,6 +671,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
+#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
typedef struct {
efi_guid_t guid;
@@ -911,6 +946,7 @@ extern struct efi {
unsigned long properties_table; /* properties table */
unsigned long mem_attr_table; /* memory attributes table */
unsigned long rng_seed; /* UEFI firmware random seed */
+ unsigned long tpm_log; /* TPM2 Event Log table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -930,6 +966,8 @@ extern struct efi {
unsigned long flags;
} efi;
+extern struct mm_struct efi_mm;
+
static inline int
efi_guidcmp (efi_guid_t left, efi_guid_t right)
{
@@ -1536,6 +1574,8 @@ static inline void
efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
#endif
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
+
/*
* Arch code can implement the following three template macros, avoiding
* reptition for the void/non-void return cases of {__,}efi_call_virt():
@@ -1603,4 +1643,12 @@ struct linux_efi_random_seed {
u8 bits[];
};
+struct linux_efi_tpm_eventlog {
+ u32 size;
+ u8 version;
+ u8 log[];
+};
+
+extern int efi_tpm_eventlog_init(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 3d794b3dc532..6d9e230dffd2 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -198,8 +198,6 @@ extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_register_queue(struct request_queue *q);
-extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, unsigned int);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *q, struct request *rq,
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
new file mode 100644
index 000000000000..280c61ecbf20
--- /dev/null
+++ b/include/linux/error-injection.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERROR_INJECTION_H
+#define _LINUX_ERROR_INJECTION_H
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+
+#include <asm/error-injection.h>
+
+extern bool within_error_injection_list(unsigned long addr);
+extern int get_injectable_error_type(unsigned long addr);
+
+#else /* !CONFIG_FUNCTION_ERROR_INJECTION */
+
+#include <asm-generic/error-injection.h>
+static inline bool within_error_injection_list(unsigned long addr)
+{
+ return false;
+}
+
+static inline int get_injectable_error_type(unsigned long addr)
+{
+ return EI_ETYPE_NONE;
+}
+
+#endif
+
+#endif /* _LINUX_ERROR_INJECTION_H */
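For context (not part of this patch), a hedged sketch of how a BPF/kprobe-style caller might consult these helpers before overriding a function's return value; the demo_* name is hypothetical:

#include <linux/error-injection.h>

static bool demo_may_override_return(unsigned long addr)
{
	if (!within_error_injection_list(addr))
		return false;

	/* EI_ETYPE_* says which kind of value may be injected at 'addr'. */
	return get_injectable_error_type(addr) != EI_ETYPE_NONE;
}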
diff --git a/include/linux/errseq.h b/include/linux/errseq.h
index 6ffae9c5052d..fc2777770768 100644
--- a/include/linux/errseq.h
+++ b/include/linux/errseq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * See Documentation/errseq.rst and lib/errseq.c
+ * See Documentation/core-api/errseq.rst and lib/errseq.c
*/
#ifndef _LINUX_ERRSEQ_H
#define _LINUX_ERRSEQ_H
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 263dbcad22fc..79563840c295 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -31,7 +31,7 @@
#ifdef __KERNEL__
struct device;
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
-unsigned char *arch_get_platform_get_mac_address(void);
+unsigned char *arch_get_platform_mac_address(void);
u32 eth_get_headlen(void *data, unsigned int max_len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 2ec41a7eb54f..ebe41811ed34 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -371,6 +371,11 @@ struct ethtool_ops {
u8 *hfunc);
int (*set_rxfh)(struct net_device *, const u32 *indir,
const u8 *key, const u8 hfunc);
+ int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key,
+ u8 *hfunc, u32 rss_context);
+ int (*set_rxfh_context)(struct net_device *, const u32 *indir,
+ const u8 *key, const u8 hfunc,
+ u32 *rss_context, bool delete);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 60b2985e8a18..7094718b653b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -26,18 +26,16 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+struct eventfd_ctx;
struct file;
#ifdef CONFIG_EVENTFD
-struct file *eventfd_file_create(unsigned int count, int flags);
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
@@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
* Ugly ugly ugly error layer to support modules that uses eventfd but
* pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
*/
-static inline struct file *eventfd_file_create(unsigned int count, int flags)
-{
- return ERR_PTR(-ENOSYS);
-}
static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
@@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
}
-static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
- __u64 *cnt)
-{
- return -ENOSYS;
-}
-
static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
wait_queue_entry_t *wait, __u64 *cnt)
{
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 6d94e82c8ad9..7f033b1ea568 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -230,6 +230,7 @@ extern void devm_extcon_unregister_notifier_all(struct device *dev,
* Following APIs get the extcon_dev from devicetree or by through extcon name.
*/
extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+extern struct extcon_dev *extcon_find_edev_by_node(struct device_node *node);
extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index);
@@ -283,6 +284,11 @@ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
return ERR_PTR(-ENODEV);
}
+static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index)
{
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
deleted file mode 100644
index 7cacafb78b09..000000000000
--- a/include/linux/extcon/extcon-gpio.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Single-state GPIO extcon driver based on extcon class
- *
- * Copyright (C) 2012 Samsung Electronics
- * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * based on switch class driver
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __EXTCON_GPIO_H__
-#define __EXTCON_GPIO_H__ __FILE__
-
-#include <linux/extcon.h>
-
-/**
- * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device.
- * @extcon_id: The unique id of specific external connector.
- * @gpio: Corresponding GPIO.
- * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0
- * If true, low state of gpio means active.
- * If false, high state of gpio means active.
- * @debounce: Debounce time for GPIO IRQ in ms.
- * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
- * @check_on_resume: Boolean describing whether to check the state of gpio
- * while resuming from sleep.
- */
-struct gpio_extcon_pdata {
- unsigned int extcon_id;
- unsigned gpio;
- bool gpio_active_low;
- unsigned long debounce;
- unsigned long irq_flags;
-
- bool check_on_resume;
-};
-
-#endif /* __EXTCON_GPIO_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 43e98d30d2df..aa5db8b5521a 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,6 +21,7 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
+#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
@@ -38,15 +39,14 @@
#define F2FS_MAX_QUOTAS 3
-#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
-#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */
-#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */
+#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
+#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
+#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
+#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
-#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
/*
* For further optimization on multi-head logs, on-disk layout supports maximum
@@ -102,7 +102,7 @@ struct f2fs_super_block {
__u8 uuid[16]; /* 128-bit uuid for volume */
__le16 volume_name[MAX_VOLUME_NAME]; /* volume name */
__le32 extension_count; /* # of extensions below */
- __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
+ __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */
__le32 cp_payload;
__u8 version[VERSION_LEN]; /* the kernel version */
__u8 init_version[VERSION_LEN]; /* the initial kernel version */
@@ -111,12 +111,15 @@ struct f2fs_super_block {
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
struct f2fs_device devs[MAX_DEVICES]; /* device list */
__le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
- __u8 reserved[315]; /* valid reserved region */
+ __u8 hot_ext_count; /* # of hot file extension */
+ __u8 reserved[314]; /* valid reserved region */
} __packed;
/*
* For checkpoint
*/
+#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
+#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
@@ -212,6 +215,7 @@ struct f2fs_extent {
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
+#define F2FS_PIN_FILE 0x40 /* file should not be gced */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -229,7 +233,13 @@ struct f2fs_inode {
__le32 i_ctime_nsec; /* change time in nano scale */
__le32 i_mtime_nsec; /* modification time in nano scale */
__le32 i_generation; /* file version (for NFS) */
- __le32 i_current_depth; /* only for directory depth */
+ union {
+ __le32 i_current_depth; /* only for directory depth */
+ __le16 i_gc_failures; /*
+ * # of gc failures on pinned file.
+ * only for regular files.
+ */
+ };
__le32 i_xattr_nid; /* nid to save xattr */
__le32 i_flags; /* file attributes */
__le32 i_pino; /* parent inode number */
@@ -245,8 +255,10 @@ struct f2fs_inode {
__le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */
__le32 i_projid; /* project id */
__le32 i_inode_checksum;/* inode meta checksum */
+ __le64 i_crtime; /* creation time */
+ __le32 i_crtime_nsec; /* creation time in nano scale */
__le32 i_extra_end[0]; /* for attribute size calculation */
- };
+ } __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
};
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
@@ -293,6 +305,10 @@ struct f2fs_node {
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
+#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \
+ ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \
+ BITS_PER_LONG * BITS_PER_LONG)
+
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index c3c95d18bf43..7e6c77740413 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -64,10 +64,11 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
struct kmem_cache;
+int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
-extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
+extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
-static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}
diff --git a/include/linux/fb.h b/include/linux/fb.h
index bc24e48e396d..aa74a228bb92 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -465,6 +465,11 @@ struct fb_info {
atomic_t count;
int node;
int flags;
+ /*
+ * -1 by default, set to a FB_ROTATE_* value by the driver, if it knows
+ * an LCD is not mounted upright and fbcon should rotate to compensate.
+ */
+ int fbcon_rotate_hint;
struct mutex lock; /* Lock for open/release/ioctl funcs */
struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
struct fb_var_screeninfo var; /* Current var */
@@ -564,7 +569,9 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#define fb_memcpy_fromfb sbus_memcpy_fromio
#define fb_memcpy_tofb sbus_memcpy_toio
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \
+ defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
+ defined(__arm__) || defined(__aarch64__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 1c65817673db..41615f38bcff 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/nospec.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>
@@ -82,8 +83,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
- if (fd < fdt->max_fds)
+ if (fd < fdt->max_fds) {
+ fd = array_index_nospec(fd, fdt->max_fds);
return rcu_dereference_raw(fdt->fd[fd]);
+ }
return NULL;
}
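For context (not part of this patch), array_index_nospec() clamps the index under speculation after the architectural bounds check has passed, which is the Spectre-v1 pattern being applied to the fd lookup above. A generic hedged sketch of the same pattern; the demo_* name is hypothetical:

#include <linux/nospec.h>
#include <linux/errno.h>

static int demo_lookup(const int *table, unsigned int nents, unsigned int idx)
{
	if (idx >= nents)
		return -EINVAL;

	idx = array_index_nospec(idx, nents);	/* no speculative overrun */
	return table[idx];
}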
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 80b5b482cb46..fc4e8f91b03d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -18,6 +18,7 @@
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
+#include <linux/kallsyms.h>
#include <net/sch_generic.h>
@@ -28,6 +29,7 @@ struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
+struct xdp_rxq_info;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -58,6 +60,9 @@ struct bpf_prog_aux;
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
+/* unused opcode to mark call to interpreter with arguments */
+#define BPF_CALL_ARGS 0xe0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -367,7 +372,7 @@ struct bpf_prog_aux;
#define BPF_LDST_BYTES(insn) \
({ \
- const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \
+ const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
WARN_ON(__size < 0); \
__size; \
})
@@ -455,11 +460,16 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
+ jit_requested:1,/* archs need to JIT the prog */
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
- dst_needed:1; /* Do we need dst entry? */
+ dst_needed:1, /* Do we need dst entry? */
+ blinded:1, /* Was blinded */
+ is_func:1, /* program is a bpf function */
+ kprobe_override:1; /* Do we override a kprobe? */
enum bpf_prog_type type; /* Type of BPF program */
+ enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
u8 tag[BPF_TAG_SIZE];
@@ -495,6 +505,25 @@ struct xdp_buff {
void *data_end;
void *data_meta;
void *data_hard_start;
+ struct xdp_rxq_info *rxq;
+};
+
+struct sk_msg_buff {
+ void *data;
+ void *data_end;
+ __u32 apply_bytes;
+ __u32 cork_bytes;
+ int sg_copybreak;
+ int sg_start;
+ int sg_curr;
+ int sg_end;
+ struct scatterlist sg_data[MAX_SKB_FRAGS];
+ bool sg_copy[MAX_SKB_FRAGS];
+ __u32 key;
+ __u32 flags;
+ struct bpf_map *map;
+ struct sk_buff *skb;
+ struct list_head list;
};
/* Compute the linear packet data range [data, data_end) which
@@ -678,6 +707,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
+bool bpf_opcode_in_insntable(u8 code);
+
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -709,11 +740,22 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#define __bpf_call_base_args \
+ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+ __bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);
+static inline bool bpf_dump_raw_ok(void)
+{
+ /* Reconstruction of call-sites is dependent on kallsyms,
+ * so apply the same restriction to the dump.
+ */
+ return kallsyms_show_value() == 1;
+}
+
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
@@ -748,6 +790,7 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
+struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
@@ -797,7 +840,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
return fp->jited && bpf_jit_is_ebpf();
}
-static inline bool bpf_jit_blinding_enabled(void)
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
/* These are the prerequisites, should someone ever have the
* idea to call blinding outside of them, we make sure to
@@ -805,7 +848,7 @@ static inline bool bpf_jit_blinding_enabled(void)
*/
if (!bpf_jit_is_ebpf())
return false;
- if (!bpf_jit_enable)
+ if (!prog->jit_requested)
return false;
if (!bpf_jit_harden)
return false;
@@ -978,13 +1021,34 @@ static inline int bpf_tell_extensions(void)
return SKF_AD_MAX;
}
+struct bpf_sock_addr_kern {
+ struct sock *sk;
+ struct sockaddr *uaddr;
+ /* Temporary "register" to make indirect stores to nested structures
+ * defined above. We need three registers to make such a store, but
+ * only two (src and dst) are available at convert_ctx_access time
+ */
+ u64 tmp_reg;
+};
+
struct bpf_sock_ops_kern {
struct sock *sk;
u32 op;
union {
+ u32 args[4];
u32 reply;
u32 replylong[4];
};
+ u32 is_fullsock;
+ u64 temp; /* temp and everything after is not
+ * initialized to 0 before calling
+ * the BPF program. New fields that
+ * should be initialized to 0 should
+ * be inserted before temp.
+ * temp is scratch storage used by
+ * sock_ops_convert_ctx_access
+ * as temporary storage of a register.
+ */
};
#endif /* __LINUX_FILTER_H__ */
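
With jit_requested recorded per program, the blinding check now takes the bpf_prog instead of consulting the global bpf_jit_enable sysctl. A hedged sketch of how an arch JIT might use it (the JIT itself is hypothetical; bpf_jit_blind_constants() is the existing constant-blinding helper):

#include <linux/err.h>
#include <linux/filter.h>

static struct bpf_prog *example_arch_jit(struct bpf_prog *prog)
{
	struct bpf_prog *blinded = prog;

	if (bpf_jit_blinding_enabled(prog)) {	/* keyed off prog->jit_requested */
		blinded = bpf_jit_blind_constants(prog);
		if (IS_ERR(blinded))
			return prog;		/* blinding failed: leave the program un-JITed */
	}
	/* ... emit native code for 'blinded' here ... */
	return blinded;
}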
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index d4508080348d..41050417cafb 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -85,4 +85,7 @@ static inline int request_firmware_into_buf(const struct firmware **firmware_p,
}
#endif
+
+int firmware_request_cache(struct device *device, const char *name);
+
#endif
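
The new firmware_request_cache() call lets a driver ask the firmware loader to cache an image for the suspend/resume path without loading it immediately. A hedged sketch, assuming a hypothetical probe routine and firmware name:

#include <linux/device.h>
#include <linux/firmware.h>

static int example_probe(struct device *dev)
{
	int ret;

	/* "example.fw" is hypothetical; ask for it to be cached for resume */
	ret = firmware_request_cache(dev, "example.fw");
	if (ret)
		dev_warn(dev, "firmware cache request failed: %d\n", ret);
	return 0;
}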
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index aa66c87c120b..3694821a6d2d 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -1,10 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/device.h>
-#include <linux/fpga/fpga-mgr.h>
#ifndef _LINUX_FPGA_BRIDGE_H
#define _LINUX_FPGA_BRIDGE_H
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+
struct fpga_bridge;
/**
@@ -12,11 +13,13 @@ struct fpga_bridge;
* @enable_show: returns the FPGA bridge's status
* @enable_set: set a FPGA bridge as enabled or disabled
* @fpga_bridge_remove: set FPGA into a specific state during driver remove
+ * @groups: optional attribute groups.
*/
struct fpga_bridge_ops {
int (*enable_show)(struct fpga_bridge *bridge);
int (*enable_set)(struct fpga_bridge *bridge, bool enable);
void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
+ const struct attribute_group **groups;
};
/**
@@ -43,6 +46,8 @@ struct fpga_bridge {
struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
struct fpga_image_info *info);
+struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ struct fpga_image_info *info);
void fpga_bridge_put(struct fpga_bridge *bridge);
int fpga_bridge_enable(struct fpga_bridge *bridge);
int fpga_bridge_disable(struct fpga_bridge *bridge);
@@ -50,9 +55,12 @@ int fpga_bridge_disable(struct fpga_bridge *bridge);
int fpga_bridges_enable(struct list_head *bridge_list);
int fpga_bridges_disable(struct list_head *bridge_list);
void fpga_bridges_put(struct list_head *bridge_list);
-int fpga_bridge_get_to_list(struct device_node *np,
+int fpga_bridge_get_to_list(struct device *dev,
struct fpga_image_info *info,
struct list_head *bridge_list);
+int of_fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list);
int fpga_bridge_register(struct device *dev, const char *name,
const struct fpga_bridge_ops *br_ops, void *priv);
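
fpga_bridge_get_to_list() now takes a struct device, with of_fpga_bridge_get_to_list() covering the device-tree case. A hedged sketch of collecting bridges, gating them around reprogramming, and releasing them (example_reprogram() is hypothetical; the programming step is elided):

#include <linux/fpga/fpga-bridge.h>

static int example_reprogram(struct device *dev, struct fpga_image_info *info,
			     struct list_head *bridge_list)
{
	int ret;

	ret = fpga_bridge_get_to_list(dev, info, bridge_list);
	if (ret)
		return ret;

	ret = fpga_bridges_disable(bridge_list);
	if (ret)
		goto out;
	/* ... program the FPGA while traffic through the bridges is stopped ... */
	ret = fpga_bridges_enable(bridge_list);
out:
	fpga_bridges_put(bridge_list);
	return ret;
}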
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index bfa14bc023fb..3c6de23aabdf 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -1,7 +1,8 @@
/*
* FPGA Framework
*
- * Copyright (C) 2013-2015 Altera Corporation
+ * Copyright (C) 2013-2016 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -15,12 +16,12 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-
#ifndef _LINUX_FPGA_MGR_H
#define _LINUX_FPGA_MGR_H
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
struct fpga_manager;
struct sg_table;
@@ -83,12 +84,26 @@ enum fpga_mgr_states {
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
* @config_complete_timeout_us: maximum time for FPGA to switch to operating
* status in the write_complete op.
+ * @firmware_name: name of FPGA image firmware file
+ * @sgt: scatter/gather table containing FPGA image
+ * @buf: contiguous buffer containing FPGA image
+ * @count: size of buf
+ * @dev: device that owns this
+ * @overlay: Device Tree overlay
*/
struct fpga_image_info {
u32 flags;
u32 enable_timeout_us;
u32 disable_timeout_us;
u32 config_complete_timeout_us;
+ char *firmware_name;
+ struct sg_table *sgt;
+ const char *buf;
+ size_t count;
+ struct device *dev;
+#ifdef CONFIG_OF
+ struct device_node *overlay;
+#endif
};
/**
@@ -100,6 +115,7 @@ struct fpga_image_info {
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
* @fpga_remove: optional: Set FPGA into a specific state during driver remove
+ * @groups: optional attribute groups.
*
* fpga_manager_ops are the low level functions implemented by a specific
* fpga manager driver. The optional ones are tested for NULL before being
@@ -116,6 +132,7 @@ struct fpga_manager_ops {
int (*write_complete)(struct fpga_manager *mgr,
struct fpga_image_info *info);
void (*fpga_remove)(struct fpga_manager *mgr);
+ const struct attribute_group **groups;
};
/**
@@ -138,14 +155,14 @@ struct fpga_manager {
#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
-int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
- const char *buf, size_t count);
-int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
- struct sg_table *sgt);
+struct fpga_image_info *fpga_image_info_alloc(struct device *dev);
+
+void fpga_image_info_free(struct fpga_image_info *info);
+
+int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info);
-int fpga_mgr_firmware_load(struct fpga_manager *mgr,
- struct fpga_image_info *info,
- const char *image_name);
+int fpga_mgr_lock(struct fpga_manager *mgr);
+void fpga_mgr_unlock(struct fpga_manager *mgr);
struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
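
The separate buf/sg/firmware load entry points collapse into fpga_mgr_load(), with the image source described entirely by fpga_image_info and exclusive access to the manager taken explicitly. A hedged sketch of the new flow (device/manager acquisition and the image name are hypothetical):

#include <linux/device.h>
#include <linux/fpga/fpga-mgr.h>

static int example_program(struct fpga_manager *mgr, struct device *dev)
{
	struct fpga_image_info *info;
	int ret;

	info = fpga_image_info_alloc(dev);
	if (!info)
		return -ENOMEM;

	/* "example.rbf" is a hypothetical firmware file name */
	info->firmware_name = devm_kstrdup(dev, "example.rbf", GFP_KERNEL);
	if (!info->firmware_name) {
		ret = -ENOMEM;
		goto free_info;
	}

	ret = fpga_mgr_lock(mgr);		/* take exclusive use of the manager */
	if (ret)
		goto free_info;
	ret = fpga_mgr_load(mgr, info);		/* source is chosen from the info fields */
	fpga_mgr_unlock(mgr);
free_info:
	fpga_image_info_free(info);
	return ret;
}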
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
new file mode 100644
index 000000000000..b6520318ab9c
--- /dev/null
+++ b/include/linux/fpga/fpga-region.h
@@ -0,0 +1,40 @@
+#ifndef _FPGA_REGION_H
+#define _FPGA_REGION_H
+
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-bridge.h>
+
+/**
+ * struct fpga_region - FPGA Region structure
+ * @dev: FPGA Region device
+ * @mutex: enforces exclusive reference to region
+ * @bridge_list: list of FPGA bridges specified in region
+ * @mgr: FPGA manager
+ * @info: FPGA image info
+ * @priv: private data
+ * @get_bridges: optional function to get bridges to a list
+ * @groups: optional attribute groups.
+ */
+struct fpga_region {
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to region */
+ struct list_head bridge_list;
+ struct fpga_manager *mgr;
+ struct fpga_image_info *info;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+ const struct attribute_group **groups;
+};
+
+#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
+
+struct fpga_region *fpga_region_class_find(
+ struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
+
+int fpga_region_program_fpga(struct fpga_region *region);
+int fpga_region_register(struct device *dev, struct fpga_region *region);
+int fpga_region_unregister(struct fpga_region *region);
+
+#endif /* _FPGA_REGION_H */
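
A hedged sketch of how a driver might populate and register a region against this new header (names are hypothetical; get_bridges is left NULL for a bridge-less region):

#include <linux/fpga/fpga-region.h>

static int example_region_setup(struct device *dev, struct fpga_manager *mgr)
{
	struct fpga_region *region;

	region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->mgr = mgr;
	/* region->get_bridges is optional; leave NULL if no bridges gate the region */
	return fpga_region_register(dev, region);
}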
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 511fbaabf624..92efaf1f8977 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -13,6 +13,7 @@
#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/pid.h>
@@ -390,12 +391,11 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
struct address_space {
struct inode *host; /* owner: inode, block_device */
- struct radix_tree_root page_tree; /* radix tree of all pages */
- spinlock_t tree_lock; /* and lock protecting it */
+ struct radix_tree_root i_pages; /* cached pages */
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root_cached i_mmap; /* tree of private and shared mappings */
struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
- /* Protected by tree_lock together with the radix tree */
+ /* Protected by the i_pages lock */
unsigned long nrpages; /* number of total pages */
/* number of shadow or DAX exceptional entries */
unsigned long nrexceptional;
@@ -639,7 +639,7 @@ struct inode {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
- u64 i_version;
+ atomic64_t i_version;
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
@@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
down_write_nested(&inode->i_rwsem, subclass);
}
+static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
+{
+ down_read_nested(&inode->i_rwsem, subclass);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
@@ -1312,9 +1317,12 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
+#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
+#define SB_I_UNTRUSTED_MOUNTER 0x00000040
/* Possible states of 'frozen' field */
enum {
@@ -1359,7 +1367,7 @@ struct super_block {
const struct fscrypt_operations *s_cop;
- struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
@@ -1608,6 +1616,10 @@ extern int vfs_whiteout(struct inode *, struct dentry *);
extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
int open_flag);
+int vfs_mkobj(struct dentry *, umode_t,
+ int (*f)(struct dentry *, umode_t, void *),
+ void *);
+
/*
* VFS file helper functions.
*/
@@ -1698,7 +1710,7 @@ struct file_operations {
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -1977,7 +1989,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
*
* I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
* synchronize competing switching instances and to tell
- * wb stat updates to grab mapping->tree_lock. See
+ * wb stat updates to grab the i_pages lock. See
* inode_switch_wb_work_fn() for details.
*
* I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
@@ -2005,7 +2017,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define I_WB_SWITCH (1 << 13)
#define I_OVL_INUSE (1 << 14)
-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
extern void __mark_inode_dirty(struct inode *, int);
@@ -2036,21 +2049,6 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-/**
- * inode_inc_iversion - increments i_version
- * @inode: inode that need to be updated
- *
- * Every time the inode is modified, the i_version field will be incremented.
- * The filesystem has to be mounted with i_version flag
- */
-
-static inline void inode_inc_iversion(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- inode->i_version++;
- spin_unlock(&inode->i_lock);
-}
-
enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
@@ -2386,8 +2384,8 @@ struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- struct audit_names *aname;
int refcnt;
+ struct audit_names *aname;
const char iname[];
};
@@ -2699,7 +2697,6 @@ extern sector_t bmap(struct inode *, sector_t);
#endif
extern int notify_change(struct dentry *, struct iattr *, struct inode **);
extern int inode_permission(struct inode *, int);
-extern int __inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int);
extern int __check_sticky(struct inode *dir, struct inode *inode);
@@ -2983,15 +2980,10 @@ enum {
/* filesystem does not support filling holes */
DIO_SKIP_HOLES = 0x02,
-
- /* filesystem can handle aio writes beyond i_size */
- DIO_ASYNC_EXTEND = 0x04,
-
- /* inode/fs/bdev does not need truncate protection */
- DIO_SKIP_DIO_COUNT = 0x08,
};
void dio_end_io(struct bio *bio);
+void dio_warn_stale_pagecache(struct file *filp);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
@@ -3135,6 +3127,10 @@ extern int simple_rmdir(struct inode *, struct dentry *);
extern int simple_rename(struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
extern int noop_fsync(struct file *, loff_t, loff_t, int);
+extern int noop_set_page_dirty(struct page *page);
+extern void noop_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length);
+extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
@@ -3204,7 +3200,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
if (!vma_is_dax(vma))
return false;
inode = file_inode(vma->vm_file);
- if (inode->i_mode == S_IFCHR)
+ if (S_ISCHR(inode->i_mode))
return false; /* device-dax */
return true;
}
@@ -3239,6 +3235,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
ki->ki_flags |= IOCB_DSYNC;
if (flags & RWF_SYNC)
ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+ if (flags & RWF_APPEND)
+ ki->ki_flags |= IOCB_APPEND;
return 0;
}
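
The tree_lock removal means callers reach both the cached pages and their lock through ->i_pages, the lock being taken with the xa_lock helpers from the newly included <linux/xarray.h>. A hedged sketch of the converted locking pattern (hypothetical helper; accounting and error handling elided):

#include <linux/fs.h>
#include <linux/xarray.h>

static void example_forget_page(struct address_space *mapping, struct page *page)
{
	xa_lock_irq(&mapping->i_pages);		/* was spin_lock_irq(&mapping->tree_lock) */
	radix_tree_delete(&mapping->i_pages, page->index);
	xa_unlock_irq(&mapping->i_pages);
}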
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 4c467ef50159..34cf0fdd7dc7 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -29,6 +29,18 @@ struct fscache_cache_ops;
struct fscache_object;
struct fscache_operation;
+enum fscache_obj_ref_trace {
+ fscache_obj_get_add_to_deps,
+ fscache_obj_get_queue,
+ fscache_obj_put_alloc_fail,
+ fscache_obj_put_attach_fail,
+ fscache_obj_put_drop_obj,
+ fscache_obj_put_enq_dep,
+ fscache_obj_put_queue,
+ fscache_obj_put_work,
+ fscache_obj_ref__nr_traces
+};
+
/*
* cache tag definition
*/
@@ -123,7 +135,8 @@ extern void fscache_op_work_func(struct work_struct *work);
extern void fscache_enqueue_operation(struct fscache_operation *);
extern void fscache_op_complete(struct fscache_operation *, bool);
extern void fscache_put_operation(struct fscache_operation *);
-extern void fscache_operation_init(struct fscache_operation *,
+extern void fscache_operation_init(struct fscache_cookie *,
+ struct fscache_operation *,
fscache_operation_processor_t,
fscache_operation_cancel_t,
fscache_operation_release_t);
@@ -185,7 +198,7 @@ static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
{
atomic_sub(n_pages, &op->n_pages);
if (atomic_read(&op->n_pages) <= 0)
- fscache_op_complete(&op->op, true);
+ fscache_op_complete(&op->op, false);
}
/**
@@ -231,7 +244,8 @@ struct fscache_cache_ops {
void (*lookup_complete)(struct fscache_object *object);
/* increment the usage count on this object (may fail if unmounting) */
- struct fscache_object *(*grab_object)(struct fscache_object *object);
+ struct fscache_object *(*grab_object)(struct fscache_object *object,
+ enum fscache_obj_ref_trace why);
/* pin an object in the cache */
int (*pin_object)(struct fscache_object *object);
@@ -254,7 +268,8 @@ struct fscache_cache_ops {
void (*drop_object)(struct fscache_object *object);
/* dispose of a reference to an object */
- void (*put_object)(struct fscache_object *object);
+ void (*put_object)(struct fscache_object *object,
+ enum fscache_obj_ref_trace why);
/* sync a cache */
void (*sync_cache)(struct fscache_cache *cache);
@@ -496,7 +511,7 @@ static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
{
- wake_up_atomic_t(&cookie->n_active);
+ wake_up_var(&cookie->n_active);
}
/**
@@ -538,7 +553,8 @@ extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data,
- uint16_t datalen);
+ uint16_t datalen,
+ loff_t object_size);
extern void fscache_object_retrying_stale(struct fscache_object *object);
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index fe0c349684fa..84b90a79d75a 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
+#include <linux/list_bl.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
#define fscache_available() (1)
@@ -83,45 +84,15 @@ struct fscache_cookie_def {
const void *parent_netfs_data,
const void *cookie_netfs_data);
- /* get an index key
- * - should store the key data in the buffer
- * - should return the amount of data stored
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- uint16_t (*get_key)(const void *cookie_netfs_data,
- void *buffer,
- uint16_t bufmax);
-
- /* get certain file attributes from the netfs data
- * - this function can be absent for an index
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
-
- /* get the auxiliary data from netfs data
- * - this function can be absent if the index carries no state data
- * - should store the auxiliary data in the buffer
- * - should return the amount of amount stored
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- uint16_t (*get_aux)(const void *cookie_netfs_data,
- void *buffer,
- uint16_t bufmax);
-
/* consult the netfs about the state of an object
* - this function can be absent if the index carries no state data
* - the netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data
+ * presented, as are the auxiliary data and the object size
*/
enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
const void *data,
- uint16_t datalen);
+ uint16_t datalen,
+ loff_t object_size);
/* get an extra reference on a read context
* - this function can be absent if the completion function doesn't
@@ -154,7 +125,6 @@ struct fscache_netfs {
uint32_t version; /* indexing version */
const char *name; /* filesystem name */
struct fscache_cookie *primary_index;
- struct list_head link; /* internal link */
};
/*
@@ -173,6 +143,7 @@ struct fscache_cookie {
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
+ struct hlist_bl_node hash_link; /* Link in hash table */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
@@ -186,6 +157,22 @@ struct fscache_cookie {
#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
+#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */
+#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */
+#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */
+
+ u8 type; /* Type of object */
+ u8 key_len; /* Length of index key */
+ u8 aux_len; /* Length of auxiliary data */
+ u32 key_hash; /* Hash of parent, type, key, len */
+ union {
+ void *key; /* Index key */
+ u8 inline_key[16]; /* - If the key is short enough */
+ };
+ union {
+ void *aux; /* Auxiliary data */
+ u8 inline_aux[8]; /* - If the aux data is short enough */
+ };
};
static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
@@ -208,10 +195,12 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
extern struct fscache_cookie *__fscache_acquire_cookie(
struct fscache_cookie *,
const struct fscache_cookie_def *,
- void *, bool);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
-extern int __fscache_check_consistency(struct fscache_cookie *);
-extern void __fscache_update_cookie(struct fscache_cookie *);
+ const void *, size_t,
+ const void *, size_t,
+ void *, loff_t, bool);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool);
+extern int __fscache_check_consistency(struct fscache_cookie *, const void *);
+extern void __fscache_update_cookie(struct fscache_cookie *, const void *);
extern int __fscache_attr_changed(struct fscache_cookie *);
extern void __fscache_invalidate(struct fscache_cookie *);
extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
@@ -228,7 +217,7 @@ extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
void *,
gfp_t);
extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
-extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
+extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t);
extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
@@ -238,8 +227,8 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
struct inode *);
extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
struct list_head *pages);
-extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
-extern void __fscache_enable_cookie(struct fscache_cookie *,
+extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
+extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
bool (*)(void *), void *);
/**
@@ -317,8 +306,13 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag)
* fscache_acquire_cookie - Acquire a cookie to represent a cache object
* @parent: The cookie that's to be the parent of this one
* @def: A description of the cache object, including callback operations
+ * @index_key: The index key for this cookie
+ * @index_key_len: Size of the index key
+ * @aux_data: The auxiliary data for the cookie (may be NULL)
+ * @aux_data_len: Size of the auxiliary data buffer
* @netfs_data: An arbitrary piece of data to be kept in the cookie to
* represent the cache object to the netfs
+ * @object_size: The initial size of object
* @enable: Whether or not to enable a data cookie immediately
*
* This function is used to inform FS-Cache about part of an index hierarchy
@@ -332,12 +326,19 @@ static inline
struct fscache_cookie *fscache_acquire_cookie(
struct fscache_cookie *parent,
const struct fscache_cookie_def *def,
+ const void *index_key,
+ size_t index_key_len,
+ const void *aux_data,
+ size_t aux_data_len,
void *netfs_data,
+ loff_t object_size,
bool enable)
{
if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
- return __fscache_acquire_cookie(parent, def, netfs_data,
- enable);
+ return __fscache_acquire_cookie(parent, def,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ netfs_data, object_size, enable);
else
return NULL;
}
@@ -346,36 +347,44 @@ struct fscache_cookie *fscache_acquire_cookie(
* fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
* it
* @cookie: The cookie being returned
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
* @retire: True if the cache object the cookie represents is to be discarded
*
* This function returns a cookie to the cache, forcibly discarding the
- * associated cache object if retire is set to true.
+ * associated cache object if retire is set to true. The opportunity is
+ * provided to update the auxiliary data in the cache before the object is
+ * disconnected.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ bool retire)
{
if (fscache_cookie_valid(cookie))
- __fscache_relinquish_cookie(cookie, retire);
+ __fscache_relinquish_cookie(cookie, aux_data, retire);
}
/**
- * fscache_check_consistency - Request that if the cache is updated
+ * fscache_check_consistency - Request validation of a cache's auxiliary data
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
*
- * Request an consistency check from fscache, which passes the request
- * to the backing cache.
+ * Request a consistency check from fscache, which passes the request to the
+ * backing cache. The auxiliary data on the cookie will be updated first if
+ * @aux_data is set.
*
* Returns 0 if consistent and -ESTALE if inconsistent. May also
* return -ENOMEM and -ERESTARTSYS.
*/
static inline
-int fscache_check_consistency(struct fscache_cookie *cookie)
+int fscache_check_consistency(struct fscache_cookie *cookie,
+ const void *aux_data)
{
if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_check_consistency(cookie);
+ return __fscache_check_consistency(cookie, aux_data);
else
return 0;
}
@@ -383,18 +392,20 @@ int fscache_check_consistency(struct fscache_cookie *cookie)
/**
* fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
*
* Request an update of the index data for the cache object associated with the
- * cookie.
+ * cookie. The auxiliary data on the cookie will be updated first if @aux_data
+ * is set.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
-void fscache_update_cookie(struct fscache_cookie *cookie)
+void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
{
if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_update_cookie(cookie);
+ __fscache_update_cookie(cookie, aux_data);
}
/**
@@ -648,6 +659,7 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie,
* fscache_write_page - Request storage of a page in the cache
* @cookie: The cookie representing the cache object
* @page: The netfs page to store
+ * @object_size: Updated size of object
* @gfp: The conditions under which memory allocation should be made
*
* Request the contents of the netfs page be written into the cache. This
@@ -665,10 +677,11 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie,
static inline
int fscache_write_page(struct fscache_cookie *cookie,
struct page *page,
+ loff_t object_size,
gfp_t gfp)
{
if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_write_page(cookie, page, gfp);
+ return __fscache_write_page(cookie, page, object_size, gfp);
else
return -ENOBUFS;
}
@@ -780,6 +793,7 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
/**
* fscache_disable_cookie - Disable a cookie
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
* @invalidate: Invalidate the backing object
*
* Disable a cookie from accepting further alloc, read, write, invalidate,
@@ -790,34 +804,44 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
*
* If @invalidate is set, then the backing object will be invalidated and
* detached, otherwise it will just be detached.
+ *
+ * If @aux_data is set, then auxiliary data will be updated from that.
*/
static inline
-void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+void fscache_disable_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ bool invalidate)
{
if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_disable_cookie(cookie, invalidate);
+ __fscache_disable_cookie(cookie, aux_data, invalidate);
}
/**
* fscache_enable_cookie - Reenable a cookie
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @object_size: Current size of object
* @can_enable: A function to permit enablement once lock is held
* @data: Data for can_enable()
*
* Reenable a previously disabled cookie, allowing it to accept further alloc,
* read, write, invalidate, update or acquire operations. An attempt will be
- * made to immediately reattach the cookie to a backing object.
+ * made to immediately reattach the cookie to a backing object. If @aux_data
+ * is set, the auxiliary data attached to the cookie will be updated.
*
* The can_enable() function is called (if not NULL) once the enablement lock
* is held to rule on whether enablement is still permitted to go ahead.
*/
static inline
void fscache_enable_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ loff_t object_size,
bool (*can_enable)(void *data),
void *data)
{
if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
- __fscache_enable_cookie(cookie, can_enable, data);
+ __fscache_enable_cookie(cookie, aux_data, object_size,
+ can_enable, data);
}
#endif /* _LINUX_FSCACHE_H */
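
With get_key()/get_aux() gone from the cookie definition, the netfs passes its index key and auxiliary data directly to fscache_acquire_cookie(). A hedged sketch (the aux layout and helper are hypothetical):

#include <linux/fs.h>
#include <linux/fscache.h>

struct example_aux {			/* hypothetical auxiliary/coherency data */
	u64 mtime;
	u64 size;
};

static struct fscache_cookie *
example_acquire(struct fscache_cookie *parent,
		const struct fscache_cookie_def *def, struct inode *inode)
{
	u64 key = inode->i_ino;		/* index key for this object */
	struct example_aux aux = {
		.mtime = inode->i_mtime.tv_sec,
		.size  = i_size_read(inode),
	};

	return fscache_acquire_cookie(parent, def,
				      &key, sizeof(key),
				      &aux, sizeof(aux),
				      inode,			/* netfs_data */
				      i_size_read(inode),	/* object_size */
				      true);			/* enable now */
}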
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 08b4b40c5aa8..952ab97af325 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -14,42 +14,13 @@
#ifndef _LINUX_FSCRYPT_H
#define _LINUX_FSCRYPT_H
-#include <linux/key.h>
#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/bio.h>
-#include <linux/dcache.h>
-#include <crypto/skcipher.h>
-#include <uapi/linux/fs.h>
#define FS_CRYPTO_BLOCK_SIZE 16
+struct fscrypt_ctx;
struct fscrypt_info;
-struct fscrypt_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
-};
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
-struct fscrypt_symlink_data {
- __le16 len;
- char encrypted_path[1];
-} __packed;
-
struct fscrypt_str {
unsigned char *name;
u32 len;
@@ -68,89 +39,14 @@ struct fscrypt_name {
#define fname_name(p) ((p)->disk_name.name)
#define fname_len(p) ((p)->disk_name.len)
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto opertions for filesystems
- */
-struct fscrypt_operations {
- unsigned int flags;
- const char *key_prefix;
- int (*get_context)(struct inode *, void *, size_t);
- int (*set_context)(struct inode *, const void *, size_t, void *);
- bool (*dummy_context)(struct inode *);
- bool (*empty_dir)(struct inode *);
- unsigned (*max_namelen)(struct inode *);
-};
-
/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
- if (inode->i_sb->s_cop->dummy_context &&
- inode->i_sb->s_cop->dummy_context(inode))
- return true;
- return false;
-}
-
-static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
- u32 filenames_mode)
-{
- if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
- return true;
-
- if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
- return true;
-
- return false;
-}
-
-static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
-{
- if (str->len == 1 && str->name[0] == '.')
- return true;
-
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
- return true;
-
- return false;
-}
-
#if __FS_HAS_ENCRYPTION
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
- return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-}
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
- return (inode->i_crypt_info != NULL);
-}
-
#include <linux/fscrypt_supp.h>
-
-#else /* !__FS_HAS_ENCRYPTION */
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
- WARN_ON_ONCE(1);
- return ERR_PTR(-EINVAL);
-}
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
- return 0;
-}
-
+#else
#include <linux/fscrypt_notsupp.h>
-#endif /* __FS_HAS_ENCRYPTION */
+#endif
/**
* fscrypt_require_key - require an inode's encryption key
@@ -291,4 +187,68 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry,
return 0;
}
+/**
+ * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink
+ * @dir: directory in which the symlink is being created
+ * @target: plaintext symlink target
+ * @len: length of @target excluding null terminator
+ * @max_len: space the filesystem has available to store the symlink target
+ * @disk_link: (out) the on-disk symlink target being prepared
+ *
+ * This function computes the size the symlink target will require on-disk,
+ * stores it in @disk_link->len, and validates it against @max_len. An
+ * encrypted symlink may be longer than the original.
+ *
+ * Additionally, @disk_link->name is set to @target if the symlink will be
+ * unencrypted, but left NULL if the symlink will be encrypted. For encrypted
+ * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the
+ * on-disk target later. (The reason for the two-step process is that some
+ * filesystems need to know the size of the symlink target before creating the
+ * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.)
+ *
+ * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long,
+ * -ENOKEY if the encryption key is missing, or another -errno code if a problem
+ * occurred while setting up the encryption key.
+ */
+static inline int fscrypt_prepare_symlink(struct inode *dir,
+ const char *target,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+{
+ if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir))
+ return __fscrypt_prepare_symlink(dir, len, max_len, disk_link);
+
+ disk_link->name = (unsigned char *)target;
+ disk_link->len = len + 1;
+ if (disk_link->len > max_len)
+ return -ENAMETOOLONG;
+ return 0;
+}
+
+/**
+ * fscrypt_encrypt_symlink - encrypt the symlink target if needed
+ * @inode: symlink inode
+ * @target: plaintext symlink target
+ * @len: length of @target excluding null terminator
+ * @disk_link: (in/out) the on-disk symlink target being prepared
+ *
+ * If the symlink target needs to be encrypted, then this function encrypts it
+ * into @disk_link->name. fscrypt_prepare_symlink() must have been called
+ * previously to compute @disk_link->len. If the filesystem did not allocate a
+ * buffer for @disk_link->name after calling fscrypt_prepare_symlink(), then one
+ * will be kmalloc()'ed and the filesystem will be responsible for freeing it.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ if (IS_ENCRYPTED(inode))
+ return __fscrypt_encrypt_symlink(inode, target, len, disk_link);
+ return 0;
+}
+
#endif /* _LINUX_FSCRYPT_H */
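
The two helpers above split symlink handling into a sizing/validation step before the inode is created and an encryption step afterwards. A hedged sketch of a filesystem's ->symlink path using them (hypothetical function; the on-disk write is elided):

#include <linux/fscrypt.h>
#include <linux/string.h>

static int example_symlink(struct inode *dir, struct inode *inode,
			   const char *target, unsigned int max_len)
{
	unsigned int len = strlen(target);
	struct fscrypt_str disk_link;
	int err;

	err = fscrypt_prepare_symlink(dir, target, len, max_len, &disk_link);
	if (err)
		return err;

	err = fscrypt_encrypt_symlink(inode, target, len, &disk_link);
	if (err)
		return err;

	/*
	 * Write disk_link.name (disk_link.len bytes) to the symlink's data.
	 * If fscrypt kmalloc()'ed the buffer, the filesystem must free it.
	 */
	return 0;
}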
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 63e58808519a..44b50c04bae9 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -14,6 +14,16 @@
#ifndef _LINUX_FSCRYPT_NOTSUPP_H
#define _LINUX_FSCRYPT_NOTSUPP_H
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return false;
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ return false;
+}
+
/* crypto.c */
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
gfp_t gfp_flags)
@@ -43,6 +53,11 @@ static inline int fscrypt_decrypt_page(const struct inode *inode,
return -EOPNOTSUPP;
}
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
static inline void fscrypt_restore_control_page(struct page *page)
{
@@ -90,8 +105,7 @@ static inline int fscrypt_get_encryption_info(struct inode *inode)
return -EOPNOTSUPP;
}
-static inline void fscrypt_put_encryption_info(struct inode *inode,
- struct fscrypt_info *ci)
+static inline void fscrypt_put_encryption_info(struct inode *inode)
{
return;
}
@@ -116,16 +130,8 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
return;
}
-static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
- u32 ilen)
-{
- /* never happens */
- WARN_ON(1);
- return 0;
-}
-
static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
- u32 ilen,
+ u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
return -EOPNOTSUPP;
@@ -144,13 +150,6 @@ static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
return -EOPNOTSUPP;
}
-static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-
static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
@@ -208,4 +207,28 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
+static inline int __fscrypt_prepare_symlink(struct inode *dir,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline const char *fscrypt_get_symlink(struct inode *inode,
+ const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index cf9e9fc02f0a..477a7a6504d2 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -11,8 +11,54 @@
#ifndef _LINUX_FSCRYPT_SUPP_H
#define _LINUX_FSCRYPT_SUPP_H
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ unsigned int flags;
+ const char *key_prefix;
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ bool (*dummy_context)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+};
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return (inode->i_crypt_info != NULL);
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ return inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode);
+}
+
/* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
@@ -20,6 +66,12 @@ extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
u64, gfp_t);
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
unsigned int, u64);
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+}
+
extern void fscrypt_restore_control_page(struct page *);
extern const struct dentry_operations fscrypt_d_ops;
@@ -44,7 +96,7 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *,
void *, bool);
/* keyinfo.c */
extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+extern void fscrypt_put_encryption_info(struct inode *);
/* fname.c */
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
@@ -55,14 +107,11 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
kfree(fname->crypto_buf.name);
}
-extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
const struct fscrypt_str *, struct fscrypt_str *);
-extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
- struct fscrypt_str *);
#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
@@ -153,5 +202,14 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *new_dentry,
unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link);
+extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link);
+extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done);
#endif /* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
new file mode 100644
index 000000000000..f27cb14088a4
--- /dev/null
+++ b/include/linux/fsl/mc.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale Management Complex (MC) bus public interface
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+#ifndef _FSL_MC_H_
+#define _FSL_MC_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+
+#define FSL_MC_VENDOR_FREESCALE 0x1957
+
+struct irq_domain;
+struct msi_domain_info;
+
+struct fsl_mc_device;
+struct fsl_mc_io;
+
+/**
+ * struct fsl_mc_driver - MC object device driver object
+ * @driver: Generic device driver
+ * @match_id_table: table of supported device matching Ids
+ * @probe: Function called when a device is added
+ * @remove: Function called when a device is removed
+ * @shutdown: Function called at shutdown time to quiesce the device
+ * @suspend: Function called when a device is stopped
+ * @resume: Function called when a device is resumed
+ *
+ * Generic DPAA device driver object for device drivers that are registered
+ * with a DPRC bus. This structure is to be embedded in each device-specific
+ * driver structure.
+ */
+struct fsl_mc_driver {
+ struct device_driver driver;
+ const struct fsl_mc_device_id *match_id_table;
+ int (*probe)(struct fsl_mc_device *dev);
+ int (*remove)(struct fsl_mc_device *dev);
+ void (*shutdown)(struct fsl_mc_device *dev);
+ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
+ int (*resume)(struct fsl_mc_device *dev);
+};
+
+#define to_fsl_mc_driver(_drv) \
+ container_of(_drv, struct fsl_mc_driver, driver)
+
+/**
+ * enum fsl_mc_pool_type - Types of allocatable MC bus resources
+ *
+ * Entries in these enum are used as indices in the array of resource
+ * pools of an fsl_mc_bus object.
+ */
+enum fsl_mc_pool_type {
+ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
+ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
+ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
+ FSL_MC_POOL_IRQ,
+
+ /*
+ * NOTE: New resource pool types must be added before this entry
+ */
+ FSL_MC_NUM_POOL_TYPES
+};
+
+/**
+ * struct fsl_mc_resource - MC generic resource
+ * @type: type of resource
+ * @id: unique MC resource Id within the resources of the same type
+ * @data: pointer to resource-specific data if the resource is currently
+ * allocated, or NULL if the resource is not currently allocated.
+ * @parent_pool: pointer to the parent resource pool from which this
+ * resource is allocated from.
+ * @node: Node in the free list of the corresponding resource pool
+ *
+ * NOTE: This structure is to be embedded as a field of specific
+ * MC resource structures.
+ */
+struct fsl_mc_resource {
+ enum fsl_mc_pool_type type;
+ s32 id;
+ void *data;
+ struct fsl_mc_resource_pool *parent_pool;
+ struct list_head node;
+};
+
+/**
+ * struct fsl_mc_device_irq - MC object device message-based interrupt
+ * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
+ * @mc_dev: MC object device that owns this interrupt
+ * @dev_irq_index: device-relative IRQ index
+ * @resource: MC generic resource associated with the interrupt
+ */
+struct fsl_mc_device_irq {
+ struct msi_desc *msi_desc;
+ struct fsl_mc_device *mc_dev;
+ u8 dev_irq_index;
+ struct fsl_mc_resource resource;
+};
+
+#define to_fsl_mc_irq(_mc_resource) \
+ container_of(_mc_resource, struct fsl_mc_device_irq, resource)
+
+/* Opened state - Indicates that an object is open by at least one owner */
+#define FSL_MC_OBJ_STATE_OPEN 0x00000001
+/* Plugged state - Indicates that the object is plugged */
+#define FSL_MC_OBJ_STATE_PLUGGED 0x00000002
+
+/**
+ * Shareability flag - Object flag indicating no memory shareability.
+ * The object generates memory accesses that are non-coherent with other
+ * masters; the user is responsible for proper memory handling through
+ * IOMMU configuration.
+ */
+#define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+
+/**
+ * struct fsl_mc_obj_desc - Object descriptor
+ * @type: Type of object: NULL terminated string
+ * @id: ID of logical object resource
+ * @vendor: Object vendor identifier
+ * @ver_major: Major version number
+ * @ver_minor: Minor version number
+ * @irq_count: Number of interrupts supported by the object
+ * @region_count: Number of mappable regions supported by the object
+ * @state: Object state: combination of FSL_MC_OBJ_STATE_ states
+ * @label: Object label: NULL terminated string
+ * @flags: Object's flags
+ */
+struct fsl_mc_obj_desc {
+ char type[16];
+ int id;
+ u16 vendor;
+ u16 ver_major;
+ u16 ver_minor;
+ u8 irq_count;
+ u8 region_count;
+ u32 state;
+ char label[16];
+ u16 flags;
+};
+
+/**
+ * Bit masks for a MC object device (struct fsl_mc_device) flags
+ */
+#define FSL_MC_IS_DPRC 0x0001
+
+/**
+ * struct fsl_mc_device - MC object device object
+ * @dev: Linux driver model device object
+ * @dma_mask: Default DMA mask
+ * @flags: MC object device flags
+ * @icid: Isolation context ID for the device
+ * @mc_handle: MC handle for the corresponding MC object opened
+ * @mc_io: Pointer to MC IO object assigned to this device or
+ * NULL if none.
+ * @obj_desc: MC description of the DPAA device
+ * @regions: pointer to array of MMIO region entries
+ * @irqs: pointer to array of pointers to interrupts allocated to this device
+ * @resource: generic resource associated with this MC object device, if any.
+ *
+ * Generic device object for MC object devices that are "attached" to a
+ * MC bus.
+ *
+ * NOTES:
+ * - For a non-DPRC object its icid is the same as its parent DPRC's icid.
+ * - The SMMU notifier callback gets invoked after device_add() has been
+ * called for an MC object device, but before the device-specific probe
+ * callback gets called.
+ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC
+ * portals. For all other MC objects, their device drivers are responsible for
+ * allocating MC portals for them by calling fsl_mc_portal_allocate().
+ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are
+ * treated as resources that can be allocated/deallocated from the
+ * corresponding resource pool in the object's parent DPRC, using the
+ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects
+ * are known as "allocatable" objects. For them, the corresponding
+ * fsl_mc_device's 'resource' points to the associated resource object.
+ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI),
+ * 'resource' is NULL.
+ */
+struct fsl_mc_device {
+ struct device dev;
+ u64 dma_mask;
+ u16 flags;
+ u16 icid;
+ u16 mc_handle;
+ struct fsl_mc_io *mc_io;
+ struct fsl_mc_obj_desc obj_desc;
+ struct resource *regions;
+ struct fsl_mc_device_irq **irqs;
+ struct fsl_mc_resource *resource;
+};
+
+#define to_fsl_mc_device(_dev) \
+ container_of(_dev, struct fsl_mc_device, dev)
+
+#define MC_CMD_NUM_OF_PARAMS 7
+
+struct mc_cmd_header {
+ u8 src_id;
+ u8 flags_hw;
+ u8 status;
+ u8 flags_sw;
+ __le16 token;
+ __le16 cmd_id;
+};
+
+struct fsl_mc_command {
+ u64 header;
+ u64 params[MC_CMD_NUM_OF_PARAMS];
+};
+
+enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
+ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
+};
+
+/*
+ * MC command flags
+ */
+
+/* High priority flag */
+#define MC_CMD_FLAG_PRI 0x80
+/* Command completion flag */
+#define MC_CMD_FLAG_INTR_DIS 0x01
+
+static inline u64 mc_encode_cmd_header(u16 cmd_id,
+ u32 cmd_flags,
+ u16 token)
+{
+ u64 header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+ hdr->cmd_id = cpu_to_le16(cmd_id);
+ hdr->token = cpu_to_le16(token);
+ hdr->status = MC_CMD_STATUS_READY;
+ if (cmd_flags & MC_CMD_FLAG_PRI)
+ hdr->flags_hw = MC_CMD_FLAG_PRI;
+ if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
+ hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
+
+ return header;
+}
+
+static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 token = le16_to_cpu(hdr->token);
+
+ return token;
+}
+
+struct mc_rsp_create {
+ __le32 object_id;
+};
+
+struct mc_rsp_api_ver {
+ __le16 major_ver;
+ __le16 minor_ver;
+};
+
+static inline u32 mc_cmd_read_object_id(struct fsl_mc_command *cmd)
+{
+ struct mc_rsp_create *rsp_params;
+
+ rsp_params = (struct mc_rsp_create *)cmd->params;
+ return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline void mc_cmd_read_api_version(struct fsl_mc_command *cmd,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_rsp_api_ver *rsp_params;
+
+ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
+ *major_ver = le16_to_cpu(rsp_params->major_ver);
+ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
+}
+
+/**
+ * Bit masks for a MC I/O object (struct fsl_mc_io) flags
+ */
+#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001
+
+/**
+ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
+ * @dev: device associated with this Mc I/O object
+ * @flags: flags for mc_send_command()
+ * @portal_size: MC command portal size in bytes
+ * @portal_phys_addr: MC command portal physical address
+ * @portal_virt_addr: MC command portal virtual address
+ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
+ *
+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
+ * set:
+ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
+ * portal, if the fsl_mc_io object was created with the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
+ * fsl_mc_io object must be made only from non-atomic context.
+ *
+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
+ * set:
+ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
+ * portal, if the fsl_mc_io object was created with the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
+ * fsl_mc_io object can be made from atomic or non-atomic context.
+ */
+struct fsl_mc_io {
+ struct device *dev;
+ u16 flags;
+ u32 portal_size;
+ phys_addr_t portal_phys_addr;
+ void __iomem *portal_virt_addr;
+ struct fsl_mc_device *dpmcp_dev;
+ union {
+ /*
+ * This field is only meaningful if the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
+ */
+ struct mutex mutex; /* serializes mc_send_command() */
+
+ /*
+ * This field is only meaningful if the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
+ */
+ spinlock_t spinlock; /* serializes mc_send_command() */
+ };
+};
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
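+
+/*
+ * Hedged usage sketch: build a portal command with mc_encode_cmd_header(),
+ * send it with mc_send_command() and pull the response out of cmd.params.
+ * EXAMPLE_CMDID and example_read_object_id() are hypothetical.
+ */
static inline int example_read_object_id(struct fsl_mc_io *mc_io, u16 token,
					 u32 *obj_id)
{
	struct fsl_mc_command cmd = { 0 };
	int err;

	cmd.header = mc_encode_cmd_header(EXAMPLE_CMDID, MC_CMD_FLAG_PRI, token);
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	*obj_id = mc_cmd_read_object_id(&cmd);
	return 0;
}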
+
+#ifdef CONFIG_FSL_MC_BUS
+#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
+#else
+/* If the fsl-mc bus is not present, a device cannot belong to it */
+#define dev_is_fsl_mc(_dev) (0)
+#endif
+
+/*
+ * module_fsl_mc_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_fsl_mc_driver(__fsl_mc_driver) \
+ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \
+ fsl_mc_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define fsl_mc_driver_register(drv) \
+ __fsl_mc_driver_register(drv, THIS_MODULE)
+
+int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
+ struct module *owner);
+
+void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
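+
+/*
+ * Hedged sketch of a minimal driver built on this bus; the "example" names
+ * are hypothetical and the probe/remove bodies are elided.
+ */
static const struct fsl_mc_device_id example_id_table[] = {
	{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpni" },
	{ }
};

static int example_probe(struct fsl_mc_device *mc_dev)
{
	/* allocate portals/objects with the helpers declared in this header */
	return 0;
}

static int example_remove(struct fsl_mc_device *mc_dev)
{
	return 0;
}

static struct fsl_mc_driver example_driver = {
	.driver = {
		.name = "example",
	},
	.match_id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

module_fsl_mc_driver(example_driver);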
+
+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io);
+
+void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
+
+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
+
+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_device **new_mc_adev);
+
+void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
+
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
+
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+
+extern struct bus_type fsl_mc_bus_type;
+
+extern struct device_type fsl_mc_bus_dprc_type;
+extern struct device_type fsl_mc_bus_dpni_type;
+extern struct device_type fsl_mc_bus_dpio_type;
+extern struct device_type fsl_mc_bus_dpsw_type;
+extern struct device_type fsl_mc_bus_dpbp_type;
+extern struct device_type fsl_mc_bus_dpcon_type;
+extern struct device_type fsl_mc_bus_dpmcp_type;
+extern struct device_type fsl_mc_bus_dpmac_type;
+extern struct device_type fsl_mc_bus_dprtc_type;
+
+static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dprc_type;
+}
+
+static inline bool is_fsl_mc_bus_dpni(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpni_type;
+}
+
+static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpio_type;
+}
+
+static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
+}
+
+static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
+}
+
+static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpcon_type;
+}
+
+static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type;
+}
+
+static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpmac_type;
+}
+
+static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
+}
+
+/*
+ * Data Path Buffer Pool (DPBP) API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+int dpbp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpbp_id,
+ u16 *token);
+
+int dpbp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+struct dpbp_attr {
+ int id;
+ u16 bpid;
+};
+
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_attr *attr);
+
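A hedged sketch of the usual DPBP call sequence: open the object by id, enable it, then read its attributes to learn the hardware buffer pool id. The mc_io, dpbp_id and dev variables are assumed to come from the surrounding driver (for instance an MC portal obtained with fsl_mc_portal_allocate()).

	struct dpbp_attr attr;
	u16 token;
	int err;

	err = dpbp_open(mc_io, 0, dpbp_id, &token);
	if (err)
		return err;

	err = dpbp_enable(mc_io, 0, token);
	if (!err)
		err = dpbp_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		dev_info(dev, "using hardware buffer pool %u\n", attr.bpid);

	/* on teardown: dpbp_disable(mc_io, 0, token); dpbp_close(mc_io, 0, token); */
	return err;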
+/* Data Path Concentrator (DPCON) API
+ * Contains initialization APIs and runtime control APIs for DPCON
+ */
+
+/**
+ * Use it to disable notifications; see dpcon_set_notification()
+ */
+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
+int dpcon_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpcon_id,
+ u16 *token);
+
+int dpcon_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpcon_attr - Structure representing DPCON attributes
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_attr {
+ int id;
+ u16 qbman_ch_id;
+ u8 num_priorities;
+};
+
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_attr *attr);
+
+/**
+ * struct dpcon_notification_cfg - Structure representing notification params
+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
+ * @priority: Priority selection within the DPIO channel; valid values
+ * are 0-7, depending on the number of priorities in that channel
+ * @user_ctx: User context value provided with each CDAN message
+ */
+struct dpcon_notification_cfg {
+ int dpio_id;
+ u8 priority;
+ u64 user_ctx;
+};
+
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_notification_cfg *cfg);
+
+#endif /* _FSL_MC_H_ */
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index c332f0a45607..3fdfede2f0f3 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -734,11 +734,7 @@ struct fsl_ifc_nand {
u32 res19[0x10];
__be32 nand_fsr;
u32 res20;
- /* The V1 nand_eccstat is actually 4 words that overlaps the
- * V2 nand_eccstat.
- */
- __be32 v1_nand_eccstat[2];
- __be32 v2_nand_eccstat[6];
+ __be32 nand_eccstat[8];
u32 res21[0x1c];
__be32 nanndcr;
u32 res22[0x2];
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 067d52e95f02..9f1edb92c97e 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -331,6 +331,12 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
+/* Queue overflow event to a notification group */
+static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
+{
+ fsnotify_add_event(group, group->overflow_event, NULL);
+}
+
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3319df9727aa..9c3c9a319e48 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -766,9 +766,6 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/* for init task */
-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
-
/*
* Stack of return addresses for functions
* of a thread.
@@ -846,7 +843,6 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
-#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -925,10 +921,6 @@ extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
-#ifdef CONFIG_PREEMPT
-#define INIT_TRACE_RECURSION .trace_recursion = 0,
-#endif
-
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
@@ -937,10 +929,6 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
-#ifndef INIT_TRACE_RECURSION
-#define INIT_TRACE_RECURSION
-#endif
-
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/futex.h b/include/linux/futex.h
index c0fb9a24bbd2..821ae502d3d8 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -9,9 +9,6 @@ struct inode;
struct mm_struct;
struct task_struct;
-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
- u32 __user *uaddr2, u32 val2, u32 val3);
-
extern int
handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
@@ -55,6 +52,9 @@ union futex_key {
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
+
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
@@ -64,6 +64,13 @@ extern int futex_cmpxchg_enabled;
static inline void exit_robust_list(struct task_struct *curr)
{
}
+
+static inline long do_futex(u32 __user *uaddr, int op, u32 val,
+ ktime_t *timeout, u32 __user *uaddr2,
+ u32 val2, u32 val3)
+{
+ return -EINVAL;
+}
#endif
#ifdef CONFIG_FUTEX_PI
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 411a84c6c400..4fe8f289b3f6 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
struct fwnode_operations;
+struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -51,6 +52,7 @@ struct fwnode_reference_args {
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
* @property_read_integer_array: Read an array of integer properties. Return
* zero on success, a negative error code
@@ -71,6 +73,8 @@ struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
+ const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+ const struct device *dev);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
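A hedged consumer-side sketch: the device_get_match_data() wrapper in <linux/property.h> is expected to dispatch to this new fwnode operation. struct example_cfg and example_setup() are hypothetical.

struct example_cfg {
	unsigned int max_channels;
};

static int example_probe(struct device *dev)
{
	const struct example_cfg *cfg;

	/* resolved through the fwnode ->device_get_match_data() hook */
	cfg = device_get_match_data(dev);
	if (!cfg)
		return -ENODEV;

	return example_setup(dev, cfg->max_channels);	/* hypothetical helper */
}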
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index ecc2928e8046..bc738504ab4a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -31,8 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq;
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds genl mutex.
+ * the READ_ONCE(), because caller holds genl mutex.
*/
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5144ebe046c9..c826b0b5232a 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -198,6 +198,7 @@ struct gendisk {
void *private_data;
int flags;
+ struct rw_semaphore lookup_sem;
struct kobject *slave_dir;
struct timer_rand_state *random;
@@ -395,6 +396,11 @@ static inline void add_disk(struct gendisk *disk)
{
device_add_disk(NULL, disk);
}
+extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
+static inline void add_disk_no_queue_reg(struct gendisk *disk)
+{
+ device_add_disk_no_queue_reg(NULL, disk);
+}
extern void del_gendisk(struct gendisk *gp);
extern struct gendisk *get_gendisk(dev_t dev, int *partno);
@@ -595,8 +601,9 @@ extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);
extern struct gendisk *__alloc_disk_node(int minors, int node_id);
-extern struct kobject *get_disk(struct gendisk *disk);
+extern struct kobject *get_disk_and_module(struct gendisk *disk);
extern void put_disk(struct gendisk *disk);
+extern void put_disk_and_module(struct gendisk *disk);
extern void blk_register_region(dev_t devt, unsigned long range,
struct module *module,
struct kobject *(*probe)(dev_t, int *, void *),
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 604967609e55..83f81ac53282 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -2,6 +2,7 @@
#ifndef GENL_MAGIC_FUNC_H
#define GENL_MAGIC_FUNC_H
+#include <linux/build_bug.h>
#include <linux/genl_magic_struct.h>
/*
@@ -132,17 +133,6 @@ static void dprint_array(const char *dir, int nla_type,
* use one static buffer for parsing of nested attributes */
static struct nlattr *nested_attr_tb[128];
-#ifndef BUILD_BUG_ON
-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-/* Force a compilation error if condition is true, but also produce a
- result (of value 0 and type size_t), so the expression can be used
- e.g. in a structure initializer (or where-ever else comma expressions
- aren't permitted). */
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
-#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
-#endif
-
#undef GENL_struct
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
/* *_from_attrs functions are static, but potentially unused */ \
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 8ef7fc0ce0f0..91ed23468530 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,4 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * <linux/gpio.h>
+ *
+ * This is the LEGACY GPIO bulk include file, including legacy APIs. It is
+ * used for GPIO drivers still referencing the global GPIO numberspace,
+ * and should not be included in new code.
+ *
+ * If you're implementing a GPIO driver, only include <linux/gpio/driver.h>
+ * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h>
+ */
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 7447d85dbe2f..dbd065963296 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -139,6 +139,7 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
@@ -150,8 +151,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
/* Child properties interface */
+struct device_node;
struct fwnode_handle;
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
@@ -431,6 +438,13 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
return -ENOSYS;
}
+static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -464,9 +478,20 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
}
/* Child properties interface */
+struct device_node;
struct fwnode_handle;
static inline
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
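A minimal sketch of the new consumer helpers, assuming dev and the child device_node np come from the caller's probe path; the property name and label are illustrative.

	struct gpio_desc *reset;

	reset = devm_gpiod_get_from_of_node(dev, np, "reset-gpios", 0,
					    GPIOD_OUT_LOW, "example-reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_transitory(reset, false);	/* keep the value across suspend/reset */
	gpiod_set_value_cansleep(reset, 1);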
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 7258cd676df4..5382b5183b7e 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -288,6 +288,21 @@ struct gpio_chip {
struct gpio_irq_chip irq;
#endif
+ /**
+ * @need_valid_mask:
+ *
+	 * If set, the core allocates @valid_mask with all bits set to one.
+ */
+ bool need_valid_mask;
+
+ /**
+ * @valid_mask:
+ *
+	 * If not %NULL, holds a bitmask of the GPIOs which are valid to be
+	 * used from the chip.
+ */
+ unsigned long *valid_mask;
+
#if defined(CONFIG_OF_GPIO)
/*
* If CONFIG_OF is enabled, then all GPIO controllers described in the
@@ -384,6 +399,7 @@ bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
/* Sleep persistence inquiry for drivers */
bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset);
/* get driver data */
void *gpiochip_get_data(struct gpio_chip *chip);
@@ -436,6 +452,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset);
+
#ifdef CONFIG_LOCKDEP
/*
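A hedged sketch of how a controller driver might use the new valid_mask support: request the mask at registration time, then clear the lines that are reserved. example_chip, example_register() and the reserved offset are hypothetical.

static struct gpio_chip example_chip;	/* ops, label and ngpio filled in elsewhere */

static int example_register(struct device *dev)
{
	int ret;

	/* ask the core to allocate ->valid_mask with all bits set */
	example_chip.need_valid_mask = true;

	ret = gpiochip_add_data(&example_chip, NULL);
	if (ret)
		return ret;

	/* line 5 is reserved by firmware: mark it invalid */
	clear_bit(5, example_chip.valid_mask);

	/*
	 * gpiochip_line_is_valid(&example_chip, 5) and
	 * gpiochip_irqchip_irq_valid(&example_chip, 5) now return false.
	 */
	return 0;
}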
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 846be7c69a52..b2f2dc638463 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -10,8 +10,8 @@ enum gpio_lookup_flags {
GPIO_ACTIVE_LOW = (1 << 0),
GPIO_OPEN_DRAIN = (1 << 1),
GPIO_OPEN_SOURCE = (1 << 2),
- GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
- GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3),
+ GPIO_PERSISTENT = (0 << 3),
+ GPIO_TRANSITORY = (1 << 3),
};
/**
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index d06bf77400f1..7160df54a6fe 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -13,6 +13,7 @@ struct device;
* @desc: label that will be attached to button's gpio
* @type: input event type (%EV_KEY, %EV_SW, %EV_ABS)
* @wakeup: configure the button as a wake-up source
+ * @wakeup_event_action: event action to trigger wakeup
* @debounce_interval: debounce ticks interval in msecs
* @can_disable: %true indicates that userspace is allowed to
* disable button via sysfs
@@ -26,6 +27,7 @@ struct gpio_keys_button {
const char *desc;
unsigned int type;
int wakeup;
+ int wakeup_event_action;
int debounce_interval;
bool can_disable;
int value;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d491027a7c22..8da3e1f48195 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -26,6 +26,7 @@
#define __HID_H
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -281,6 +282,7 @@ struct hid_item {
#define HID_DG_DEVICECONFIG 0x000d000e
#define HID_DG_DEVICESETTINGS 0x000d0023
+#define HID_DG_AZIMUTH 0x000d003f
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -309,13 +311,13 @@ struct hid_item {
* HID connect requests
*/
-#define HID_CONNECT_HIDINPUT 0x01
-#define HID_CONNECT_HIDINPUT_FORCE 0x02
-#define HID_CONNECT_HIDRAW 0x04
-#define HID_CONNECT_HIDDEV 0x08
-#define HID_CONNECT_HIDDEV_FORCE 0x10
-#define HID_CONNECT_FF 0x20
-#define HID_CONNECT_DRIVER 0x40
+#define HID_CONNECT_HIDINPUT BIT(0)
+#define HID_CONNECT_HIDINPUT_FORCE BIT(1)
+#define HID_CONNECT_HIDRAW BIT(2)
+#define HID_CONNECT_HIDDEV BIT(3)
+#define HID_CONNECT_HIDDEV_FORCE BIT(4)
+#define HID_CONNECT_FF BIT(5)
+#define HID_CONNECT_DRIVER BIT(6)
#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \
HID_CONNECT_HIDDEV|HID_CONNECT_FF)
@@ -328,24 +330,25 @@ struct hid_item {
*/
#define MAX_USBHID_BOOT_QUIRKS 4
-#define HID_QUIRK_INVERT 0x00000001
-#define HID_QUIRK_NOTOUCH 0x00000002
-#define HID_QUIRK_IGNORE 0x00000004
-#define HID_QUIRK_NOGET 0x00000008
-#define HID_QUIRK_HIDDEV_FORCE 0x00000010
-#define HID_QUIRK_BADPAD 0x00000020
-#define HID_QUIRK_MULTI_INPUT 0x00000040
-#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
-#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
-/* 0x00000200 reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
-#define HID_QUIRK_ALWAYS_POLL 0x00000400
-#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
-#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
-#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
-#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
-#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
-#define HID_QUIRK_NO_IGNORE 0x40000000
-#define HID_QUIRK_NO_INPUT_SYNC 0x80000000
+#define HID_QUIRK_INVERT BIT(0)
+#define HID_QUIRK_NOTOUCH BIT(1)
+#define HID_QUIRK_IGNORE BIT(2)
+#define HID_QUIRK_NOGET BIT(3)
+#define HID_QUIRK_HIDDEV_FORCE BIT(4)
+#define HID_QUIRK_BADPAD BIT(5)
+#define HID_QUIRK_MULTI_INPUT BIT(6)
+#define HID_QUIRK_HIDINPUT_FORCE BIT(7)
+/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */
+/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
+#define HID_QUIRK_ALWAYS_POLL BIT(10)
+#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
+#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
+#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
+#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
+#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
+#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
+#define HID_QUIRK_NO_IGNORE BIT(30)
+#define HID_QUIRK_NO_INPUT_SYNC BIT(31)
/*
* HID device groups
@@ -492,13 +495,13 @@ struct hid_output_fifo {
char *raw_report;
};
-#define HID_CLAIMED_INPUT 1
-#define HID_CLAIMED_HIDDEV 2
-#define HID_CLAIMED_HIDRAW 4
-#define HID_CLAIMED_DRIVER 8
+#define HID_CLAIMED_INPUT BIT(0)
+#define HID_CLAIMED_HIDDEV BIT(1)
+#define HID_CLAIMED_HIDRAW BIT(2)
+#define HID_CLAIMED_DRIVER BIT(3)
-#define HID_STAT_ADDED 1
-#define HID_STAT_PARSED 2
+#define HID_STAT_ADDED BIT(0)
+#define HID_STAT_PARSED BIT(1)
struct hid_input {
struct list_head list;
@@ -671,6 +674,7 @@ struct hid_usage_id {
* to be called)
* @dyn_list: list of dynamically added device ids
* @dyn_lock: lock protecting @dyn_list
+ * @match: check if the given device is handled by this driver
* @probe: new device inserted
* @remove: device removed (NULL if not a hot-plug capable driver)
* @report_table: on which reports to call raw_event (NULL means all)
@@ -711,6 +715,7 @@ struct hid_driver {
struct list_head dyn_list;
spinlock_t dyn_lock;
+ bool (*match)(struct hid_device *dev, bool ignore_special_driver);
int (*probe)(struct hid_device *dev, const struct hid_device_id *id);
void (*remove)(struct hid_device *dev);
@@ -814,6 +819,8 @@ extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
+extern struct bus_type hid_bus_type;
+
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
@@ -841,7 +848,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
+int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
@@ -860,8 +867,12 @@ int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id);
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id);
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv);
s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
@@ -1088,19 +1099,19 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*
* @report: the report we want to know the length
*/
-static inline int hid_report_len(struct hid_report *report)
+static inline u32 hid_report_len(struct hid_report *report)
{
/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
return ((report->size - 1) >> 3) + 1 + (report->id > 0);
}
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
/* HID quirks API */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
-int usbhid_quirks_init(char **quirks_param);
-void usbhid_quirks_exit(void);
+unsigned long hid_lookup_quirk(const struct hid_device *hdev);
+int hid_quirks_init(char **quirks_param, __u16 bus, int count);
+void hid_quirks_exit(__u16 bus);
#ifdef CONFIG_HID_PID
int hid_pidff_init(struct hid_device *hid);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 776f90f3a1cd..0690679832d4 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -237,6 +237,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
#endif
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
+
static inline void copy_highpage(struct page *to, struct page *from)
{
char *vfrom, *vto;
@@ -248,4 +250,6 @@ static inline void copy_highpage(struct page *to, struct page *from)
kunmap_atomic(vfrom);
}
+#endif
+
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
index 394a8405dd74..774f7d3b8f6a 100644
--- a/include/linux/hil_mlc.h
+++ b/include/linux/hil_mlc.h
@@ -144,12 +144,12 @@ struct hil_mlc {
hil_packet ipacket[16];
hil_packet imatch;
int icount;
- struct timeval instart;
- suseconds_t intimeout;
+ unsigned long instart;
+ unsigned long intimeout;
int ddi; /* Last operational device id */
int lcv; /* LCV to throttle loops */
- struct timeval lcv_tv; /* Time loop was started */
+ time64_t lcv_time; /* Time loop was started */
int di_map[7]; /* Maps below items to live devs */
struct hil_mlc_devinfo di[HIL_MLC_DEVMEM];
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 325017ad9311..39988924de3a 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -80,76 +80,145 @@
struct hmm;
/*
- * hmm_pfn_t - HMM uses its own pfn type to keep several flags per page
+ * hmm_pfn_flag_e - HMM flag enums
*
* Flags:
- * HMM_PFN_VALID: pfn is valid
- * HMM_PFN_READ: CPU page table has read permission set
+ * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
* HMM_PFN_WRITE: CPU page table has write permission set
+ * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
+ *
+ * The driver provides a flags array. For example, if the driver's valid bit
+ * for an entry is bit 3, i.e. an entry is valid when (entry & (1 << 3)) is
+ * true, then the driver must provide an array in hmm_range.flags with
+ * hmm_range.flags[HMM_PFN_VALID] == 1 << 3. The same logic applies to all
+ * flags. This is the same idea as vm_page_prot in the vma, except that it is
+ * per device driver rather than per architecture.
+ */
+enum hmm_pfn_flag_e {
+ HMM_PFN_VALID = 0,
+ HMM_PFN_WRITE,
+ HMM_PFN_DEVICE_PRIVATE,
+ HMM_PFN_FLAG_MAX
+};
+
+/*
+ * hmm_pfn_value_e - HMM pfn special value
+ *
+ * Flags:
* HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
- * HMM_PFN_EMPTY: corresponding CPU page table entry is pte_none()
+ * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
* HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
* result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not
* be mirrored by a device, because the entry will never have HMM_PFN_VALID
* set and the pfn value is undefined.
- * HMM_PFN_DEVICE_UNADDRESSABLE: unaddressable device memory (ZONE_DEVICE)
+ *
+ * The driver provides values for the none, error and special entries. It may
+ * alias some of them (i.e. use the same value for error and special, for
+ * instance), but it must not alias none with error or special.
+ *
+ * HMM pfn values returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
+ * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
+ * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
+ * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is a special one
*/
-typedef unsigned long hmm_pfn_t;
+enum hmm_pfn_value_e {
+ HMM_PFN_ERROR,
+ HMM_PFN_NONE,
+ HMM_PFN_SPECIAL,
+ HMM_PFN_VALUE_MAX
+};
-#define HMM_PFN_VALID (1 << 0)
-#define HMM_PFN_READ (1 << 1)
-#define HMM_PFN_WRITE (1 << 2)
-#define HMM_PFN_ERROR (1 << 3)
-#define HMM_PFN_EMPTY (1 << 4)
-#define HMM_PFN_SPECIAL (1 << 5)
-#define HMM_PFN_DEVICE_UNADDRESSABLE (1 << 6)
-#define HMM_PFN_SHIFT 7
+/*
+ * struct hmm_range - track invalidation lock on virtual address range
+ *
+ * @vma: the vm area struct for the range
+ * @list: all range locks are on a list
+ * @start: range virtual start address (inclusive)
+ * @end: range virtual end address (exclusive)
+ * @pfns: array of pfns (big enough for the range)
+ * @flags: pfn flags to match device driver page table
+ * @values: pfn values for the special cases (none, special, error, ...)
+ * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
+ * @valid: pfns array did not change since it has been filled by an HMM function
+ */
+struct hmm_range {
+ struct vm_area_struct *vma;
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+ uint64_t *pfns;
+ const uint64_t *flags;
+ const uint64_t *values;
+ uint8_t pfn_shift;
+ bool valid;
+};
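A hedged sketch of the per-driver encoding this struct describes: a hypothetical driver keeps its flags in the low three bits of each 64-bit entry and stores the pfn above them.

static const uint64_t example_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID]          = 1UL << 0,
	[HMM_PFN_WRITE]          = 1UL << 1,
	[HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
};

static const uint64_t example_values[HMM_PFN_VALUE_MAX] = {
	[HMM_PFN_NONE]    = 0,
	[HMM_PFN_ERROR]   = 1UL << 1,	/* write bit without valid: never a real entry */
	[HMM_PFN_SPECIAL] = 1UL << 2,	/* private bit without valid: never a real entry */
};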
/*
- * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t
- * @pfn: hmm_pfn_t to convert to struct page
- * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise
+ * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
+ * @range: range used to decode the HMM pfn value
+ * @pfn: HMM pfn value to get corresponding struct page from
+ * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
*
- * If the hmm_pfn_t is valid (ie valid flag set) then return the struct page
- * matching the pfn value stored in the hmm_pfn_t. Otherwise return NULL.
+ * If the HMM pfn is valid (ie valid flag set) then return the struct page
+ * matching the pfn value stored in the HMM pfn. Otherwise return NULL.
*/
-static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
+static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
+ uint64_t pfn)
{
- if (!(pfn & HMM_PFN_VALID))
+ if (pfn == range->values[HMM_PFN_NONE])
+ return NULL;
+ if (pfn == range->values[HMM_PFN_ERROR])
return NULL;
- return pfn_to_page(pfn >> HMM_PFN_SHIFT);
+ if (pfn == range->values[HMM_PFN_SPECIAL])
+ return NULL;
+ if (!(pfn & range->flags[HMM_PFN_VALID]))
+ return NULL;
+ return pfn_to_page(pfn >> range->pfn_shift);
}
/*
- * hmm_pfn_t_to_pfn() - return pfn value store in a hmm_pfn_t
- * @pfn: hmm_pfn_t to extract pfn from
- * Returns: pfn value if hmm_pfn_t is valid, -1UL otherwise
+ * hmm_pfn_to_pfn() - return the pfn value stored in an HMM pfn
+ * @range: range used to decode the HMM pfn value
+ * @pfn: HMM pfn value to extract pfn from
+ * Returns: pfn value if HMM pfn is valid, -1UL otherwise
*/
-static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
+static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
+ uint64_t pfn)
{
- if (!(pfn & HMM_PFN_VALID))
+ if (pfn == range->values[HMM_PFN_NONE])
+ return -1UL;
+ if (pfn == range->values[HMM_PFN_ERROR])
+ return -1UL;
+ if (pfn == range->values[HMM_PFN_SPECIAL])
+ return -1UL;
+ if (!(pfn & range->flags[HMM_PFN_VALID]))
return -1UL;
- return (pfn >> HMM_PFN_SHIFT);
+ return (pfn >> range->pfn_shift);
}
/*
- * hmm_pfn_t_from_page() - create a valid hmm_pfn_t value from struct page
- * @page: struct page pointer for which to create the hmm_pfn_t
- * Returns: valid hmm_pfn_t for the page
+ * hmm_pfn_from_page() - create a valid HMM pfn value from struct page
+ * @range: range used to encode the HMM pfn value
+ * @page: struct page pointer for which to create the HMM pfn
+ * Returns: valid HMM pfn for the page
*/
-static inline hmm_pfn_t hmm_pfn_t_from_page(struct page *page)
+static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
+ struct page *page)
{
- return (page_to_pfn(page) << HMM_PFN_SHIFT) | HMM_PFN_VALID;
+ return (page_to_pfn(page) << range->pfn_shift) |
+ range->flags[HMM_PFN_VALID];
}
/*
- * hmm_pfn_t_from_pfn() - create a valid hmm_pfn_t value from pfn
- * @pfn: pfn value for which to create the hmm_pfn_t
- * Returns: valid hmm_pfn_t for the pfn
+ * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn
+ * @range: range used to encode the HMM pfn value
+ * @pfn: pfn value for which to create the HMM pfn
+ * Returns: valid HMM pfn for the pfn
*/
-static inline hmm_pfn_t hmm_pfn_t_from_pfn(unsigned long pfn)
+static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
+ unsigned long pfn)
{
- return (pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
+ return (pfn << range->pfn_shift) |
+ range->flags[HMM_PFN_VALID];
}
@@ -218,6 +287,16 @@ enum hmm_update_type {
* @update: callback to update range on a device
*/
struct hmm_mirror_ops {
+ /* release() - release hmm_mirror
+ *
+ * @mirror: pointer to struct hmm_mirror
+ *
+ * This is called when the mm_struct is being released.
+ * The callback should make sure no references to the mirror occur
+ * after the callback returns.
+ */
+ void (*release)(struct hmm_mirror *mirror);
+
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
@@ -262,23 +341,6 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
/*
- * struct hmm_range - track invalidation lock on virtual address range
- *
- * @list: all range lock are on a list
- * @start: range virtual start address (inclusive)
- * @end: range virtual end address (exclusive)
- * @pfns: array of pfns (big enough for the range)
- * @valid: pfns array did not change since it has been fill by an HMM function
- */
-struct hmm_range {
- struct list_head list;
- unsigned long start;
- unsigned long end;
- hmm_pfn_t *pfns;
- bool valid;
-};
-
-/*
* To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device
* driver lock that serializes device page table updates, then call
* hmm_vma_range_done(), to check if the snapshot is still valid. The same
@@ -291,17 +353,13 @@ struct hmm_range {
*
* IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID !
*/
-int hmm_vma_get_pfns(struct vm_area_struct *vma,
- struct hmm_range *range,
- unsigned long start,
- unsigned long end,
- hmm_pfn_t *pfns);
-bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range);
+int hmm_vma_get_pfns(struct hmm_range *range);
+bool hmm_vma_range_done(struct hmm_range *range);
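A hedged sketch of the snapshot protocol described above, reusing the hypothetical example_flags/example_values arrays from the earlier sketch; struct example_drv and its pt_lock mutex stand in for the driver's own page-table lock.

static int example_snapshot(struct example_drv *drv, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    uint64_t *pfns)
{
	struct hmm_range range = {
		.vma = vma, .start = start, .end = end, .pfns = pfns,
		.flags = example_flags, .values = example_values, .pfn_shift = 3,
	};
	int ret;

again:
	ret = hmm_vma_get_pfns(&range);
	if (ret)
		return ret;

	mutex_lock(&drv->pt_lock);
	if (!hmm_vma_range_done(&range)) {
		mutex_unlock(&drv->pt_lock);
		goto again;		/* the snapshot raced with an invalidation */
	}
	/* ... mirror range.pfns into the device page table ... */
	mutex_unlock(&drv->pt_lock);
	return 0;
}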
/*
* Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will
- * not migrate any device memory back to system memory. The hmm_pfn_t array will
+ * not migrate any device memory back to system memory. The HMM pfn array will
* be updated with the fault result and current snapshot of the CPU page table
* for the range.
*
@@ -310,22 +368,26 @@ bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range);
* function returns -EAGAIN.
*
* Return value does not reflect if the fault was successful for every single
- * address or not. Therefore, the caller must to inspect the hmm_pfn_t array to
+ * address or not. Therefore, the caller must inspect the HMM pfn array to
* determine fault status for each address.
*
* Trying to fault inside an invalid vma will result in -EINVAL.
*
* See the function description in mm/hmm.c for further documentation.
*/
-int hmm_vma_fault(struct vm_area_struct *vma,
- struct hmm_range *range,
- unsigned long start,
- unsigned long end,
- hmm_pfn_t *pfns,
- bool write,
- bool block);
-#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
+int hmm_vma_fault(struct hmm_range *range, bool block);
+/* Below are for HMM internal use only! Not to be used by device driver! */
+void hmm_mm_destroy(struct mm_struct *mm);
+
+static inline void hmm_mm_init(struct mm_struct *mm)
+{
+ mm->hmm = NULL;
+}
+#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
+static inline void hmm_mm_destroy(struct mm_struct *mm) {}
+static inline void hmm_mm_init(struct mm_struct *mm) {}
+#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;
@@ -498,23 +560,9 @@ struct hmm_device {
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-#endif /* IS_ENABLED(CONFIG_HMM) */
-
-/* Below are for HMM internal use only! Not to be used by device driver! */
-#if IS_ENABLED(CONFIG_HMM_MIRROR)
-void hmm_mm_destroy(struct mm_struct *mm);
-
-static inline void hmm_mm_init(struct mm_struct *mm)
-{
- mm->hmm = NULL;
-}
-#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-static inline void hmm_mm_destroy(struct mm_struct *mm) {}
-static inline void hmm_mm_init(struct mm_struct *mm) {}
-#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-
-
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
+#endif /* IS_ENABLED(CONFIG_HMM) */
+
#endif /* LINUX_HMM_H */
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index d392975d8887..6f1dee7e67e0 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -281,7 +281,7 @@ typedef struct {
hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */
int rcurr, rqty; /* Current read transact in process */
- struct timeval rtv; /* Time when current read started */
+ ktime_t rtime; /* Time when current read started */
int wcurr; /* Current write transact in process */
int dev_err; /* carries status from registration */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 012c37fdb688..a2656c3ebe81 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -28,13 +28,29 @@ struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
+ *
+ * HRTIMER_MODE_ABS - Time value is absolute
+ * HRTIMER_MODE_REL - Time value is relative to now
+ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
+ * when starting the timer)
+ * HRTIMER_MODE_SOFT - Timer callback function will be executed in
+ * soft irq context
*/
enum hrtimer_mode {
- HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
- HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
- HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
- HRTIMER_MODE_ABS_PINNED = 0x02,
- HRTIMER_MODE_REL_PINNED = 0x03,
+ HRTIMER_MODE_ABS = 0x00,
+ HRTIMER_MODE_REL = 0x01,
+ HRTIMER_MODE_PINNED = 0x02,
+ HRTIMER_MODE_SOFT = 0x04,
+
+ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+
+ HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
+
+ HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
};
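A minimal sketch showing the new soft-interrupt expiry modes in use; the timer, callback and 10 ms period are illustrative.

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* runs in softirq context because of HRTIMER_MODE_REL_SOFT */
	return HRTIMER_NORESTART;
}

static void example_arm(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	example_timer.function = example_timer_fn;
	hrtimer_start(&example_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
}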
/*
@@ -87,6 +103,7 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
@@ -97,6 +114,7 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
+ u8 is_soft;
};
/**
@@ -112,9 +130,9 @@ struct hrtimer_sleeper {
};
#ifdef CONFIG_64BIT
-# define HRTIMER_CLOCK_BASE_ALIGN 64
+# define __hrtimer_clock_base_align ____cacheline_aligned
#else
-# define HRTIMER_CLOCK_BASE_ALIGN 32
+# define __hrtimer_clock_base_align
#endif
/**
@@ -123,48 +141,55 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
- int index;
+ unsigned int index;
clockid_t clockid;
+ seqcount_t seq;
+ struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
ktime_t offset;
-} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
+} __hrtimer_clock_base_align;
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
- HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
+ HRTIMER_BASE_MONOTONIC_SOFT,
+ HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
-/*
+/**
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
- * @seq: seqcount around __run_hrtimer
- * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
- * @migration_enabled: The migration of hrtimers to other cpus is enabled
- * @nohz_active: The nohz functionality is enabled
- * @expires_next: absolute time of the next event which was scheduled
- * via clock_set_next_event()
- * @next_timer: Pointer to the first expiring timer
- * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated: set if the softirq was raised; updating the
+ * softirq related settings is not required then.
* @nr_events: Total number of hrtimer interrupt events
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @expires_next: absolute time of the next event; required for remote
+ * hrtimer enqueue. It is the overall first expiry time (hard
+ * and soft hrtimers are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time to check whether the soft queues also need to be expired
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
* @clock_base: array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
@@ -173,31 +198,28 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
- seqcount_t seq;
- struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+ softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int in_hrtirq : 1,
- hres_active : 1,
- hang_detected : 1;
- ktime_t expires_next;
- struct hrtimer *next_timer;
unsigned int nr_events;
- unsigned int nr_retries;
- unsigned int nr_hangs;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
- BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
-
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -266,16 +288,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
return timer->base->get_time();
}
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ timer->base->cpu_base->hres_active : 0;
+}
+
#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return timer->base->cpu_base->hres_active;
-}
-
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -298,11 +321,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return 0;
-}
-
static inline void clock_was_set_delayed(void) { }
#endif
@@ -365,11 +383,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
+ * hrtimer_start - (re)start an hrtimer
* @timer: the timer to be added
* @tim: expiry time
- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
- * relative (HRTIMER_MODE_REL)
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ * softirq based mode is considered for debug purposes only!
*/
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
@@ -405,6 +424,7 @@ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
}
extern u64 hrtimer_get_next_event(void);
+extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
@@ -422,7 +442,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->base->cpu_base->running == timer;
+ return timer->base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
@@ -466,7 +486,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
- int clock);
+ clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 82a25880714a..36fa6a2a82e3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -119,6 +119,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
+void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
@@ -129,7 +130,6 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
-extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
@@ -158,6 +158,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
bool is_hugetlb_entry_migration(pte_t pte);
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
@@ -198,6 +199,7 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
#define putback_active_hugepage(p) do {} while (0)
+#define move_hugetlb_state(old, new, reason) do {} while (0)
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
@@ -271,6 +273,17 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
return sb->s_fs_info;
}
+struct hugetlbfs_inode_info {
+ struct shared_policy policy;
+ struct inode vfs_inode;
+ unsigned int seals;
+};
+
+static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
+{
+ return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
+}
+
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
@@ -343,10 +356,10 @@ struct huge_bootmem_page {
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
-struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask);
+struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
@@ -524,7 +537,7 @@ struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_noerr(v, a, r) NULL
+#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index cf045885a499..6058c3844a76 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -53,6 +53,9 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
/* FIXME: only change from the attr, and don't unregister */
extern int
modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
+extern int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+ bool check);
/*
* Kernel breakpoints are not associated with any particular thread.
@@ -97,6 +100,10 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
struct perf_event_attr *attr) { return -ENOSYS; }
+static inline int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+ bool check) { return -ENOSYS; }
+
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index ceb751987c40..e5fd2707b6df 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -29,6 +29,7 @@ enum hwmon_sensor_types {
hwmon_humidity,
hwmon_fan,
hwmon_pwm,
+ hwmon_max,
};
enum hwmon_chip_attributes {
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6c9336626592..192ed8fbc403 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -26,7 +26,6 @@
#define _HYPERV_H
#include <uapi/linux/hyperv.h>
-#include <uapi/asm/hyperv.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
@@ -127,28 +126,6 @@ struct hv_ring_buffer_info {
u32 priv_read_index;
};
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
- u32 *read, u32 *write)
-{
- u32 read_loc, write_loc, dsize;
-
- /* Capture the read/write indices before they changed */
- read_loc = rbi->ring_buffer->read_index;
- write_loc = rbi->ring_buffer->write_index;
- dsize = rbi->ring_datasize;
-
- *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- *read = dsize - *write;
-}
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
@@ -866,7 +843,7 @@ struct vmbus_channel {
/*
* NUMA distribution policy:
- * We support teo policies:
+ * We support two policies:
* 1) Balanced: Here all performance critical channels are
* distributed evenly amongst all the NUMA nodes.
* This policy will be the default policy.
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index b19563f9a8eb..fc08b433c856 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -8,15 +8,28 @@
*/
#ifdef CONFIG_X86
+
+#include <asm/jailhouse_para.h>
#include <asm/x86_init.h>
+
static inline void hypervisor_pin_vcpu(int cpu)
{
x86_platform.hyper.pin_vcpu(cpu);
}
-#else
+
+#else /* !CONFIG_X86 */
+
+#include <linux/of.h>
+
static inline void hypervisor_pin_vcpu(int cpu)
{
}
-#endif
+
+static inline bool jailhouse_paravirt(void)
+{
+ return of_find_compatible_node(NULL, NULL, "jailhouse,cell");
+}
+
+#endif /* !CONFIG_X86 */
#endif /* __LINUX_HYPEVISOR_H */
diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h
index 0e5f7c77d1d8..c37329432a8e 100644
--- a/include/linux/i2c-pca-platform.h
+++ b/include/linux/i2c-pca-platform.h
@@ -3,9 +3,6 @@
#define I2C_PCA9564_PLATFORM_H
struct i2c_pca9564_pf_platform_data {
- int gpio; /* pin to reset chip. driver will work when
- * not supplied (negative value), but it
- * cannot exit some error conditions then */
int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */
int timeout; /* timeout in jiffies */
};
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 0f774406fad0..44ad14e016b5 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -47,6 +47,7 @@ struct i2c_algorithm;
struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
+struct i2c_device_identity;
union i2c_smbus_data;
struct i2c_board_info;
enum i2c_slave_event;
@@ -55,7 +56,7 @@ typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
struct module;
struct property_entry;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
/*
* The master routines are the ones normally used to transmit data to devices
* on a bus (or read from them). Apart from two basic transfer functions to
@@ -63,10 +64,68 @@ struct property_entry;
* transmit an arbitrary number of messages without interruption.
 * @count must be less than 64k since msg.len is u16.
*/
-extern int i2c_master_send(const struct i2c_client *client, const char *buf,
- int count);
-extern int i2c_master_recv(const struct i2c_client *client, char *buf,
- int count);
+extern int i2c_transfer_buffer_flags(const struct i2c_client *client,
+ char *buf, int count, u16 flags);
+
+/**
+ * i2c_master_recv - issue a single I2C message in master receive mode
+ * @client: Handle to slave device
+ * @buf: Where to store data read from slave
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes read.
+ */
+static inline int i2c_master_recv(const struct i2c_client *client,
+ char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD);
+};
+
+/**
+ * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode
+ * using a DMA safe buffer
+ * @client: Handle to slave device
+ * @buf: Where to store data read from slave, must be safe to use with DMA
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes read.
+ */
+static inline int i2c_master_recv_dmasafe(const struct i2c_client *client,
+ char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, buf, count,
+ I2C_M_RD | I2C_M_DMA_SAFE);
+};
+
+/**
+ * i2c_master_send - issue a single I2C message in master transmit mode
+ * @client: Handle to slave device
+ * @buf: Data that will be written to the slave
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes written.
+ */
+static inline int i2c_master_send(const struct i2c_client *client,
+ const char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, (char *)buf, count, 0);
+};
+
+/**
+ * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode
+ * using a DMA safe buffer
+ * @client: Handle to slave device
+ * @buf: Data that will be written to the slave, must be safe to use with DMA
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes written.
+ */
+static inline int i2c_master_send_dmasafe(const struct i2c_client *client,
+ const char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, (char *)buf, count,
+ I2C_M_DMA_SAFE);
+};
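A hedged sketch of using the DMA-safe transmit helper with a kmalloc()ed bounce buffer (kmalloc memory is safe for DMA, so the adapter may map it directly instead of copying); client, data and len come from the caller.

	u8 *buf = kmemdup(data, len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = i2c_master_send_dmasafe(client, buf, len);
	kfree(buf);
	return ret < 0 ? ret : 0;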
/* Transfer num messages.
*/
@@ -128,8 +187,37 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
extern s32
i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
u8 command, u8 length, u8 *values);
+int i2c_get_device_id(const struct i2c_client *client,
+ struct i2c_device_identity *id);
#endif /* I2C */
+/**
+ * struct i2c_device_identity - i2c client device identification
+ * @manufacturer_id: 0 - 4095, database maintained by NXP
+ * @part_id: 0 - 511, according to manufacturer
+ * @die_revision: 0 - 7, according to manufacturer
+ */
+struct i2c_device_identity {
+ u16 manufacturer_id;
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS 0
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_1 1
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_2 2
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_3 3
+#define I2C_DEVICE_ID_RAMTRON_INTERNATIONAL 4
+#define I2C_DEVICE_ID_ANALOG_DEVICES 5
+#define I2C_DEVICE_ID_STMICROELECTRONICS 6
+#define I2C_DEVICE_ID_ON_SEMICONDUCTOR 7
+#define I2C_DEVICE_ID_SPRINTEK_CORPORATION 8
+#define I2C_DEVICE_ID_ESPROS_PHOTONICS_AG 9
+#define I2C_DEVICE_ID_FUJITSU_SEMICONDUCTOR 10
+#define I2C_DEVICE_ID_FLIR 11
+#define I2C_DEVICE_ID_O2MICRO 12
+#define I2C_DEVICE_ID_ATMEL 13
+#define I2C_DEVICE_ID_NONE 0xffff
+ u16 part_id;
+ u8 die_revision;
+};
+
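i2c_get_device_id() performs the bus-standard Device ID read for @client and decodes the result into the structure above. A hypothetical probe-time check (the NXP manufacturer ID is only an example):

	struct i2c_device_identity id;

	if (!i2c_get_device_id(client, &id) &&
	    id.manufacturer_id == I2C_DEVICE_ID_NXP_SEMICONDUCTORS)
		dev_info(&client->dev, "NXP part %u, die revision %u\n",
			 id.part_id, id.die_revision);
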
enum i2c_alert_protocol {
I2C_PROTOCOL_SMBUS_ALERT,
I2C_PROTOCOL_SMBUS_HOST_NOTIFY,
@@ -354,7 +442,7 @@ struct i2c_board_info {
.type = dev_type, .addr = (dev_addr)
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
/* Add-on boards should register/unregister their devices; e.g. a board
* with integrated I2C, a config eeprom, sensors, and a codec that's
* used in conjunction with the primary hardware.
@@ -485,40 +573,43 @@ struct i2c_timings {
/**
* struct i2c_bus_recovery_info - I2C bus recovery information
* @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or
- * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery().
+ * i2c_generic_scl_recovery().
* @get_scl: This gets current value of SCL line. Mandatory for generic SCL
- * recovery. Used internally for generic GPIO recovery.
- * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used
- * internally for generic GPIO recovery.
+ * recovery. Populated internally for generic GPIO recovery.
+ * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery.
+ * Populated internally for generic GPIO recovery.
* @get_sda: This gets current value of SDA line. Optional for generic SCL
- * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO
- * recovery.
+ * recovery. Populated internally, if sda_gpio is a valid GPIO, for generic
+ * GPIO recovery.
+ * @set_sda: This sets/clears the SDA line. Optional for generic SCL recovery.
+ * Populated internally, if sda_gpio is a valid GPIO, for generic GPIO
+ * recovery.
* @prepare_recovery: This will be called before starting recovery. Platform may
* configure padmux here for SDA/SCL line or something else they want.
* @unprepare_recovery: This will be called after completing recovery. Platform
* may configure padmux here for SDA/SCL line or something else they want.
- * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery.
- * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery.
+ * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery.
+ * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery.
*/
struct i2c_bus_recovery_info {
- int (*recover_bus)(struct i2c_adapter *);
+ int (*recover_bus)(struct i2c_adapter *adap);
- int (*get_scl)(struct i2c_adapter *);
- void (*set_scl)(struct i2c_adapter *, int val);
- int (*get_sda)(struct i2c_adapter *);
+ int (*get_scl)(struct i2c_adapter *adap);
+ void (*set_scl)(struct i2c_adapter *adap, int val);
+ int (*get_sda)(struct i2c_adapter *adap);
+ void (*set_sda)(struct i2c_adapter *adap, int val);
- void (*prepare_recovery)(struct i2c_adapter *);
- void (*unprepare_recovery)(struct i2c_adapter *);
+ void (*prepare_recovery)(struct i2c_adapter *adap);
+ void (*unprepare_recovery)(struct i2c_adapter *adap);
/* gpio recovery */
- int scl_gpio;
- int sda_gpio;
+ struct gpio_desc *scl_gpiod;
+ struct gpio_desc *sda_gpiod;
};
int i2c_recover_bus(struct i2c_adapter *adap);
/* Generic recovery routines */
-int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
int i2c_generic_scl_recovery(struct i2c_adapter *adap);
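
With the switch from GPIO numbers to descriptors, an adapter driver only supplies scl_gpiod (and optionally sda_gpiod); the core then populates get_scl/set_scl (and the SDA callbacks) before i2c_generic_scl_recovery() is used. A sketch of the adapter side; the function names, con_ids and gpiod flags here are illustrative only:

static struct i2c_bus_recovery_info my_recovery = {
	.recover_bus = i2c_generic_scl_recovery,
};

static void my_adapter_setup_recovery(struct platform_device *pdev,
				       struct i2c_adapter *adap)
{
	my_recovery.scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
	my_recovery.sda_gpiod = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_IN);

	if (!IS_ERR(my_recovery.scl_gpiod))
		adap->bus_recovery_info = &my_recovery;
}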
/**
@@ -706,7 +797,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
/* administration...
*/
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
extern int i2c_add_adapter(struct i2c_adapter *);
extern void i2c_del_adapter(struct i2c_adapter *);
extern int i2c_add_numbered_adapter(struct i2c_adapter *);
@@ -769,6 +860,9 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
}
+u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
+void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf);
+
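These two helpers are aimed at adapter drivers that want to DMA the message payload: i2c_get_dma_safe_msg_buf() returns NULL for messages shorter than @threshold, msg->buf when the caller already marked it DMA safe, and a freshly allocated bounce buffer otherwise; i2c_release_dma_safe_msg_buf() copies read data back and frees the bounce buffer if one was allocated. A rough sketch, with the DMA programming itself elided:

	u8 *dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);	/* threshold: 8 bytes */

	if (dma_buf) {
		/* ... map dma_buf and run the transfer through the DMA engine ... */
	} else {
		/* ... fall back to PIO using msg->buf ... */
	}

	/* after the transfer has completed: */
	i2c_release_dma_safe_msg_buf(msg, dma_buf);
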
int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
/**
* module_i2c_driver() - Helper macro for registering a modular I2C driver
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
deleted file mode 100644
index 4dbe651f71f5..000000000000
--- a/include/linux/i7300_idle.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef I7300_IDLE_H
-#define I7300_IDLE_H
-
-#include <linux/pci.h>
-
-/*
- * I/O AT controls (PCI bus 0 device 8 function 0)
- * DIMM controls (PCI bus 0 device 16 function 1)
- */
-#define IOAT_BUS 0
-#define IOAT_DEVFN PCI_DEVFN(8, 0)
-#define MEMCTL_BUS 0
-#define MEMCTL_DEVFN PCI_DEVFN(16, 1)
-
-struct fbd_ioat {
- unsigned int vendor;
- unsigned int ioat_dev;
- unsigned int enabled;
-};
-
-/*
- * The i5000 chip-set has the same hooks as the i7300
- * but it is not enabled by default and must be manually
- * manually enabled with "forceload=1" because it is
- * only lightly validated.
- */
-
-static const struct fbd_ioat fbd_ioat_list[] = {
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0},
- {0, 0}
-};
-
-/* table of devices that work with this driver */
-static const struct pci_device_id pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
- { } /* Terminating entry */
-};
-
-/* Check for known platforms with I/O-AT */
-static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
- struct pci_dev **ioat_dev,
- int enable_all)
-{
- int i;
- struct pci_dev *memdev, *dmadev;
-
- memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
- if (!memdev)
- return -ENODEV;
-
- for (i = 0; pci_tbl[i].vendor != 0; i++) {
- if (memdev->vendor == pci_tbl[i].vendor &&
- memdev->device == pci_tbl[i].device) {
- break;
- }
- }
- if (pci_tbl[i].vendor == 0)
- return -ENODEV;
-
- dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
- if (!dmadev)
- return -ENODEV;
-
- for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
- if (dmadev->vendor == fbd_ioat_list[i].vendor &&
- dmadev->device == fbd_ioat_list[i].ioat_dev) {
- if (!(fbd_ioat_list[i].enabled || enable_all))
- continue;
- if (fbd_dev)
- *fbd_dev = memdev;
- if (ioat_dev)
- *ioat_dev = dmadev;
-
- return 0;
- }
- }
- return -ENODEV;
-}
-
-#endif
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 771989d25ef8..ca9d34feb572 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -25,15 +25,10 @@
#include <asm/byteorder.h>
#include <asm/io.h>
-#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
-# define SUPPORT_VLB_SYNC 0
-#else
-# define SUPPORT_VLB_SYNC 1
-#endif
-
/*
* Probably not wise to fiddle with these
*/
+#define SUPPORT_VLB_SYNC 1
#define IDE_DEFAULT_MAX_FAILURES 1
#define ERROR_MAX 8 /* Max read/write errors per sector */
#define ERROR_RESET 3 /* Reset controller every 4th retry */
@@ -165,7 +160,6 @@ struct ide_io_ports {
*/
#define PARTN_BITS 6 /* number of minor dev bits for partitions */
#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-#define SECTOR_SIZE 512
/*
* Timeouts for various operations:
diff --git a/include/linux/idr.h b/include/linux/idr.h
index fa14f834e4ed..e856f4e0ab35 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,10 +15,10 @@
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
-#include <linux/bug.h>
struct idr {
struct radix_tree_root idr_rt;
+ unsigned int idr_base;
unsigned int idr_next;
};
@@ -29,13 +29,31 @@ struct idr {
#define IDR_FREE 0
/* Set the IDR flag and the IDR_FREE tag */
-#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
+#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
+ (1 << (ROOT_TAG_SHIFT + IDR_FREE)))
-#define IDR_INIT \
-{ \
- .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
+#define IDR_INIT_BASE(name, base) { \
+ .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
+ .idr_base = (base), \
+ .idr_next = 0, \
}
-#define DEFINE_IDR(name) struct idr name = IDR_INIT
+
+/**
+ * IDR_INIT() - Initialise an IDR.
+ * @name: Name of IDR.
+ *
+ * A freshly-initialised IDR contains no IDs.
+ */
+#define IDR_INIT(name) IDR_INIT_BASE(name, 0)
+
+/**
+ * DEFINE_IDR() - Define a statically-allocated IDR.
+ * @name: Name of IDR.
+ *
+ * An IDR defined using this macro is ready for use with no additional
+ * initialisation required. It contains no IDs.
+ */
+#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
/**
* idr_get_cursor - Return the current position of the cyclic allocator
@@ -82,80 +100,52 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
void idr_preload(gfp_t gfp_mask);
-int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
- unsigned long start, unsigned long end, gfp_t gfp,
- bool ext);
-
-/**
- * idr_alloc - allocate an id
- * @idr: idr handle
- * @ptr: pointer to be associated with the new id
- * @start: the minimum id (inclusive)
- * @end: the maximum id (exclusive)
- * @gfp: memory allocation flags
- *
- * Allocates an unused ID in the range [start, end). Returns -ENOSPC
- * if there are no unused IDs in that range.
- *
- * Note that @end is treated as max when <= 0. This is to always allow
- * using @start + N as @end as long as N is inside integer range.
- *
- * Simultaneous modifications to the @idr are not allowed and should be
- * prevented by the user, usually with a lock. idr_alloc() may be called
- * concurrently with read-only accesses to the @idr, such as idr_find() and
- * idr_for_each_entry().
- */
-static inline int idr_alloc(struct idr *idr, void *ptr,
- int start, int end, gfp_t gfp)
-{
- unsigned long id;
- int ret;
-
- if (WARN_ON_ONCE(start < 0))
- return -EINVAL;
-
- ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
-
- if (ret)
- return ret;
-
- return id;
-}
-
-static inline int idr_alloc_ext(struct idr *idr, void *ptr,
- unsigned long *index,
- unsigned long start,
- unsigned long end,
- gfp_t gfp)
-{
- return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
-}
-
-int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
+int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
+int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
+ unsigned long max, gfp_t);
+int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
+void *idr_remove(struct idr *, unsigned long id);
+void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
-void *idr_get_next_ext(struct idr *idr, unsigned long *nextid);
-void *idr_replace(struct idr *, void *, int id);
-void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id);
+void *idr_get_next_ul(struct idr *, unsigned long *nextid);
+void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
-static inline void *idr_remove_ext(struct idr *idr, unsigned long id)
-{
- return radix_tree_delete_item(&idr->idr_rt, id, NULL);
-}
-
-static inline void *idr_remove(struct idr *idr, int id)
+/**
+ * idr_init_base() - Initialise an IDR.
+ * @idr: IDR handle.
+ * @base: The base value for the IDR.
+ *
+ * This variation of idr_init() creates an IDR which will allocate IDs
+ * starting at @base.
+ */
+static inline void idr_init_base(struct idr *idr, int base)
{
- return idr_remove_ext(idr, id);
+ INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
+ idr->idr_base = base;
+ idr->idr_next = 0;
}
+/**
+ * idr_init() - Initialise an IDR.
+ * @idr: IDR handle.
+ *
+ * Initialise a dynamically allocated IDR. To initialise a
+ * statically allocated IDR, use DEFINE_IDR().
+ */
static inline void idr_init(struct idr *idr)
{
- INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
- idr->idr_next = 0;
+ idr_init_base(idr, 0);
}
+/**
+ * idr_is_empty() - Are there any IDs allocated?
+ * @idr: IDR handle.
+ *
+ * Return: %true if no IDs are currently allocated in this IDR.
+ */
static inline bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
@@ -174,50 +164,38 @@ static inline void idr_preload_end(void)
}
/**
- * idr_find - return pointer for given id
- * @idr: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with. A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
+ * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
+ * @idr: IDR handle.
+ * @entry: The type * to use as cursor
+ * @id: Entry ID.
*
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
+ * @entry and @id do not need to be initialized before the loop, and
+ * after normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
-static inline void *idr_find_ext(const struct idr *idr, unsigned long id)
-{
- return radix_tree_lookup(&idr->idr_rt, id);
-}
-
-static inline void *idr_find(const struct idr *idr, int id)
-{
- return idr_find_ext(idr, id);
-}
+#define idr_for_each_entry(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
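
Together with the switch of idr_remove()/idr_find()/idr_replace() to unsigned long IDs and the new idr_alloc_u32(), this replaces the old _ext API; DEFINE_IDR()/IDR_INIT() now take the variable name because the underlying RADIX_TREE_INIT() does. A small usage sketch; struct session and its id field are invented for the example:

struct session {
	int id;
	/* ... */
};

static DEFINE_IDR(session_idr);

static int session_register(struct session *s)
{
	int id = idr_alloc(&session_idr, s, 1, 0, GFP_KERNEL);	/* IDs start at 1 */

	if (id < 0)
		return id;

	s->id = id;
	return 0;
}

static void session_dump(void)
{
	struct session *s;
	int id;

	idr_for_each_entry(&session_idr, s, id)
		pr_info("session %d at %p\n", id, s);
}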
/**
- * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idr: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
+ * @idr: IDR handle.
+ * @entry: The type * to use as cursor.
+ * @id: Entry ID.
*
* @entry and @id do not need to be initialized before the loop, and
- * after normal terminatinon @entry is left with the value NULL. This
+ * after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
-#define idr_for_each_entry(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
-#define idr_for_each_entry_ext(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id)
+#define idr_for_each_entry_ul(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id)
/**
- * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
- * @idr: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
+ * @idr: IDR handle.
+ * @entry: The type * to use as a cursor.
+ * @id: Entry ID.
*
- * Continue to iterate over list of given type, continuing after
- * the current position.
+ * Continue to iterate over entries, continuing after the current position.
*/
#define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idr), &(id)); \
@@ -242,10 +220,10 @@ struct ida {
struct radix_tree_root ida_rt;
};
-#define IDA_INIT { \
- .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \
+#define IDA_INIT(name) { \
+ .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \
}
-#define DEFINE_IDA(name) struct ida name = IDA_INIT
+#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index ee6657a0ed69..8fe7e4306816 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -8,6 +8,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -2111,7 +2112,7 @@ enum ieee80211_key_len {
#define FILS_ERP_MAX_REALM_LEN 253
#define FILS_ERP_MAX_RRK_LEN 64
-#define PMK_MAX_LEN 48
+#define PMK_MAX_LEN 64
/* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */
enum ieee80211_pub_actioncode {
@@ -2502,6 +2503,17 @@ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_get_tid - get qos TID
+ * @hdr: the frame
+ */
+static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
+{
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ return qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+/**
* ieee80211_get_SA - get pointer to SA
* @hdr: the frame
*
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4c54611e03e9..622658dfbf0a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -13,6 +13,8 @@ struct ifla_vf_stats {
__u64 tx_bytes;
__u64 broadcast;
__u64 multicast;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
};
struct ifla_vf_info {
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index bedf54b6f943..4cb7aeeafce0 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -30,10 +30,10 @@ struct macvlan_dev {
enum macvlan_mode mode;
u16 flags;
int nest_level;
+ unsigned int macaddr_count;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
- unsigned int macaddr_count;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 3ecef57c31e3..8e66866c11be 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -4,7 +4,7 @@
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
-struct skb_array *tap_get_skb_array(struct file *file);
+struct ptr_ring *tap_get_ptr_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tap_get_skb_array(struct file *f)
+static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
@@ -70,7 +70,7 @@ struct tap_queue {
u16 queue_index;
bool enabled;
struct list_head next;
- struct skb_array skb_array;
+ struct ptr_ring ring;
};
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bf9bdf42d577..fd00170b494f 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -17,9 +17,15 @@
#include <uapi/linux/if_tun.h>
+#define TUN_XDP_FLAG 0x1UL
+
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
-struct skb_array *tun_get_skb_array(struct file *file);
+struct ptr_ring *tun_get_tx_ring(struct file *file);
+bool tun_is_xdp_buff(void *ptr);
+void *tun_xdp_to_ptr(void *ptr);
+void *tun_ptr_to_xdp(void *ptr);
+void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -29,9 +35,24 @@ static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tun_get_skb_array(struct file *f)
+static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+static inline bool tun_is_xdp_buff(void *ptr)
+{
+ return false;
+}
+static inline void *tun_xdp_to_ptr(void *ptr)
+{
+ return NULL;
+}
+static inline void *tun_ptr_to_xdp(void *ptr)
+{
+ return NULL;
+}
+static inline void tun_ptr_free(void *ptr)
+{
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
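
The tun transmit path now queues either sk_buffs or XDP buffers on one ptr_ring, with XDP entries tagged by TUN_XDP_FLAG; a consumer such as vhost-net uses the accessors above to tell them apart, and tun_ptr_free() to drop entries it never consumed. A rough consumer-side sketch (handle_xdp() and handle_skb() are placeholders):

	struct ptr_ring *ring = tun_get_tx_ring(file);
	void *ptr;

	if (IS_ERR(ring))
		return PTR_ERR(ring);

	while ((ptr = ptr_ring_consume(ring)) != NULL) {
		if (tun_is_xdp_buff(ptr))
			handle_xdp(tun_ptr_to_xdp(ptr));	/* placeholder */
		else
			handle_skb((struct sk_buff *)ptr);	/* placeholder */
	}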
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 5e6a2d4dc366..d11f41d5269f 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -83,6 +83,30 @@ static inline bool is_vlan_dev(const struct net_device *dev)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
+static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
+}
+
+static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
+}
+
+static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
+}
+
+static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
+}
+
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
* @rx_packets: number of received packets
@@ -300,32 +324,47 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
}
/**
- * __vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_inner_tag - inner VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
*
- * Inserts the VLAN tag into @skb as part of the payload
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns error if skb_cow_head fails.
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline int __vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci,
+ unsigned int mac_len)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0)
return -ENOMEM;
- veth = skb_push(skb, VLAN_HLEN);
+ skb_push(skb, VLAN_HLEN);
- /* Move the mac addresses to the beginning of the new header. */
- memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+ /* Move the mac header sans proto to the beginning of the new header. */
+ if (likely(mac_len > ETH_TLEN))
+ memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
skb->mac_header -= VLAN_HLEN;
+ veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
+
/* first, the ethernet type */
- veth->h_vlan_proto = vlan_proto;
+ if (likely(mac_len >= ETH_TLEN)) {
+ /* h_vlan_encapsulated_proto should already be populated, and
+ * skb->data has space for h_vlan_proto
+ */
+ veth->h_vlan_proto = vlan_proto;
+ } else {
+ /* h_vlan_encapsulated_proto should not be populated, and
+ * skb->data has no space for h_vlan_proto
+ */
+ veth->h_vlan_encapsulated_proto = skb->protocol;
+ }
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
@@ -334,12 +373,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
}
/**
- * vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
+ * Returns error if skb_cow_head fails.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
+ * vlan_insert_inner_tag - inner VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
+ *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
@@ -347,12 +404,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto,
+ u16 vlan_tci,
+ unsigned int mac_len)
{
int err;
- err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
if (err) {
dev_kfree_skb_any(skb);
return NULL;
@@ -361,6 +420,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
}
/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
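The *_inner_tag variants generalise tag insertion to frames whose MAC header already carries one or more VLAN headers: @mac_len tells the helper how deep the new tag must go, and the ETH_HLEN wrappers above preserve the old single-tag behaviour. For instance, pushing a C-tag underneath an existing S-tag could look roughly like this (vid is assumed to hold the VLAN ID):

	/* MAC header = Ethernet header plus the outer (S-)tag */
	unsigned int mac_len = ETH_HLEN + VLAN_HLEN;

	skb = vlan_insert_inner_tag(skb, htons(ETH_P_8021Q), vid, mac_len);
	if (!skb)
		return -ENOMEM;	/* the original skb has already been freed */
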
+/**
* vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
new file mode 100644
index 000000000000..e7dc7a542a4e
--- /dev/null
+++ b/include/linux/iio/adc/stm32-dfsdm-adc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the STM32 DFSDM IIO driver API for the audio part
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef STM32_DFSDM_ADC_H
+#define STM32_DFSDM_ADC_H
+
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private);
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev);
+
+#endif
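
This small interface lets an audio consumer attach a callback to the DFSDM IIO buffer and have converted samples pushed to it. A hedged sketch of the consumer side; dfsdm_pcm_cb() and the private pointer are invented:

static int dfsdm_pcm_cb(const void *data, size_t size, void *private)
{
	/* push 'size' bytes of samples towards the audio layer ... */
	return 0;
}

static int dfsdm_pcm_start(struct iio_dev *iio_dev, void *priv)
{
	return stm32_dfsdm_get_buff_cb(iio_dev, dfsdm_pcm_cb, priv);
}

static int dfsdm_pcm_stop(struct iio_dev *iio_dev)
{
	return stm32_dfsdm_release_buff_cb(iio_dev);
}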
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 5e347a9805fd..9887f4f8e2a8 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -134,6 +134,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
void *private),
void *private);
/**
+ * iio_channel_cb_set_buffer_watermark() - set the buffer watermark.
+ * @cb_buffer: The callback buffer whose watermark is being configured.
+ * @watermark: buffer watermark in bytes.
+ *
+ * This function allows the buffer watermark to be configured.
+ */
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer,
+ size_t watermark);
+
+/**
* iio_channel_release_all_cb() - release and unregister the callback.
* @cb_buffer: The callback buffer that was allocated.
*/
@@ -216,6 +227,32 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_write_channel_attribute() - Write values to the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ * @val2: Value being written. val2 use depends on the attribute type.
+ * @attribute: info attribute to be written.
+ *
+ * Returns an error code or 0.
+ */
+int iio_write_channel_attribute(struct iio_channel *chan, int val,
+ int val2, enum iio_chan_info_enum attribute);
+
+/**
+ * iio_read_channel_attribute() - Read values from the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being read.
+ * @val2: Value being read. val2 use depends on the attribute type.
+ * @attribute: info attribute to be read.
+ *
+ * Returns an error code on failure. Otherwise returns a description of what is
+ * in val and val2, such as IIO_VAL_INT_PLUS_MICRO telling us the value is val
+ * + val2/1e6
+ */
+int iio_read_channel_attribute(struct iio_channel *chan, int *val,
+ int *val2, enum iio_chan_info_enum attribute);
+
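These helpers generalise iio_read_channel_raw()/iio_write_channel_raw() to any info attribute, which is why enum iio_chan_info_enum moves from iio.h into the consumer-visible types.h later in this diff. A consumer could fetch a channel's scale roughly like this (chan is an already-acquired struct iio_channel):

	int val, val2, ret;

	ret = iio_read_channel_attribute(chan, &val, &val2, IIO_CHAN_INFO_SCALE);
	if (ret < 0)
		return ret;

	if (ret == IIO_VAL_INT_PLUS_MICRO)
		pr_debug("scale is %d.%06d\n", val, val2);	/* val + val2/1e6 */
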
+/**
* iio_write_channel_raw() - write to a given channel
* @chan: The channel being queried.
* @val: Value being written.
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
new file mode 100644
index 000000000000..44d48bb1d39f
--- /dev/null
+++ b/include/linux/iio/hw-consumer.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Industrial I/O in kernel hardware consumer interface
+ *
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef LINUX_IIO_HW_CONSUMER_H
+#define LINUX_IIO_HW_CONSUMER_H
+
+struct iio_hw_consumer;
+
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
+
+#endif
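
A hardware consumer claims all channels mapped to a device but, unlike a normal callback buffer, does not transport the data through software; it merely keeps the providing devices in their buffered state so a hardware path (here apparently the STM32 DFSDM audio link added above) can stream directly. A sketch, under the assumption that the alloc functions return an ERR_PTR on failure:

	struct iio_hw_consumer *hwc;
	int ret;

	hwc = iio_hw_consumer_alloc(dev);	/* dev: the consumer's struct device */
	if (IS_ERR(hwc))
		return PTR_ERR(hwc);

	ret = iio_hw_consumer_enable(hwc);	/* providers start capturing */
	if (ret) {
		iio_hw_consumer_free(hwc);
		return ret;
	}

	/* ... the hardware data path is active here ... */

	iio_hw_consumer_disable(hwc);
	iio_hw_consumer_free(hwc);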
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 20b61347ea58..11579fd4126e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -20,34 +20,6 @@
* Currently assumes nano seconds.
*/
-enum iio_chan_info_enum {
- IIO_CHAN_INFO_RAW = 0,
- IIO_CHAN_INFO_PROCESSED,
- IIO_CHAN_INFO_SCALE,
- IIO_CHAN_INFO_OFFSET,
- IIO_CHAN_INFO_CALIBSCALE,
- IIO_CHAN_INFO_CALIBBIAS,
- IIO_CHAN_INFO_PEAK,
- IIO_CHAN_INFO_PEAK_SCALE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
- IIO_CHAN_INFO_AVERAGE_RAW,
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_SAMP_FREQ,
- IIO_CHAN_INFO_FREQUENCY,
- IIO_CHAN_INFO_PHASE,
- IIO_CHAN_INFO_HARDWAREGAIN,
- IIO_CHAN_INFO_HYSTERESIS,
- IIO_CHAN_INFO_INT_TIME,
- IIO_CHAN_INFO_ENABLE,
- IIO_CHAN_INFO_CALIBHEIGHT,
- IIO_CHAN_INFO_CALIBWEIGHT,
- IIO_CHAN_INFO_DEBOUNCE_COUNT,
- IIO_CHAN_INFO_DEBOUNCE_TIME,
- IIO_CHAN_INFO_CALIBEMISSIVITY,
- IIO_CHAN_INFO_OVERSAMPLING_RATIO,
-};
-
enum iio_shared_by {
IIO_SEPARATE,
IIO_SHARED_BY_TYPE,
@@ -606,8 +578,8 @@ const struct iio_chan_spec
* iio_device_register() - register a device with the IIO subsystem
* @indio_dev: Device structure filled by the device driver
**/
-#define iio_device_register(iio_dev) \
- __iio_device_register((iio_dev), THIS_MODULE)
+#define iio_device_register(indio_dev) \
+ __iio_device_register((indio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
/**
diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h
index 1601a2a63a72..5e1cfa75f652 100644
--- a/include/linux/iio/machine.h
+++ b/include/linux/iio/machine.h
@@ -28,4 +28,11 @@ struct iio_map {
void *consumer_data;
};
+#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \
+{ \
+ .adc_channel_label = _provider_channel, \
+ .consumer_dev_name = _consumer_dev_name, \
+ .consumer_channel = _consumer_channel, \
+}
+
#endif
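
IIO_MAP() is shorthand for the open-coded struct iio_map initialisers that machine/board code passes to iio_map_array_register(). For example (all names invented):

static struct iio_map board_adc_maps[] = {
	IIO_MAP("channel0", "battery-monitor", "voltage"),
	IIO_MAP("channel1", "battery-monitor", "current"),
	{ }	/* sentinel */
};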
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 7d5e44518379..b19b7204ef84 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -43,12 +43,13 @@ struct iio_trigger_ops {
/**
* struct iio_trigger - industrial I/O trigger device
* @ops: [DRIVER] operations structure
+ * @owner: [INTERN] owner of this driver module
* @id: [INTERN] unique id number
* @name: [DRIVER] unique name
* @dev: [DRIVER] associated device (if relevant)
* @list: [INTERN] used in maintenance of global trigger list
* @alloc_list: [DRIVER] used for driver specific trigger list
- * @use_count: use count for the trigger
+ * @use_count: [INTERN] use count for the trigger.
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
* @subirqs: [INTERN] information about the 'child' irqs.
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 2aa7b6384d64..6eb3d683ef62 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -34,4 +34,32 @@ enum iio_available_type {
IIO_AVAIL_RANGE,
};
+enum iio_chan_info_enum {
+ IIO_CHAN_INFO_RAW = 0,
+ IIO_CHAN_INFO_PROCESSED,
+ IIO_CHAN_INFO_SCALE,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_SAMP_FREQ,
+ IIO_CHAN_INFO_FREQUENCY,
+ IIO_CHAN_INFO_PHASE,
+ IIO_CHAN_INFO_HARDWAREGAIN,
+ IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
+ IIO_CHAN_INFO_CALIBEMISSIVITY,
+ IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+};
+
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 636ebe87e6f8..97defc1139e9 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -59,5 +59,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char
extern int inet_pton_with_scope(struct net *net, unsigned short af,
const char *src, const char *port, struct sockaddr_storage *addr);
+extern bool inet_addr_is_any(struct sockaddr *addr);
#endif /* _LINUX_INET_H */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 1ac5bf95bfdd..e16fe7d44a71 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -173,7 +173,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
diff --git a/include/linux/init.h b/include/linux/init.h
index ea1b31101d9e..bc27cf03c41e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -5,6 +5,13 @@
#include <linux/compiler.h>
#include <linux/types.h>
+/* Built-in __init functions needn't be compiled with retpoline */
+#if defined(__noretpoline) && !defined(MODULE)
+#define __noinitretpoline __noretpoline
+#else
+#define __noinitretpoline
+#endif
+
/* These macros are used to mark some functions or
* initialized data (doesn't apply to uninitialized data)
* as `initialization' functions. The kernel can take this
@@ -40,7 +47,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold __latent_entropy
+#define __init __section(.init.text) __cold __latent_entropy __noinitretpoline
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6a532629c983..a454b8aeb938 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -21,22 +21,11 @@
#include <asm/thread_info.h>
-#ifdef CONFIG_SMP
-# define INIT_PUSHABLE_TASKS(tsk) \
- .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
-#else
-# define INIT_PUSHABLE_TASKS(tsk)
-#endif
-
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-
-#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ(tsk) \
- .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
-#else
-#define INIT_CPUSET_SEQ(tsk)
-#endif
+extern struct nsproxy init_nsproxy;
+extern struct group_info init_groups;
+extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
@@ -47,67 +36,16 @@ extern struct fs_struct init_fs;
#endif
#ifdef CONFIG_POSIX_TIMERS
-#define INIT_POSIX_TIMERS(s) \
- .posix_timers = LIST_HEAD_INIT(s.posix_timers),
#define INIT_CPU_TIMERS(s) \
.cpu_timers = { \
LIST_HEAD_INIT(s.cpu_timers[0]), \
LIST_HEAD_INIT(s.cpu_timers[1]), \
- LIST_HEAD_INIT(s.cpu_timers[2]), \
- },
-#define INIT_CPUTIMER(s) \
- .cputimer = { \
- .cputime_atomic = INIT_CPUTIME_ATOMIC, \
- .running = false, \
- .checking_timer = false, \
+ LIST_HEAD_INIT(s.cpu_timers[2]), \
},
#else
-#define INIT_POSIX_TIMERS(s)
#define INIT_CPU_TIMERS(s)
-#define INIT_CPUTIMER(s)
#endif
-#define INIT_SIGNALS(sig) { \
- .nr_threads = 1, \
- .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
- .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
- .shared_pending = { \
- .list = LIST_HEAD_INIT(sig.shared_pending.list), \
- .signal = {{0}}}, \
- INIT_POSIX_TIMERS(sig) \
- INIT_CPU_TIMERS(sig) \
- .rlim = INIT_RLIMITS, \
- INIT_CPUTIMER(sig) \
- INIT_PREV_CPUTIME(sig) \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
-}
-
-extern struct nsproxy init_nsproxy;
-
-#define INIT_SIGHAND(sighand) { \
- .count = ATOMIC_INIT(1), \
- .action = { { { .sa_handler = SIG_DFL, } }, }, \
- .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
- .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
-}
-
-extern struct group_info init_groups;
-
-#define INIT_STRUCT_PID { \
- .count = ATOMIC_INIT(1), \
- .tasks = { \
- { .first = NULL }, \
- { .first = NULL }, \
- { .first = NULL }, \
- }, \
- .level = 0, \
- .numbers = { { \
- .nr = 0, \
- .ns = &init_pid_ns, \
- }, } \
-}
-
#define INIT_PID_LINK(type) \
{ \
.node = { \
@@ -117,192 +55,16 @@ extern struct group_info init_groups;
.pid = &init_struct_pid, \
}
-#ifdef CONFIG_AUDITSYSCALL
-#define INIT_IDS \
- .loginuid = INVALID_UID, \
- .sessionid = (unsigned int)-1,
-#else
-#define INIT_IDS
-#endif
-
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_PREEMPT(tsk) \
- .rcu_read_lock_nesting = 0, \
- .rcu_read_unlock_special.s = 0, \
- .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_TASKS_RCU
-#define INIT_TASK_RCU_TASKS(tsk) \
- .rcu_tasks_holdout = false, \
- .rcu_tasks_holdout_list = \
- LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
- .rcu_tasks_idle_cpu = -1,
-#else
-#define INIT_TASK_RCU_TASKS(tsk)
-#endif
-
-extern struct cred init_cred;
-
-#ifdef CONFIG_CGROUP_SCHED
-# define INIT_CGROUP_SCHED(tsk) \
- .sched_task_group = &root_task_group,
-#else
-# define INIT_CGROUP_SCHED(tsk)
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk) \
- .perf_event_mutex = \
- __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
- .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
-#else
-# define INIT_PERF_EVENTS(tsk)
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# define INIT_VTIME(tsk) \
- .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
- .vtime.starttime = 0, \
- .vtime.state = VTIME_SYS,
-#else
-# define INIT_VTIME(tsk)
-#endif
-
#define INIT_TASK_COMM "swapper"
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT_CACHED, \
- .pi_top_task = NULL,
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
-#ifdef CONFIG_NUMA_BALANCING
-# define INIT_NUMA_BALANCING(tsk) \
- .numa_preferred_nid = -1, \
- .numa_group = NULL, \
- .numa_faults = NULL,
-#else
-# define INIT_NUMA_BALANCING(tsk)
-#endif
-
-#ifdef CONFIG_KASAN
-# define INIT_KASAN(tsk) \
- .kasan_depth = 1,
-#else
-# define INIT_KASAN(tsk)
-#endif
-
-#ifdef CONFIG_LIVEPATCH
-# define INIT_LIVEPATCH(tsk) \
- .patch_state = KLP_UNDEFINED,
-#else
-# define INIT_LIVEPATCH(tsk)
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-# define INIT_TASK_TI(tsk) \
- .thread_info = INIT_THREAD_INFO(tsk), \
- .stack_refcount = ATOMIC_INIT(1),
-#else
-# define INIT_TASK_TI(tsk)
-#endif
-
-#ifdef CONFIG_SECURITY
-#define INIT_TASK_SECURITY .security = NULL,
-#else
-#define INIT_TASK_SECURITY
-#endif
-
-/*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-#define INIT_TASK(tsk) \
-{ \
- INIT_TASK_TI(tsk) \
- .state = 0, \
- .stack = init_stack, \
- .usage = ATOMIC_INIT(2), \
- .flags = PF_KTHREAD, \
- .prio = MAX_PRIO-20, \
- .static_prio = MAX_PRIO-20, \
- .normal_prio = MAX_PRIO-20, \
- .policy = SCHED_NORMAL, \
- .cpus_allowed = CPU_MASK_ALL, \
- .nr_cpus_allowed= NR_CPUS, \
- .mm = NULL, \
- .active_mm = &init_mm, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .se = { \
- .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
- }, \
- .rt = { \
- .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
- .time_slice = RR_TIMESLICE, \
- }, \
- .tasks = LIST_HEAD_INIT(tsk.tasks), \
- INIT_PUSHABLE_TASKS(tsk) \
- INIT_CGROUP_SCHED(tsk) \
- .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
- .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
- .real_parent = &tsk, \
- .parent = &tsk, \
- .children = LIST_HEAD_INIT(tsk.children), \
- .sibling = LIST_HEAD_INIT(tsk.sibling), \
- .group_leader = &tsk, \
- RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
- RCU_POINTER_INITIALIZER(cred, &init_cred), \
- .comm = INIT_TASK_COMM, \
- .thread = INIT_THREAD, \
- .fs = &init_fs, \
- .files = &init_files, \
- .signal = &init_signals, \
- .sighand = &init_sighand, \
- .nsproxy = &init_nsproxy, \
- .pending = { \
- .list = LIST_HEAD_INIT(tsk.pending.list), \
- .signal = {{0}}}, \
- .blocked = {{0}}, \
- .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
- INIT_CPU_TIMERS(tsk) \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
- [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
- }, \
- .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
- .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
- INIT_IDS \
- INIT_PERF_EVENTS(tsk) \
- INIT_TRACE_IRQFLAGS \
- INIT_LOCKDEP \
- INIT_FTRACE_GRAPH \
- INIT_TRACE_RECURSION \
- INIT_TASK_RCU_PREEMPT(tsk) \
- INIT_TASK_RCU_TASKS(tsk) \
- INIT_CPUSET_SEQ(tsk) \
- INIT_RT_MUTEXES(tsk) \
- INIT_PREV_CPUTIME(tsk) \
- INIT_VTIME(tsk) \
- INIT_NUMA_BALANCING(tsk) \
- INIT_KASAN(tsk) \
- INIT_LIVEPATCH(tsk) \
- INIT_TASK_SECURITY \
-}
-
-
/* Attach to the init_task data structure for proper alignment */
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
#define __init_task_data __attribute__((__section__(".data..init_task")))
+#else
+#define __init_task_data /**/
+#endif
+/* Attach to the thread_info data structure for proper alignment */
+#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
#endif
diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h
deleted file mode 100644
index f9d932476a80..000000000000
--- a/include/linux/input/gpio_tilt.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _INPUT_GPIO_TILT_H
-#define _INPUT_GPIO_TILT_H
-
-/**
- * struct gpio_tilt_axis - Axis used by the tilt switch
- * @axis: Constant describing the axis, e.g. ABS_X
- * @min: minimum value for abs_param
- * @max: maximum value for abs_param
- * @fuzz: fuzz value for abs_param
- * @flat: flat value for abs_param
- */
-struct gpio_tilt_axis {
- int axis;
- int min;
- int max;
- int fuzz;
- int flat;
-};
-
-/**
- * struct gpio_tilt_state - state description
- * @gpios: bitfield of gpio target-states for the value
- * @axes: array containing the axes settings for the gpio state
- * The array indizes must correspond to the axes defined
- * in platform_data
- *
- * This structure describes a supported axis settings
- * and the necessary gpio-state which represent it.
- *
- * The n-th bit in the bitfield describes the state of the n-th GPIO
- * from the gpios-array defined in gpio_regulator_config below.
- */
-struct gpio_tilt_state {
- int gpios;
- int *axes;
-};
-
-/**
- * struct gpio_tilt_platform_data
- * @gpios: Array containing the gpios determining the tilt state
- * @nr_gpios: Number of gpios
- * @axes: Array of gpio_tilt_axis descriptions
- * @nr_axes: Number of axes
- * @states: Array of gpio_tilt_state entries describing
- * the gpio state for specific tilts
- * @nr_states: Number of states available
- * @debounce_interval: debounce ticks interval in msecs
- * @poll_interval: polling interval in msecs - for polling driver only
- * @enable: callback to enable the tilt switch
- * @disable: callback to disable the tilt switch
- *
- * This structure contains gpio-tilt-switch configuration
- * information that must be passed by platform code to the
- * gpio-tilt input driver.
- */
-struct gpio_tilt_platform_data {
- struct gpio *gpios;
- int nr_gpios;
-
- struct gpio_tilt_axis *axes;
- int nr_axes;
-
- struct gpio_tilt_state *states;
- int nr_states;
-
- int debounce_interval;
-
- unsigned int poll_interval;
- int (*enable)(struct device *dev);
- void (*disable)(struct device *dev);
-};
-
-#endif
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index c2d6082a1a4c..858d3f4a2241 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -14,6 +14,7 @@
enum integrity_status {
INTEGRITY_PASS = 0,
+ INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f3274d9f46a2..ef169d67df92 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -83,7 +83,9 @@
/*
* Decoding Capability Register
*/
+#define cap_5lp_support(c) (((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1)
+#define cap_fl1gp_support(c) (((c) >> 56) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
@@ -207,12 +209,12 @@
#define DMA_FECTL_IM (((u32)1) << 31)
/* FSTS_REG */
-#define DMA_FSTS_PPF ((u32)2)
-#define DMA_FSTS_PFO ((u32)1)
-#define DMA_FSTS_IQE (1 << 4)
-#define DMA_FSTS_ICE (1 << 5)
-#define DMA_FSTS_ITE (1 << 6)
-#define DMA_FSTS_PRO (1 << 7)
+#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
+#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
+#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
+#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
+#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
+#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 69c238210325..5426627f9c55 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -4,9 +4,7 @@
#define _LINUX_INTERRUPT_H
#include <linux/kernel.h>
-#include <linux/linkage.h>
#include <linux/bitops.h>
-#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 41b8c5757859..19938ee6eb31 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -465,23 +465,23 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
-static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+static inline size_t iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
- return -ENODEV;
+ return 0;
}
-static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,
- int gfp_order)
+static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, int gfp_order)
{
- return -ENODEV;
+ return 0;
}
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
- return -ENODEV;
+ return 0;
}
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 93b4183cf53d..da0ebaec25f0 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -265,7 +265,7 @@ extern struct resource * __devm_request_region(struct device *dev,
extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
-extern int iomem_is_exclusive(u64 addr);
+extern bool iomem_is_exclusive(u64 addr);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 821b2f260992..6cc2df7f7ac9 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -8,8 +8,6 @@
#include <uapi/linux/ipc.h>
#include <linux/refcount.h>
-#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
-
/* used by in-kernel data structures */
struct kern_ipc_perm {
spinlock_t lock;
diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h
index 4d3a76380e32..05c9422624c6 100644
--- a/include/linux/ipmi-fru.h
+++ b/include/linux/ipmi-fru.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2012 CERN (www.cern.ch)
* Author: Alessandro Rubini <rubini@gnudd.com>
*
- * Released according to the GNU GPL, version 2 or any later version.
- *
* This work is part of the White Rabbit project, a research effort led
* by CERN, the European Institute for Nuclear Research.
*/
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index f4ffacf4fe9d..8b0626cec980 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ipmi.h
*
@@ -9,26 +10,6 @@
*
* Copyright 2002 MontaVista Software Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IPMI_H
#define __LINUX_IPMI_H
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 5be51281e14d..af457b5a689e 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ipmi_smi.h
*
@@ -9,26 +10,6 @@
*
* Copyright 2002 MontaVista Software Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IPMI_SMI_H
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a0231e96a578..65916a305f3d 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -10,18 +10,13 @@
* Thanks. --rmk
*/
-#include <linux/smp.h>
-#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
-#include <linux/gfp.h>
#include <linux/irqhandler.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
-#include <linux/errno.h>
#include <linux/topology.h>
-#include <linux/wait.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -1170,4 +1165,22 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
+/*
+ * Registers a generic IRQ handling function as the top-level IRQ handler in
+ * the system, which is generally the first C code called from an assembly
+ * architecture-specific interrupt handler.
+ *
+ * Returns 0 on success, or -EBUSY if an IRQ handler has already been
+ * registered.
+ */
+int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
+
+/*
+ * Allows interrupt handlers to find the irqchip that's been registered as the
+ * top-level IRQ handler.
+ */
+extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
+#endif
+
#endif /* _LINUX_IRQ_H */
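
CONFIG_GENERIC_IRQ_MULTI_HANDLER lifts the handle_arch_irq/set_handle_irq() pattern that several architectures already carried into generic code, so a root irqchip driver can register itself the same way everywhere. A driver-side sketch with the register access and domain setup elided (all my_* names are invented):

static void my_root_handle_irq(struct pt_regs *regs)
{
	/* read the pending hwirq and dispatch it, e.g. via handle_domain_irq() */
}

static int __init my_root_irqchip_init(struct device_node *node,
				       struct device_node *parent)
{
	/* ... ioremap the registers and create the irq domain ... */
	return set_handle_irq(my_root_handle_irq);
}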
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 0e81035b678f..b11fcdfd0770 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -13,10 +13,13 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-#define IRQ_WORK_PENDING 1UL
-#define IRQ_WORK_BUSY 2UL
-#define IRQ_WORK_FLAGS 3UL
-#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
+#define IRQ_WORK_PENDING BIT(0)
+#define IRQ_WORK_BUSY BIT(1)
+
+/* Doesn't want IPI, wait for tick: */
+#define IRQ_WORK_LAZY BIT(2)
+
+#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
struct irq_work {
unsigned long flags;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c00c4c33e432..f5af3b594e6e 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -106,6 +106,7 @@
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+#define GICR_CTLR_RWP (1UL << 3)
#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
@@ -312,7 +313,8 @@
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
-#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+#define GITS_TYPER_HCC_SHIFT 24
+#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff)
#define GITS_TYPER_VMOVP (1ULL << 37)
#define GITS_IIDR_REV_SHIFT 12
@@ -503,6 +505,7 @@
#define ICH_HCR_EN (1 << 0)
#define ICH_HCR_UIE (1 << 1)
+#define ICH_HCR_NPIE (1 << 3)
#define ICH_HCR_TC (1 << 10)
#define ICH_HCR_TALL0 (1 << 11)
#define ICH_HCR_TALL1 (1 << 12)
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index d3453ee072fc..68d8b1f73682 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -84,6 +84,7 @@
#define GICH_HCR_EN (1 << 0)
#define GICH_HCR_UIE (1 << 1)
+#define GICH_HCR_NPIE (1 << 3)
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h
deleted file mode 100644
index d120496370b9..000000000000
--- a/include/linux/irqchip/metag-ext.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Imagination Technologies
- */
-
-#ifndef _LINUX_IRQCHIP_METAG_EXT_H_
-#define _LINUX_IRQCHIP_METAG_EXT_H_
-
-struct irq_data;
-struct platform_device;
-
-/* called from core irq code at init */
-int init_external_IRQ(void);
-
-/*
- * called from SoC init_irq() callback to dynamically indicate the lack of
- * HWMASKEXT registers.
- */
-void meta_intc_no_mask(void);
-
-/*
- * These allow SoCs to specialise the interrupt controller from their init_irq
- * callbacks.
- */
-
-extern struct irq_chip meta_intc_edge_chip;
-extern struct irq_chip meta_intc_level_chip;
-
-/* this should be called in the mask callback */
-void meta_intc_mask_irq_simple(struct irq_data *data);
-/* this should be called in the unmask callback */
-void meta_intc_unmask_irq_simple(struct irq_data *data);
-
-#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */
diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h
deleted file mode 100644
index 0adcf449e4e4..000000000000
--- a/include/linux/irqchip/metag.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2011 Imagination Technologies
- */
-
-#ifndef _LINUX_IRQCHIP_METAG_H_
-#define _LINUX_IRQCHIP_METAG_H_
-
-#include <linux/errno.h>
-
-#ifdef CONFIG_METAG_PERFCOUNTER_IRQS
-extern int init_internal_IRQ(void);
-extern int internal_irq_map(unsigned int hw);
-#else
-static inline int init_internal_IRQ(void)
-{
- return 0;
-}
-static inline int internal_irq_map(unsigned int hw)
-{
- return -EINVAL;
-}
-#endif
-
-#endif /* _LINUX_IRQCHIP_METAG_H_ */
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 1b3996ff3f16..9700f00bbc04 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -40,7 +40,6 @@ do { \
do { \
current->softirq_context--; \
} while (0)
-# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
@@ -54,7 +53,6 @@ do { \
# define trace_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
new file mode 100644
index 000000000000..be50ef7cedab
--- /dev/null
+++ b/include/linux/iversion.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IVERSION_H
+#define _LINUX_IVERSION_H
+
+#include <linux/fs.h>
+
+/*
+ * The inode->i_version field:
+ * ---------------------------
+ * The change attribute (i_version) is mandated by NFSv4 and is mostly for
+ * knfsd, but is also used for other purposes (e.g. IMA). The i_version must
+ * appear different to observers if there was a change to the inode's data or
+ * metadata since it was last queried.
+ *
+ * Observers see the i_version as a 64-bit number that never decreases. If it
+ * remains the same since it was last checked, then nothing has changed in the
+ * inode. If it's different then something has changed. Observers cannot infer
+ * anything about the nature or magnitude of the changes from the value, only
+ * that the inode has changed in some fashion.
+ *
+ * Not all filesystems properly implement the i_version counter. Subsystems that
+ * want to use i_version field on an inode should first check whether the
+ * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro).
+ *
+ * Those that set SB_I_VERSION will automatically have their i_version counter
+ * incremented on writes to normal files. If the SB_I_VERSION is not set, then
+ * the VFS will not touch it on writes, and the filesystem can use it how it
+ * wishes. Note that the filesystem is always responsible for updating the
+ * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.).
+ * We consider these sorts of filesystems to have a kernel-managed i_version.
+ *
+ * It may be impractical for filesystems to keep i_version updates atomic with
+ * respect to the changes that cause them. They should, however, guarantee
+ * that i_version updates are never visible before the changes that caused
+ * them. Also, i_version updates should never be delayed longer than it takes
+ * the original change to reach disk.
+ *
+ * This implementation uses the low bit in the i_version field as a flag to
+ * track when the value has been queried. If it has not been queried since it
+ * was last incremented, we can skip the increment in most cases.
+ *
+ * In the event that we're updating the ctime, we will usually go ahead and
+ * bump the i_version anyway. Since that has to go to stable storage in some
+ * fashion, we might as well increment it as well.
+ *
+ * With this implementation, the value should always appear to observers to
+ * increase over time if the file has changed. It's recommended to use
+ * inode_eq_iversion() helper to compare values.
+ *
+ * Note that some filesystems (e.g. NFS and AFS) just use the field to store
+ * a server-provided value (for the most part). For that reason, those
+ * filesystems do not set SB_I_VERSION. These filesystems are considered to
+ * have a self-managed i_version.
+ *
+ * Persistently storing the i_version
+ * ----------------------------------
+ * Queries of the i_version field are not gated on them hitting the backing
+ * store. It's always possible that the host could crash after allowing
+ * a query of the value but before it has made it to disk.
+ *
+ * To mitigate this problem, filesystems should always use
+ * inode_set_iversion_queried when loading an existing inode from disk. This
+ * ensures that the next attempted inode increment will result in the value
+ * changing.
+ *
+ * Storing the value to disk therefore does not count as a query, so those
+ * filesystems should use inode_peek_iversion to grab the value to be stored.
+ * There is no need to flag the value as having been queried in that case.
+ */
+
+/*
+ * We borrow the lowest bit in the i_version to use as a flag to tell whether
+ * it has been queried since we last incremented it. If it has, then we must
+ * increment it on the next change. After that, we can clear the flag and
+ * avoid incrementing it again until it has again been queried.
+ */
+#define I_VERSION_QUERIED_SHIFT (1)
+#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1))
+#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT)
+
+/**
+ * inode_set_iversion_raw - set i_version to the specified raw value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for use by
+ * filesystems that self-manage the i_version.
+ *
+ * For example, the NFS client stores its NFSv4 change attribute in this way,
+ * and the AFS client stores the data_version from the server here.
+ */
+static inline void
+inode_set_iversion_raw(struct inode *inode, u64 val)
+{
+ atomic64_set(&inode->i_version, val);
+}
+
+/**
+ * inode_peek_iversion_raw - grab a "raw" iversion value
+ * @inode: inode from which i_version should be read
+ *
+ * Grab a "raw" inode->i_version value and return it. The i_version is not
+ * flagged or converted in any way. This is mostly used to access a self-managed
+ * i_version.
+ *
+ * With those filesystems, we want to treat the i_version as an entirely
+ * opaque value.
+ */
+static inline u64
+inode_peek_iversion_raw(const struct inode *inode)
+{
+ return atomic64_read(&inode->i_version);
+}
+
+/**
+ * inode_set_iversion - set i_version to a particular value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for filesystems with
+ * a kernel-managed i_version, for initializing a newly-created inode from
+ * scratch.
+ *
+ * In this case, we do not set the QUERIED flag since we know that this value
+ * has never been queried.
+ */
+static inline void
+inode_set_iversion(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT);
+}
+
+/**
+ * inode_set_iversion_queried - set i_version to a particular value as queried
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val, and flag it for increment on the next
+ * change.
+ *
+ * Filesystems that persistently store the i_version on disk should use this
+ * when loading an existing inode from disk.
+ *
+ * When loading in an i_version value from a backing store, we can't be certain
+ * that it wasn't previously viewed before being stored. Thus, we must assume
+ * that it was, to ensure that we don't end up handing out the same value for
+ * different versions of the same inode.
+ */
+static inline void
+inode_set_iversion_queried(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) |
+ I_VERSION_QUERIED);
+}
+
+/**
+ * inode_maybe_inc_iversion - increments i_version
+ * @inode: inode with the i_version that should be updated
+ * @force: increment the counter even if it's not necessary?
+ *
+ * Every time the inode is modified, the i_version field must be seen to have
+ * changed by any observer.
+ *
+ * If "force" is set or the QUERIED flag is set, then ensure that we increment
+ * the value, and clear the queried flag.
+ *
+ * In the common case where neither is set, then we can return "false" without
+ * updating i_version.
+ *
+ * If this function returns false, and no other metadata has changed, then we
+ * can avoid logging the metadata.
+ */
+static inline bool
+inode_maybe_inc_iversion(struct inode *inode, bool force)
+{
+ u64 cur, old, new;
+
+ /*
+ * The i_version field is not strictly ordered with any other inode
+ * information, but the legacy inode_inc_iversion code used a spinlock
+ * to serialize increments.
+ *
+ * Here, we add full memory barriers to ensure that any de-facto
+ * ordering with other info is preserved.
+ *
+ * This barrier pairs with the barrier in inode_query_iversion()
+ */
+ smp_mb();
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ /* If flag is clear then we needn't do anything */
+ if (!force && !(cur & I_VERSION_QUERIED))
+ return false;
+
+ /* Since lowest bit is flag, add 2 to avoid it */
+ new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
+
+ old = atomic64_cmpxchg(&inode->i_version, cur, new);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+ return true;
+}
+
+
+/**
+ * inode_inc_iversion - forcibly increment i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the i_version field. This always results in a change to
+ * the observable value.
+ */
+static inline void
+inode_inc_iversion(struct inode *inode)
+{
+ inode_maybe_inc_iversion(inode, true);
+}
+
+/**
+ * inode_iversion_need_inc - is the i_version in need of being incremented?
+ * @inode: inode to check
+ *
+ * Returns whether the inode->i_version counter needs incrementing on the next
+ * change. Just fetch the value and check the QUERIED flag.
+ */
+static inline bool
+inode_iversion_need_inc(struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED;
+}
+
+/**
+ * inode_inc_iversion_raw - forcibly increment raw i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the raw i_version field. This always results in a change
+ * to the raw value.
+ *
+ * NFS will use the i_version field to store the value from the server. It
+ * mostly treats it as opaque, but in the case where it holds a write
+ * delegation, it must increment the value itself. This function does that.
+ */
+static inline void
+inode_inc_iversion_raw(struct inode *inode)
+{
+ atomic64_inc(&inode->i_version);
+}
+
+/**
+ * inode_peek_iversion - read i_version without flagging it to be incremented
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter for an inode without registering it as a
+ * query.
+ *
+ * This is typically used by local filesystems that need to store an i_version
+ * on disk. In that situation, it's not necessary to flag it as having been
+ * viewed, as the result won't be used to gauge changes from that point.
+ */
+static inline u64
+inode_peek_iversion(const struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
+}
+
+/**
+ * inode_query_iversion - read i_version for later use
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter. This should be used by callers that wish
+ * to store the returned i_version for later comparison. This will guarantee
+ * that a later query of the i_version will result in a different value if
+ * anything has changed.
+ *
+ * In this implementation, we fetch the current value, set the QUERIED flag and
+ * then try to swap it into place with a cmpxchg, if it wasn't already set. If
+ * that fails, we try again with the newly fetched value from the cmpxchg.
+ */
+static inline u64
+inode_query_iversion(struct inode *inode)
+{
+ u64 cur, old, new;
+
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ /* If flag is already set, then no need to swap */
+ if (cur & I_VERSION_QUERIED) {
+ /*
+ * This barrier (and the implicit barrier in the
+ * cmpxchg below) pairs with the barrier in
+ * inode_maybe_inc_iversion().
+ */
+ smp_mb();
+ break;
+ }
+
+ new = cur | I_VERSION_QUERIED;
+ old = atomic64_cmpxchg(&inode->i_version, cur, new);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+ return cur >> I_VERSION_QUERIED_SHIFT;
+}
+
+/**
+ * inode_eq_iversion_raw - check whether the raw i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare the current raw i_version counter with a previous one. Returns true
+ * if they are the same or false if they are different.
+ */
+static inline bool
+inode_eq_iversion_raw(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion_raw(inode) == old;
+}
+
+/**
+ * inode_eq_iversion - check whether the i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare an i_version counter with a previous one. Returns true if they are
+ * the same, and false if they are different.
+ *
+ * Note that we don't need to set the QUERIED flag in this case, as the value
+ * in the inode is not being recorded for later use.
+ */
+static inline bool
+inode_eq_iversion(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion(inode) == old;
+}
+#endif
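The header above describes the queried-bit scheme in prose; the following standalone model shows the same idea with C11 atomics on a plain 64-bit word. It is a simplified sketch, not the kernel code (it omits the memory barriers the kernel adds): the counter lives in the upper bits, the low bit records whether the value was handed out, and an increment is skipped when nobody has queried since the last bump.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QUERIED   1ULL   /* low bit: the value has been handed out */
#define INCREMENT 2ULL   /* the real counter lives in the upper bits */

/* Hand out the counter and mark it as queried (models inode_query_iversion). */
static unsigned long long query(atomic_ullong *v)
{
	unsigned long long cur = atomic_load(v);

	while (!(cur & QUERIED) &&
	       !atomic_compare_exchange_weak(v, &cur, cur | QUERIED))
		;	/* cur was refreshed on failure; retry */
	return cur >> 1;
}

/* Bump the counter only if needed (models inode_maybe_inc_iversion). */
static bool maybe_inc(atomic_ullong *v, bool force)
{
	unsigned long long cur = atomic_load(v);

	for (;;) {
		if (!force && !(cur & QUERIED))
			return false;	/* nobody looked since the last bump */
		if (atomic_compare_exchange_weak(v, &cur,
						 (cur & ~QUERIED) + INCREMENT))
			return true;
	}
}

int main(void)
{
	atomic_ullong v = 0;

	printf("inc, never queried: %d\n", maybe_inc(&v, false));  /* 0: skipped */
	printf("query:              %llu\n", query(&v));
	printf("inc after query:    %d\n", maybe_inc(&v, false));  /* 1: bumped */
	return 0;
}

The first maybe_inc() is skipped because the value was never observed; once query() sets the flag, the next change is guaranteed to produce a new value, which is the property NFSv4 and IMA rely on.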
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 296d1e0ea87b..b708e5169d1d 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
/**
- * struct jbd_inode is the structure linking inodes in ordered mode
- * present in a transaction so that we can sync them during commit.
+ * struct jbd2_inode - The jbd2_inode type is the structure linking inodes in
+ * ordered mode present in a transaction so that we can sync them during commit.
*/
struct jbd2_inode {
- /* Which transaction does this inode belong to? Either the running
- * transaction or the committing one. [j_list_lock] */
+ /**
+ * @i_transaction:
+ *
+ * Which transaction does this inode belong to? Either the running
+ * transaction or the committing one. [j_list_lock]
+ */
transaction_t *i_transaction;
- /* Pointer to the running transaction modifying inode's data in case
- * there is already a committing transaction touching it. [j_list_lock] */
+ /**
+ * @i_next_transaction:
+ *
+ * Pointer to the running transaction modifying inode's data in case
+ * there is already a committing transaction touching it. [j_list_lock]
+ */
transaction_t *i_next_transaction;
- /* List of inodes in the i_transaction [j_list_lock] */
+ /**
+ * @i_list: List of inodes in the i_transaction [j_list_lock]
+ */
struct list_head i_list;
- /* VFS inode this inode belongs to [constant during the lifetime
- * of the structure] */
+ /**
+ * @i_vfs_inode:
+ *
+ * VFS inode this inode belongs to [constant for lifetime of structure]
+ */
struct inode *i_vfs_inode;
- /* Flags of inode [j_list_lock] */
+ /**
+ * @i_flags: Flags of inode [j_list_lock]
+ */
unsigned long i_flags;
};
@@ -447,12 +462,20 @@ struct jbd2_revoke_table_s;
* struct handle_s - The handle_s type is the concrete type associated with
* handle_t.
* @h_transaction: Which compound transaction is this update a part of?
+ * @h_journal: Which journal this handle belongs to - used iff h_reserved set.
+ * @h_rsv_handle: Handle reserved for finishing the logical operation.
* @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
+ * @h_ref: Reference count on this handle.
+ * @h_err: Field for caller's use to track errors through large fs operations.
+ * @h_sync: Flag for sync-on-close.
+ * @h_jdata: Flag to force data journaling.
+ * @h_reserved: Flag for handle for reserved credits.
+ * @h_aborted: Flag indicating fatal error on handle.
+ * @h_type: For handle statistics.
+ * @h_line_no: For handle statistics.
+ * @h_start_jiffies: Handle start time.
+ * @h_requested_credits: Holds @h_buffer_credits after handle is started.
+ * @saved_alloc_context: Saved context while transaction is open.
**/
/* Docbook can't yet cope with the bit fields, but will leave the documentation
@@ -462,32 +485,23 @@ struct jbd2_revoke_table_s;
struct jbd2_journal_handle
{
union {
- /* Which compound transaction is this update a part of? */
transaction_t *h_transaction;
/* Which journal handle belongs to - used iff h_reserved set */
journal_t *h_journal;
};
- /* Handle reserved for finishing the logical operation */
handle_t *h_rsv_handle;
-
- /* Number of remaining buffers we are allowed to dirty: */
int h_buffer_credits;
-
- /* Reference count on this handle */
int h_ref;
-
- /* Field for caller's use to track errors through large fs */
- /* operations */
int h_err;
/* Flags [no locking] */
- unsigned int h_sync: 1; /* sync-on-close */
- unsigned int h_jdata: 1; /* force data journaling */
- unsigned int h_reserved: 1; /* handle with reserved credits */
- unsigned int h_aborted: 1; /* fatal error on handle */
- unsigned int h_type: 8; /* for handle statistics */
- unsigned int h_line_no: 16; /* for handle statistics */
+ unsigned int h_sync: 1;
+ unsigned int h_jdata: 1;
+ unsigned int h_reserved: 1;
+ unsigned int h_aborted: 1;
+ unsigned int h_type: 8;
+ unsigned int h_line_no: 16;
unsigned long h_start_jiffies;
unsigned int h_requested_credits;
@@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end)
/**
* struct journal_s - The journal_s type is the concrete type associated with
* journal_t.
- * @j_flags: General journaling state flags
- * @j_errno: Is there an outstanding uncleared error on the journal (from a
- * prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count: Number of processes waiting to create a barrier lock
- * @j_barrier: The barrier lock itself
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- * waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- * to start committing, or for a barrier lock to be released
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- * journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset for into the device where we store the
- * journal
- * @j_fs_dev: Device which holds the client fs. For internal journal this will
- * be equal to j_dev
- * @j_reserved_credits: Number of buffers reserved from the running transaction
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal. If present, all journal
- * block numbers are mapped into this inode via bmap().
- * @j_tail_sequence: Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- * transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- * commit
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
- * single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- * a commit?
- * @j_commit_timer: The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- * current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- * number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_history_lock: Protect the transactions statistics history
- * @j_proc_entry: procfs entry for the jbd statistics directory
- * @j_stats: Overall statistics
- * @j_private: An opaque pointer to fs-private information.
- * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
*/
-
struct journal_s
{
- /* General journaling state flags [j_state_lock] */
+ /**
+ * @j_flags: General journaling state flags [j_state_lock]
+ */
unsigned long j_flags;
- /*
+ /**
+ * @j_errno:
+ *
* Is there an outstanding uncleared error on the journal (from a prior
* abort)? [j_state_lock]
*/
int j_errno;
- /* The superblock buffer */
+ /**
+ * @j_sb_buffer: The first part of the superblock buffer.
+ */
struct buffer_head *j_sb_buffer;
+
+ /**
+ * @j_superblock: The second part of the superblock buffer.
+ */
journal_superblock_t *j_superblock;
- /* Version of the superblock format */
+ /**
+ * @j_format_version: Version of the superblock format.
+ */
int j_format_version;
- /*
- * Protect the various scalars in the journal
+ /**
+ * @j_state_lock: Protect the various scalars in the journal.
*/
rwlock_t j_state_lock;
- /*
+ /**
+ * @j_barrier_count:
+ *
* Number of processes waiting to create a barrier lock [j_state_lock]
*/
int j_barrier_count;
- /* The barrier lock itself */
+ /**
+ * @j_barrier: The barrier lock itself.
+ */
struct mutex j_barrier;
- /*
+ /**
+ * @j_running_transaction:
+ *
* Transactions: The current running transaction...
* [j_state_lock] [caller holding open handle]
*/
transaction_t *j_running_transaction;
- /*
+ /**
+ * @j_committing_transaction:
+ *
* the transaction we are pushing to disk
* [j_state_lock] [caller holding open handle]
*/
transaction_t *j_committing_transaction;
- /*
+ /**
+ * @j_checkpoint_transactions:
+ *
* ... and a linked circular list of all transactions waiting for
* checkpointing. [j_list_lock]
*/
transaction_t *j_checkpoint_transactions;
- /*
+ /**
+ * @j_wait_transaction_locked:
+ *
* Wait queue for waiting for a locked transaction to start committing,
- * or for a barrier lock to be released
+ * or for a barrier lock to be released.
*/
wait_queue_head_t j_wait_transaction_locked;
- /* Wait queue for waiting for commit to complete */
+ /**
+ * @j_wait_done_commit: Wait queue for waiting for commit to complete.
+ */
wait_queue_head_t j_wait_done_commit;
- /* Wait queue to trigger commit */
+ /**
+ * @j_wait_commit: Wait queue to trigger commit.
+ */
wait_queue_head_t j_wait_commit;
- /* Wait queue to wait for updates to complete */
+ /**
+ * @j_wait_updates: Wait queue to wait for updates to complete.
+ */
wait_queue_head_t j_wait_updates;
- /* Wait queue to wait for reserved buffer credits to drop */
+ /**
+ * @j_wait_reserved:
+ *
+ * Wait queue to wait for reserved buffer credits to drop.
+ */
wait_queue_head_t j_wait_reserved;
- /* Semaphore for locking against concurrent checkpoints */
+ /**
+ * @j_checkpoint_mutex:
+ *
+ * Mutex for locking against concurrent checkpoints.
+ */
struct mutex j_checkpoint_mutex;
- /*
+ /**
+ * @j_chkpt_bhs:
+ *
* List of buffer heads used by the checkpoint routine. This
* was moved from jbd2_log_do_checkpoint() to reduce stack
* usage. Access to this array is controlled by the
- * j_checkpoint_mutex. [j_checkpoint_mutex]
+ * @j_checkpoint_mutex. [j_checkpoint_mutex]
*/
struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
-
- /*
+
+ /**
+ * @j_head:
+ *
* Journal head: identifies the first unused block in the journal.
* [j_state_lock]
*/
unsigned long j_head;
- /*
+ /**
+ * @j_tail:
+ *
* Journal tail: identifies the oldest still-used block in the journal.
* [j_state_lock]
*/
unsigned long j_tail;
- /*
+ /**
+ * @j_free:
+ *
* Journal free: how many free blocks are there in the journal?
* [j_state_lock]
*/
unsigned long j_free;
- /*
- * Journal start and end: the block numbers of the first usable block
- * and one beyond the last usable block in the journal. [j_state_lock]
+ /**
+ * @j_first:
+ *
+ * The block number of the first usable block in the journal
+ * [j_state_lock].
*/
unsigned long j_first;
+
+ /**
+ * @j_last:
+ *
+ * The block number one beyond the last usable block in the journal
+ * [j_state_lock].
+ */
unsigned long j_last;
- /*
- * Device, blocksize and starting block offset for the location where we
- * store the journal.
+ /**
+ * @j_dev: Device where we store the journal.
*/
struct block_device *j_dev;
+
+ /**
+ * @j_blocksize: Block size for the location where we store the journal.
+ */
int j_blocksize;
+
+ /**
+ * @j_blk_offset:
+ *
+ * Starting block offset into the device where we store the journal.
+ */
unsigned long long j_blk_offset;
+
+ /**
+ * @j_devname: Journal device name.
+ */
char j_devname[BDEVNAME_SIZE+24];
- /*
+ /**
+ * @j_fs_dev:
+ *
* Device which holds the client fs. For internal journal this will be
* equal to j_dev.
*/
struct block_device *j_fs_dev;
- /* Total maximum capacity of the journal region on disk. */
+ /**
+ * @j_maxlen: Total maximum capacity of the journal region on disk.
+ */
unsigned int j_maxlen;
- /* Number of buffers reserved from the running transaction */
+ /**
+ * @j_reserved_credits:
+ *
+ * Number of buffers reserved from the running transaction.
+ */
atomic_t j_reserved_credits;
- /*
- * Protects the buffer lists and internal buffer state.
+ /**
+ * @j_list_lock: Protects the buffer lists and internal buffer state.
*/
spinlock_t j_list_lock;
- /* Optional inode where we store the journal. If present, all */
- /* journal block numbers are mapped into this inode via */
- /* bmap(). */
+ /**
+ * @j_inode:
+ *
+ * Optional inode where we store the journal. If present, all
+ * journal block numbers are mapped into this inode via bmap().
+ */
struct inode *j_inode;
- /*
+ /**
+ * @j_tail_sequence:
+ *
* Sequence number of the oldest transaction in the log [j_state_lock]
*/
tid_t j_tail_sequence;
- /*
+ /**
+ * @j_transaction_sequence:
+ *
* Sequence number of the next transaction to grant [j_state_lock]
*/
tid_t j_transaction_sequence;
- /*
+ /**
+ * @j_commit_sequence:
+ *
* Sequence number of the most recently committed transaction
* [j_state_lock].
*/
tid_t j_commit_sequence;
- /*
+ /**
+ * @j_commit_request:
+ *
* Sequence number of the most recent transaction wanting commit
* [j_state_lock]
*/
tid_t j_commit_request;
- /*
+ /**
+ * @j_uuid:
+ *
* Journal uuid: identifies the object (filesystem, LVM volume etc)
* backed by this journal. This will eventually be replaced by an array
* of uuids, allowing us to index multiple devices within a single
@@ -958,85 +997,151 @@ struct journal_s
*/
__u8 j_uuid[16];
- /* Pointer to the current commit thread for this journal */
+ /**
+ * @j_task: Pointer to the current commit thread for this journal.
+ */
struct task_struct *j_task;
- /*
+ /**
+ * @j_max_transaction_buffers:
+ *
* Maximum number of metadata buffers to allow in a single compound
- * commit transaction
+ * commit transaction.
*/
int j_max_transaction_buffers;
- /*
+ /**
+ * @j_commit_interval:
+ *
* What is the maximum transaction lifetime before we begin a commit?
*/
unsigned long j_commit_interval;
- /* The timer used to wakeup the commit thread: */
+ /**
+ * @j_commit_timer: The timer used to wakeup the commit thread.
+ */
struct timer_list j_commit_timer;
- /*
- * The revoke table: maintains the list of revoked blocks in the
- * current transaction. [j_revoke_lock]
+ /**
+ * @j_revoke_lock: Protect the revoke table.
*/
spinlock_t j_revoke_lock;
+
+ /**
+ * @j_revoke:
+ *
+ * The revoke table - maintains the list of revoked blocks in the
+ * current transaction.
+ */
struct jbd2_revoke_table_s *j_revoke;
+
+ /**
+ * @j_revoke_table: Alternate revoke tables for j_revoke.
+ */
struct jbd2_revoke_table_s *j_revoke_table[2];
- /*
- * array of bhs for jbd2_journal_commit_transaction
+ /**
+ * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction.
*/
struct buffer_head **j_wbuf;
+
+ /**
+ * @j_wbufsize:
+ *
+ * Size of @j_wbuf array.
+ */
int j_wbufsize;
- /*
- * this is the pid of hte last person to run a synchronous operation
- * through the journal
+ /**
+ * @j_last_sync_writer:
+ *
+ * The pid of the last person to run a synchronous operation
+ * through the journal.
*/
pid_t j_last_sync_writer;
- /*
- * the average amount of time in nanoseconds it takes to commit a
+ /**
+ * @j_average_commit_time:
+ *
+ * The average amount of time in nanoseconds it takes to commit a
* transaction to disk. [j_state_lock]
*/
u64 j_average_commit_time;
- /*
- * minimum and maximum times that we should wait for
- * additional filesystem operations to get batched into a
- * synchronous handle in microseconds
+ /**
+ * @j_min_batch_time:
+ *
+ * Minimum time that we should wait for additional filesystem operations
+ * to get batched into a synchronous handle in microseconds.
*/
u32 j_min_batch_time;
+
+ /**
+ * @j_max_batch_time:
+ *
+ * Maximum time that we should wait for additional filesystem operations
+ * to get batched into a synchronous handle in microseconds.
+ */
u32 j_max_batch_time;
- /* This function is called when a transaction is closed */
+ /**
+ * @j_commit_callback:
+ *
+ * This function is called when a transaction is closed.
+ */
void (*j_commit_callback)(journal_t *,
transaction_t *);
/*
* Journal statistics
*/
+
+ /**
+ * @j_history_lock: Protect the transactions statistics history.
+ */
spinlock_t j_history_lock;
+
+ /**
+ * @j_proc_entry: procfs entry for the jbd statistics directory.
+ */
struct proc_dir_entry *j_proc_entry;
+
+ /**
+ * @j_stats: Overall statistics.
+ */
struct transaction_stats_s j_stats;
- /* Failed journal commit ID */
+ /**
+ * @j_failed_commit: Failed journal commit ID.
+ */
unsigned int j_failed_commit;
- /*
+ /**
+ * @j_private:
+ *
* An opaque pointer to fs-private information. ext3 puts its
- * superblock pointer here
+ * superblock pointer here.
*/
void *j_private;
- /* Reference to checksum algorithm driver via cryptoapi */
+ /**
+ * @j_chksum_driver:
+ *
+ * Reference to checksum algorithm driver via cryptoapi.
+ */
struct crypto_shash *j_chksum_driver;
- /* Precomputed journal UUID checksum for seeding other checksums */
+ /**
+ * @j_csum_seed:
+ *
+ * Precomputed journal UUID checksum for seeding other checksums.
+ */
__u32 j_csum_seed;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
+ /**
+ * @j_trans_commit_map:
+ *
* Lockdep entity to track transaction commit dependencies. Handles
* hold this "lock" for read, when we wait for commit, we acquire the
* "lock" for writing. This matches the properties of jbd2 journalling
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 9385aa57497b..a27cf6652327 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -62,8 +62,11 @@ extern int register_refined_jiffies(long clock_tick_rate);
/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
-/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
-#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
+#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
+
+/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
#ifndef __jiffy_arch_data
#define __jiffy_arch_data
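To make the distinction concrete, here is a small arithmetic check of the tick macros; the HZ=250 and USER_HZ=100 values are assumptions for the example (both are configuration-dependent), not something this header fixes:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L
#define USEC_PER_SEC 1000000L
#define HZ           250	/* assumed kernel tick rate */
#define USER_HZ      100	/* assumed userspace-visible tick rate */

#define TICK_NSEC      ((NSEC_PER_SEC + HZ/2) / HZ)
#define TICK_USEC      ((USEC_PER_SEC + HZ/2) / HZ)
#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)

int main(void)
{
	printf("TICK_NSEC      = %ld\n", (long)TICK_NSEC);               /* 4000000 */
	printf("TICK_USEC      = %ld\n", (long)TICK_USEC);               /* 4000 */
	printf("USER_TICK_USEC = %lu\n", (unsigned long)USER_TICK_USEC); /* 10000 */
	return 0;
}

With these values TICK_USEC now tracks the real tick length (4000 us at HZ=250), while USER_TICK_USEC keeps the old fake-USER_HZ behaviour (10000 us).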
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index c7b368c734af..b46b541c67c4 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -151,6 +151,7 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
+extern void jump_label_invalidate_initmem(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -160,6 +161,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
@@ -196,6 +199,8 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
+static inline void jump_label_invalidate_initmem(void) {}
+
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely(static_key_count(key) > 0))
@@ -222,6 +227,9 @@ static inline void static_key_slow_dec(struct static_key *key)
atomic_dec(&key->enabled);
}
+#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
+#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -388,7 +396,7 @@ extern bool ____wrong_branch_error(void);
branch = !arch_static_branch_jump(&(x)->key, true); \
else \
branch = ____wrong_branch_error(); \
- branch; \
+ likely(branch); \
})
#define static_branch_unlikely(x) \
@@ -400,7 +408,7 @@ extern bool ____wrong_branch_error(void);
branch = arch_static_branch(&(x)->key, false); \
else \
branch = ____wrong_branch_error(); \
- branch; \
+ unlikely(branch); \
})
#else /* !HAVE_JUMP_LABEL */
@@ -416,6 +424,8 @@ extern bool ____wrong_branch_error(void);
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
+#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
/*
* Normal usage; boolean enable/disable.
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index bd118a6c60cb..657a83b943f0 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -9,6 +9,10 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/sections.h>
#define KSYM_NAME_LEN 128
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
@@ -16,6 +20,56 @@
struct module;
+static inline int is_kernel_inittext(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sinittext
+ && addr <= (unsigned long)_einittext)
+ return 1;
+ return 0;
+}
+
+static inline int is_kernel_text(unsigned long addr)
+{
+ if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
+ arch_is_kernel_text(addr))
+ return 1;
+ return in_gate_area_no_mm(addr);
+}
+
+static inline int is_kernel(unsigned long addr)
+{
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ return 1;
+ return in_gate_area_no_mm(addr);
+}
+
+static inline int is_ksym_addr(unsigned long addr)
+{
+ if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
+ return is_kernel(addr);
+
+ return is_kernel_text(addr) || is_kernel_inittext(addr);
+}
+
+static inline void *dereference_symbol_descriptor(void *ptr)
+{
+#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR
+ struct module *mod;
+
+ ptr = dereference_kernel_function_descriptor(ptr);
+ if (is_ksym_addr((unsigned long)ptr))
+ return ptr;
+
+ preempt_disable();
+ mod = __module_address((unsigned long)ptr);
+ preempt_enable();
+
+ if (mod)
+ ptr = dereference_module_function_descriptor(mod, ptr);
+#endif
+ return ptr;
+}
+
#ifdef CONFIG_KALLSYMS
/* Lookup the address for a symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name);
@@ -40,9 +94,6 @@ extern int sprint_symbol(char *buffer, unsigned long address);
extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
-/* Look up a kernel symbol and print it to the kernel messages. */
-extern void __print_symbol(const char *fmt, unsigned long address);
-
int lookup_symbol_name(unsigned long addr, char *symname);
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
@@ -112,26 +163,11 @@ static inline int kallsyms_show_value(void)
return false;
}
-/* Stupid that this does nothing, but I didn't create this mess. */
-#define __print_symbol(fmt, addr)
#endif /*CONFIG_KALLSYMS*/
-/* This macro allows us to keep printk typechecking */
-static __printf(1, 2)
-void __check_printsym_format(const char *fmt, ...)
-{
-}
-
-static inline void print_symbol(const char *fmt, unsigned long addr)
-{
- __check_printsym_format(fmt, "");
- __print_symbol(fmt, (unsigned long)
- __builtin_extract_return_addr((void *)addr));
-}
-
static inline void print_ip_sym(unsigned long ip)
{
- printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
+ printk("[<%px>] %pS\n", (void *) ip, (void *) ip);
}
#endif /*_LINUX_KALLSYMS_H*/
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index e3eb834c9a35..de784fd11d12 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -11,8 +11,6 @@ struct task_struct;
#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SCALE_SHIFT 3
-
#include <asm/kasan.h>
#include <asm/pgtable.h>
@@ -20,7 +18,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE];
extern pte_t kasan_zero_pte[PTRS_PER_PTE];
extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
extern pud_t kasan_zero_pud[PTRS_PER_PUD];
-extern p4d_t kasan_zero_p4d[PTRS_PER_P4D];
+extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D];
void kasan_populate_zero_shadow(const void *shadow_start,
const void *shadow_end);
@@ -45,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -56,14 +54,14 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
-void kasan_kfree_large(const void *ptr);
-void kasan_poison_kfree(void *ptr);
+void kasan_kfree_large(void *ptr, unsigned long ip);
+void kasan_poison_kfree(void *ptr, unsigned long ip);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
gfp_t flags);
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
-bool kasan_slab_free(struct kmem_cache *s, void *object);
+bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
struct kasan_cache {
int alloc_meta_offset;
@@ -94,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
- size_t *size,
+ unsigned int *size,
slab_flags_t *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
@@ -108,8 +106,8 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache,
const void *object) {}
static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
-static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_poison_kfree(void *ptr) {}
+static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
+static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags) {}
static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -117,7 +115,8 @@ static inline void kasan_krealloc(const void *object, size_t new_size,
static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags) {}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+ unsigned long ip)
{
return false;
}
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index fec5076eda91..cc8fa109cfa3 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -4,6 +4,12 @@
#include <generated/autoconf.h>
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#else
+#define __LITTLE_ENDIAN 1234
+#endif
+
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 7ff25a808fef..80db19d3a505 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -10,6 +10,7 @@ enum kcore_type {
KCORE_VMALLOC,
KCORE_RAM,
KCORE_VMEMMAP,
+ KCORE_USER,
KCORE_OTHER,
};
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ce51455e2adf..6a1eb0b0aad9 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -439,7 +439,8 @@ extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
-extern int num_to_str(char *buf, int size, unsigned long long num);
+extern int num_to_str(char *buf, int size,
+ unsigned long long num, unsigned int width);
/* lib/printf utilities */
@@ -472,6 +473,7 @@ extern bool parse_option_str(const char *str, const char *option);
extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
+extern int init_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
@@ -479,6 +481,15 @@ extern int func_ptr_is_kernel_text(void *ptr);
unsigned long int_sqrt(unsigned long);
+#if BITS_PER_LONG < 64
+u32 int_sqrt64(u64 x);
+#else
+static inline u32 int_sqrt64(u64 x)
+{
+ return (u32)int_sqrt(x);
+}
+#endif
+
extern void bust_spinlocks(int yes);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
extern int panic_timeout;
@@ -533,6 +544,7 @@ extern enum system_states {
SYSTEM_RESTART,
} system_state;
+/* This cannot be an enum because some may be used in assembly source. */
#define TAINT_PROPRIETARY_MODULE 0
#define TAINT_FORCED_MODULE 1
#define TAINT_CPU_OUT_OF_SPEC 2
@@ -550,7 +562,8 @@ extern enum system_states {
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
#define TAINT_AUX 16
-#define TAINT_FLAGS_COUNT 17
+#define TAINT_RANDSTRUCT 17
+#define TAINT_FLAGS_COUNT 18
struct taint_flag {
char c_true; /* character printed when tainted */
@@ -782,41 +795,59 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
/*
- * min()/max()/clamp() macros that also do
- * strict type-checking.. See the
- * "unnecessary" pointer comparison.
+ * min()/max()/clamp() macros must accomplish three things:
+ *
+ * - avoid multiple evaluations of the arguments (so side-effects like
+ * "x++" happen only once) when non-constant.
+ * - perform strict type-checking (to generate warnings instead of
+ * nasty runtime surprises). See the "unnecessary" pointer comparison
+ * in __typecheck().
+ * - retain result as a constant expression when called with only
+ * constant expressions (to avoid tripping VLA warnings in stack
+ * allocation usage).
+ */
+#define __typecheck(x, y) \
+ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
*/
-#define __min(t1, t2, min1, min2, x, y) ({ \
- t1 min1 = (x); \
- t2 min2 = (y); \
- (void) (&min1 == &min2); \
- min1 < min2 ? min1 : min2; })
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
+#define __no_side_effects(x, y) \
+ (__is_constexpr(x) && __is_constexpr(y))
+
+#define __safe_cmp(x, y) \
+ (__typecheck(x, y) && __no_side_effects(x, y))
+
+#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
+
+#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
+ typeof(x) unique_x = (x); \
+ typeof(y) unique_y = (y); \
+ __cmp(unique_x, unique_y, op); })
+
+#define __careful_cmp(x, y, op) \
+ __builtin_choose_expr(__safe_cmp(x, y), \
+ __cmp(x, y, op), \
+ __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
/**
* min - return minimum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
-#define min(x, y) \
- __min(typeof(x), typeof(y), \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
-
-#define __max(t1, t2, max1, max2, x, y) ({ \
- t1 max1 = (x); \
- t2 max2 = (y); \
- (void) (&max1 == &max2); \
- max1 > max2 ? max1 : max2; })
+#define min(x, y) __careful_cmp(x, y, <)
/**
* max - return maximum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
-#define max(x, y) \
- __max(typeof(x), typeof(y), \
- __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
- x, y)
+#define max(x, y) __careful_cmp(x, y, >)
/**
* min3 - return minimum of three values
@@ -868,10 +899,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @x: first value
* @y: second value
*/
-#define min_t(type, x, y) \
- __min(type, type, \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
+#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
/**
* max_t - return maximum of two values, using the specified type
@@ -879,10 +907,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @x: first value
* @y: second value
*/
-#define max_t(type, x, y) \
- __max(type, type, \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
+#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
/**
* clamp_t - return a value clamped to a given range using a given type
@@ -918,6 +943,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+/* This counts to 12. Any more and it will return the 13th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
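The __is_constexpr() trick above is worth seeing in isolation. The sketch below is GCC/Clang-only, since it leans on sizeof of a dereferenced void pointer evaluating to 1 (a GNU extension); treat it as an illustration of the macro, not portable C:

#include <stdio.h>

#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

int main(void)
{
	int n = 4;

	/* For a constant, (void *)((long)(5) * 0l) is a null pointer
	 * constant, the conditional has type int *, and the sizes match. */
	printf("constant 5: %d\n", (int)__is_constexpr(5));	/* 1 */

	/* For a variable the cast is not a null pointer constant, the
	 * conditional degrades to void *, and sizeof(*(void *)...) is 1
	 * under the GNU extension, so the sizes differ. */
	printf("variable n: %d\n", (int)__is_constexpr(n));	/* 0 */

	/* Because min()/max() now stay constant expressions for constant
	 * inputs, results like this can size an ordinary, non-VLA array. */
	char buf[16 < 32 ? 16 : 32];	/* 16 bytes, no VLA involved */
	(void)buf;
	return 0;
}

Crucially, __is_constexpr() never evaluates its argument, so expressions with side effects are still only evaluated once by the __cmp_once() fallback path.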
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index f16f6ceb3875..0ebcbeb21056 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -223,10 +223,6 @@ struct kimage {
extern void machine_kexec(struct kimage *image);
extern int machine_kexec_prepare(struct kimage *image);
extern void machine_kexec_cleanup(struct kimage *image);
-extern asmlinkage long sys_kexec_load(unsigned long entry,
- unsigned long nr_segments,
- struct kexec_segment __user *segments,
- unsigned long flags);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 7b45959ebd92..89fc8dc7bf38 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -41,11 +41,11 @@
*/
/*
- * Note about locking : There is no locking required until only * one reader
- * and one writer is using the fifo and no kfifo_reset() will be * called
- * kfifo_reset_out() can be safely used, until it will be only called
+ * Note about locking: No locking is required as long as only one reader
+ * and one writer are using the fifo and kfifo_reset() is never called.
+ * kfifo_reset_out() can be used safely as long as it is only called
+ * from the reader thread.
- * For multiple writer and one reader there is only a need to lock the writer.
+ * For multiple writers and one reader there is only a need to lock the writer.
* And vice versa for only one writer and multiple reader there is only a need
* to lock the reader.
*/
@@ -113,7 +113,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
* array is a part of the structure and the fifo type where the array is
* outside of the fifo structure.
*/
-#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo))
+#define __is_kfifo_ptr(fifo) \
+ (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type))))
/**
* DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e0a6205caa71..7f6f93c3df9c 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kobject.h - generic kernel object infrastructure.
*
@@ -6,8 +7,6 @@
* Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2008 Novell Inc.
*
- * This file is released under the GPLv2.
- *
* Please read Documentation/kobject.txt before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index df32d2508290..069aa2ebef90 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Kernel object name space definitions
*
* Copyright (c) 2002-2003 Patrick Mochel
@@ -7,8 +8,6 @@
*
* Split from kobject.h by David Howells (dhowells@redhat.com)
*
- * This file is released under the GPLv2.
- *
* Please read Documentation/kobject.txt before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6bdd4b9f6611..6930c63126c7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -533,7 +533,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
-int __must_check vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
@@ -1105,7 +1105,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
-void kvm_arch_irq_routing_update(struct kvm *kvm);
static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
@@ -1114,6 +1113,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
+void kvm_arch_irq_routing_update(struct kvm *kvm);
+
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
@@ -1260,4 +1261,19 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+#else
+static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ unsigned int ioctl,
+ unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+
#endif
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 51f6ef2c2ff4..f23b90b02898 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -9,4 +9,9 @@ static inline bool kvm_para_has_feature(unsigned int feature)
{
return !!(kvm_arch_para_features() & (1UL << feature));
}
+
+static inline bool kvm_para_has_hint(unsigned int feature)
+{
+ return !!(kvm_arch_para_hints() & (1UL << feature));
+}
#endif /* __LINUX_KVM_PARA_H */
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index e97966d1fb8d..700efaa9e115 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -121,6 +121,8 @@ extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
bool state)
{
+ if (!fled_cdev)
+ return -EINVAL;
return fled_cdev->ops->strobe_set(fled_cdev, state);
}
@@ -136,6 +138,8 @@ static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
bool *state)
{
+ if (!fled_cdev)
+ return -EINVAL;
if (fled_cdev->ops->strobe_get)
return fled_cdev->ops->strobe_get(fled_cdev, state);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 5579c64c8fd6..b7e82550e655 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -346,9 +346,9 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
/* Trigger specific functions */
#ifdef CONFIG_LEDS_TRIGGER_DISK
-extern void ledtrig_disk_activity(void);
+extern void ledtrig_disk_activity(bool write);
#else
-static inline void ledtrig_disk_activity(void) {}
+static inline void ledtrig_disk_activity(bool write) {}
#endif
#ifdef CONFIG_LEDS_TRIGGER_MTD
diff --git a/include/linux/libata.h b/include/linux/libata.h
index ed9826b21c5e..1795fecdea17 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -617,6 +617,7 @@ struct ata_host {
void *private_data;
struct ata_port_operations *ops;
unsigned long flags;
+ struct kref kref;
struct mutex eh_mutex;
struct task_struct *eh_owner;
diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h
index 27ba06e5d117..90ed4ebfa692 100644
--- a/include/linux/libfdt.h
+++ b/include/linux/libfdt.h
@@ -3,7 +3,6 @@
#define _INCLUDE_LIBFDT_H_
#include <linux/libfdt_env.h>
-#include "../../scripts/dtc/libfdt/fdt.h"
#include "../../scripts/dtc/libfdt/libfdt.h"
#endif /* _INCLUDE_LIBFDT_H_ */
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 14997285e53d..c6ac1fe7ec68 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBFDT_ENV_H
-#define _LIBFDT_ENV_H
+#ifndef LIBFDT_ENV_H
+#define LIBFDT_ENV_H
#include <linux/string.h>
@@ -15,4 +15,4 @@ typedef __be64 fdt64_t;
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
-#endif /* _LIBFDT_ENV_H */
+#endif /* LIBFDT_ENV_H */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index f8109ddb5ef1..097072c5a852 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -47,6 +47,17 @@ enum {
/* region flag indicating to direct-map persistent memory by default */
ND_REGION_PAGEMAP = 0,
+ /*
+ * Platform ensures entire CPU store data path is flushed to pmem on
+ * system power loss.
+ */
+ ND_REGION_PERSIST_CACHE = 1,
+ /*
+ * Platform provides mechanisms to automatically flush outstanding
+ * write data from memory controller to pmem on system power loss.
+ * (ADR)
+ */
+ ND_REGION_PERSIST_MEMCTRL = 2,
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
@@ -65,12 +76,14 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc);
+struct device_node;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
unsigned long bus_dsm_mask;
unsigned long cmd_mask;
struct module *module;
char *provider_name;
+ struct device_node *of_node;
ndctl_fn ndctl;
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
@@ -112,6 +125,7 @@ struct nd_region_desc {
int num_lanes;
int numa_node;
unsigned long flags;
+ struct device_node *of_node;
};
struct device;
@@ -153,6 +167,7 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
+struct device *nd_region_dev(struct nd_region *nd_region);
struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
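ND_REGION_PERSIST_CACHE and ND_REGION_PERSIST_MEMCTRL are bit numbers in the region descriptor's flags word, like ND_REGION_PAGEMAP, so a bus provider advertises the platform's persistence domain with set_bit(). A provider-side sketch; how the capability itself is detected (firmware tables, etc.) is assumed:

/* Sketch: advertise the persistence domain when creating a region. */
static void set_persistence_domain(struct nd_region_desc *ndr_desc, bool whole_cache_path)
{
	if (whole_cache_path)	/* CPU caches are flushed to pmem on power loss */
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
	else			/* ADR only: the memory controller is flushed */
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
}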
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index 4ad06e824f76..5f18fe02ae37 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -10,7 +10,13 @@
* the Free Software Foundation.
*/
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#define PS2_CMD_SETSCALE11 0x00e6
+#define PS2_CMD_SETRES 0x10e8
#define PS2_CMD_GETID 0x02f2
#define PS2_CMD_RESET_BAT 0x02ff
@@ -20,11 +26,12 @@
#define PS2_RET_NAK 0xfe
#define PS2_RET_ERR 0xfc
-#define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */
-#define PS2_FLAG_CMD 2 /* Waiting for command to finish */
-#define PS2_FLAG_CMD1 4 /* Waiting for the first byte of command response */
-#define PS2_FLAG_WAITID 8 /* Command execiting is GET ID */
-#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */
+#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */
+#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */
+#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */
+#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */
+#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */
+#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */
struct ps2dev {
struct serio *serio;
@@ -36,21 +43,22 @@ struct ps2dev {
wait_queue_head_t wait;
unsigned long flags;
- unsigned char cmdbuf[8];
- unsigned char cmdcnt;
- unsigned char nak;
+ u8 cmdbuf[8];
+ u8 cmdcnt;
+ u8 nak;
};
void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
-int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout);
-void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout);
+int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout);
+void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout);
void ps2_begin_command(struct ps2dev *ps2dev);
void ps2_end_command(struct ps2dev *ps2dev);
-int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
-int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
-int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data);
-int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data);
+int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
+int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
+int ps2_sliced_command(struct ps2dev *ps2dev, u8 command);
+bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data);
+bool ps2_handle_response(struct ps2dev *ps2dev, u8 data);
void ps2_cmd_aborted(struct ps2dev *ps2dev);
-int ps2_is_keyboard_id(char id);
+bool ps2_is_keyboard_id(u8 id);
#endif /* _LIBPS2_H */
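The retyped libps2 API keeps the same calling convention, only with fixed-width parameters and bool predicates. A minimal sketch of issuing PS2_CMD_GETID from a protocol driver that already owns a struct ps2dev:

/* Sketch: read and classify the device ID. */
static int query_ps2_id(struct ps2dev *ps2dev, u8 *id)
{
	u8 param[2] = { 0, 0 };
	int error;

	error = ps2_command(ps2dev, param, PS2_CMD_GETID);
	if (error)
		return error;

	*id = param[0];
	return ps2_is_keyboard_id(param[0]) ? 0 : -ENODEV;
}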
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2d1d9de06728..6e0859b9d4d2 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -16,26 +16,58 @@ enum {
NVM_IOTYPE_GC = 1,
};
-#define NVM_BLK_BITS (16)
-#define NVM_PG_BITS (16)
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (8)
-#define NVM_LUN_BITS (8)
-#define NVM_CH_BITS (7)
+/* common format */
+#define NVM_GEN_CH_BITS (8)
+#define NVM_GEN_LUN_BITS (8)
+#define NVM_GEN_BLK_BITS (16)
+#define NVM_GEN_RESERVED (32)
+
+/* 1.2 format */
+#define NVM_12_PG_BITS (16)
+#define NVM_12_PL_BITS (4)
+#define NVM_12_SEC_BITS (4)
+#define NVM_12_RESERVED (8)
+
+/* 2.0 format */
+#define NVM_20_SEC_BITS (24)
+#define NVM_20_RESERVED (8)
+
+enum {
+ NVM_OCSSD_SPEC_12 = 12,
+ NVM_OCSSD_SPEC_20 = 20,
+};
struct ppa_addr {
/* Generic structure for all addresses */
union {
+ /* generic device format */
struct {
- u64 blk : NVM_BLK_BITS;
- u64 pg : NVM_PG_BITS;
- u64 sec : NVM_SEC_BITS;
- u64 pl : NVM_PL_BITS;
- u64 lun : NVM_LUN_BITS;
- u64 ch : NVM_CH_BITS;
- u64 reserved : 1;
+ u64 ch : NVM_GEN_CH_BITS;
+ u64 lun : NVM_GEN_LUN_BITS;
+ u64 blk : NVM_GEN_BLK_BITS;
+ u64 reserved : NVM_GEN_RESERVED;
+ } a;
+
+ /* 1.2 device format */
+ struct {
+ u64 ch : NVM_GEN_CH_BITS;
+ u64 lun : NVM_GEN_LUN_BITS;
+ u64 blk : NVM_GEN_BLK_BITS;
+ u64 pg : NVM_12_PG_BITS;
+ u64 pl : NVM_12_PL_BITS;
+ u64 sec : NVM_12_SEC_BITS;
+ u64 reserved : NVM_12_RESERVED;
} g;
+ /* 2.0 device format */
+ struct {
+ u64 grp : NVM_GEN_CH_BITS;
+ u64 pu : NVM_GEN_LUN_BITS;
+ u64 chk : NVM_GEN_BLK_BITS;
+ u64 sec : NVM_20_SEC_BITS;
+ u64 reserved : NVM_20_RESERVED;
+ } m;
+
struct {
u64 line : 63;
u64 is_cached : 1;
@@ -49,13 +81,13 @@ struct nvm_rq;
struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
+struct nvm_chk_meta;
-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
- nvm_l2p_update_fn *, void *);
+typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
+typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *,
+ sector_t, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -66,10 +98,11 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
struct nvm_dev_ops {
nvm_id_fn *identity;
- nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb_tbl;
+ nvm_get_chk_meta_fn *get_chk_meta;
+
nvm_submit_io_fn *submit_io;
nvm_submit_io_sync_fn *submit_io_sync;
@@ -77,8 +110,6 @@ struct nvm_dev_ops {
nvm_destroy_dma_pool_fn *destroy_dma_pool;
nvm_dev_dma_alloc_fn *dev_dma_alloc;
nvm_dev_dma_free_fn *dev_dma_free;
-
- unsigned int max_phys_sect;
};
#ifdef CONFIG_NVM
@@ -112,8 +143,6 @@ enum {
NVM_RSP_WARN_HIGHECC = 0x4700,
/* Device opcodes */
- NVM_OP_HBREAD = 0x02,
- NVM_OP_HBWRITE = 0x81,
NVM_OP_PWRITE = 0x91,
NVM_OP_PREAD = 0x92,
NVM_OP_ERASE = 0x90,
@@ -160,53 +189,75 @@ struct nvm_id_lp_tbl {
struct nvm_id_lp_mlc mlc;
};
-struct nvm_id_group {
- u8 mtype;
- u8 fmtype;
- u8 num_ch;
- u8 num_lun;
- u8 num_pln;
- u16 num_blk;
- u16 num_pg;
- u16 fpg_sz;
- u16 csecs;
- u16 sos;
- u32 trdt;
- u32 trdm;
- u32 tprt;
- u32 tprm;
- u32 tbet;
- u32 tbem;
- u32 mpos;
- u32 mccap;
- u16 cpar;
-
- struct nvm_id_lp_tbl lptbl;
-};
-
-struct nvm_addr_format {
- u8 ch_offset;
+struct nvm_addrf_12 {
u8 ch_len;
- u8 lun_offset;
u8 lun_len;
- u8 pln_offset;
+ u8 blk_len;
+ u8 pg_len;
u8 pln_len;
+ u8 sec_len;
+
+ u8 ch_offset;
+ u8 lun_offset;
u8 blk_offset;
- u8 blk_len;
u8 pg_offset;
- u8 pg_len;
- u8 sect_offset;
- u8 sect_len;
+ u8 pln_offset;
+ u8 sec_offset;
+
+ u64 ch_mask;
+ u64 lun_mask;
+ u64 blk_mask;
+ u64 pg_mask;
+ u64 pln_mask;
+ u64 sec_mask;
};
-struct nvm_id {
- u8 ver_id;
- u8 vmnt;
- u32 cap;
- u32 dom;
- struct nvm_addr_format ppaf;
- struct nvm_id_group grp;
-} __packed;
+struct nvm_addrf {
+ u8 ch_len;
+ u8 lun_len;
+ u8 chk_len;
+ u8 sec_len;
+ u8 rsv_len[2];
+
+ u8 ch_offset;
+ u8 lun_offset;
+ u8 chk_offset;
+ u8 sec_offset;
+ u8 rsv_off[2];
+
+ u64 ch_mask;
+ u64 lun_mask;
+ u64 chk_mask;
+ u64 sec_mask;
+ u64 rsv_mask[2];
+};
+
+enum {
+ /* Chunk states */
+ NVM_CHK_ST_FREE = 1 << 0,
+ NVM_CHK_ST_CLOSED = 1 << 1,
+ NVM_CHK_ST_OPEN = 1 << 2,
+ NVM_CHK_ST_OFFLINE = 1 << 3,
+
+ /* Chunk types */
+ NVM_CHK_TP_W_SEQ = 1 << 0,
+ NVM_CHK_TP_W_RAN = 1 << 1,
+ NVM_CHK_TP_SZ_SPEC = 1 << 4,
+};
+
+/*
+ * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
+ * buffer can be used when converting from little endian to cpu addressing.
+ */
+struct nvm_chk_meta {
+ u8 state;
+ u8 type;
+ u8 wi;
+ u8 rsvd[5];
+ u64 slba;
+ u64 cnlb;
+ u64 wp;
+};
struct nvm_target {
struct list_head list;
@@ -217,10 +268,16 @@ struct nvm_target {
#define ADDR_EMPTY (~0ULL)
+#define NVM_TARGET_DEFAULT_OP (11)
+#define NVM_TARGET_MIN_OP (3)
+#define NVM_TARGET_MAX_OP (80)
+
#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
+#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
+
struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);
@@ -239,7 +296,6 @@ struct nvm_rq {
void *meta_list;
dma_addr_t dma_meta_list;
- struct completion *wait;
nvm_end_io_fn *end_io;
uint8_t opcode;
@@ -268,31 +324,69 @@ enum {
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
-/* Device generic information */
+/* Instance geometry */
struct nvm_geo {
- int nr_chnls;
- int nr_luns;
- int luns_per_chnl; /* -1 if channels are not symmetric */
- int nr_planes;
- int sec_per_pg; /* only sectors for a single page */
- int pgs_per_blk;
- int blks_per_lun;
- int fpg_size;
- int pfpg_size; /* size of buffer if all pages are to be read */
- int sec_size;
- int oob_size;
- int mccap;
- struct nvm_addr_format ppaf;
-
- /* Calculated/Cached values. These do not reflect the actual usable
- * blocks at run-time.
- */
- int max_rq_size;
- int plane_mode; /* drive device in single, double or quad mode */
-
- int sec_per_pl; /* all sectors across planes */
- int sec_per_blk;
- int sec_per_lun;
+ /* device reported version */
+ u8 major_ver_id;
+ u8 minor_ver_id;
+
+ /* kernel short version */
+ u8 version;
+
+ /* instance specific geometry */
+ int num_ch;
+ int num_lun; /* per channel */
+
+ /* calculated values */
+ int all_luns; /* across channels */
+ int all_chunks; /* across channels */
+
+ int op; /* over-provision in instance */
+
+ sector_t total_secs; /* across channels */
+
+ /* chunk geometry */
+ u32 num_chk; /* chunks per lun */
+ u32 clba; /* sectors per chunk */
+ u16 csecs; /* sector size */
+ u16 sos; /* out-of-band area size */
+
+ /* device write constraints */
+ u32 ws_min; /* minimum write size */
+ u32 ws_opt; /* optimal write size */
+ u32 mw_cunits; /* distance required for successful read */
+ u32 maxoc; /* maximum open chunks */
+ u32 maxocpu; /* maximum open chunks per parallel unit */
+
+ /* device capabilities */
+ u32 mccap;
+
+ /* device timings */
+ u32 trdt; /* Avg. Tread (ns) */
+ u32 trdm; /* Max Tread (ns) */
+ u32 tprt; /* Avg. Tprog (ns) */
+ u32 tprm; /* Max Tprog (ns) */
+ u32 tbet; /* Avg. Terase (ns) */
+ u32 tbem; /* Max Terase (ns) */
+
+ /* generic address format */
+ struct nvm_addrf addrf;
+
+ /* 1.2 compatibility */
+ u8 vmnt;
+ u32 cap;
+ u32 dom;
+
+ u8 mtype;
+ u8 fmtype;
+
+ u16 cpar;
+ u32 mpos;
+
+ u8 num_pln;
+ u8 pln_mode;
+ u16 num_pg;
+ u16 fpg_sz;
};
/* sub-device structure */
@@ -303,9 +397,6 @@ struct nvm_tgt_dev {
/* Base ppas for target LUNs */
struct ppa_addr *luns;
- sector_t total_secs;
-
- struct nvm_id identity;
struct request_queue *q;
struct nvm_dev *parent;
@@ -320,17 +411,9 @@ struct nvm_dev {
/* Device information */
struct nvm_geo geo;
- /* lower page table */
- int lps_per_blk;
- int *lptbl;
-
- unsigned long total_secs;
-
unsigned long *lun_map;
void *dma_pool;
- struct nvm_id identity;
-
/* Backend device */
struct request_queue *q;
char name[DISK_NAME_LEN];
@@ -346,95 +429,60 @@ struct nvm_dev {
struct list_head targets;
};
-static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
- u64 pba)
+static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
+ struct ppa_addr r)
{
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = pba;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, geo->pgs_per_blk);
- div_u64_rem(ppa, geo->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, geo->blks_per_lun);
- div_u64_rem(ppa, geo->luns_per_chnl, &luns);
- l.g.lun = luns;
- sector_div(ppa, geo->luns_per_chnl);
- l.g.ch = ppa;
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
- return l;
-}
-
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct ppa_addr l;
+ l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
+ l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
+ l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
+ l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
+ l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
+ l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = &geo->addrf;
- l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
- l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset;
- l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset;
- l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset;
- l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset;
- l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset;
+ l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
+ l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
+ l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
+ l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
+ }
return l;
}
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
+static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
- struct nvm_geo *geo = &tgt_dev->geo;
+ struct nvm_geo *geo = &dev->geo;
struct ppa_addr l;
l.ppa = 0;
- /*
- * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
- */
- l.g.blk = (r.ppa >> geo->ppaf.blk_offset) &
- (((1 << geo->ppaf.blk_len) - 1));
- l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) &
- (((1 << geo->ppaf.pg_len) - 1));
- l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) &
- (((1 << geo->ppaf.sect_len) - 1));
- l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) &
- (((1 << geo->ppaf.pln_len) - 1));
- l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) &
- (((1 << geo->ppaf.lun_len) - 1));
- l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) &
- (((1 << geo->ppaf.ch_len) - 1));
- return l;
-}
-
-static inline int ppa_empty(struct ppa_addr ppa_addr)
-{
- return (ppa_addr.ppa == ADDR_EMPTY);
-}
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
-{
- ppa_addr->ppa = ADDR_EMPTY;
-}
+ l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
+ l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
+ l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
+ l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
+ l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
+ l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = &geo->addrf;
-static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
-{
- if (ppa_empty(ppa1) || ppa_empty(ppa2))
- return 0;
+ l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
+ l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
+ l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
+ l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
+ }
- return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
- (ppa1.g.blk == ppa2.g.blk));
+ return l;
}
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
@@ -476,22 +524,19 @@ extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);
+
+extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev,
+ struct nvm_chk_meta *meta, struct ppa_addr ppa,
+ int nchks);
+
extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
-extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
-extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
- void *);
-extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
-extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
-extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
-
#else /* CONFIG_NVM */
struct nvm_dev_ops;
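Because the generic 'a' view shares its channel/LUN/chunk bit positions with both the 1.2 'g' and 2.0 'm' views, a target can compose an address once and let generic_to_dev_addr() emit the layout selected by geo->version. A sketch, assuming the caller already holds its struct nvm_dev:

/* Sketch: build the device-format address of chunk (ch, lun, chk). */
static u64 chunk_to_dev_ppa(struct nvm_dev *dev, int ch, int lun, int chk)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;
	ppa.a.ch = ch;		/* same bits as g.ch and m.grp */
	ppa.a.lun = lun;	/* same bits as g.lun and m.pu */
	ppa.a.blk = chk;	/* same bits as g.blk and m.chk */

	return generic_to_dev_addr(dev, ppa).ppa;
}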
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index 5e3581d76c7f..d4d5b93efe84 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -36,8 +36,6 @@ struct linux_logo {
extern const struct linux_logo logo_linux_mono;
extern const struct linux_logo logo_linux_vga16;
extern const struct linux_logo logo_linux_clut224;
-extern const struct linux_logo logo_blackfin_vga16;
-extern const struct linux_logo logo_blackfin_clut224;
extern const struct linux_logo logo_dec_clut224;
extern const struct linux_logo logo_mac_clut224;
extern const struct linux_logo logo_parisc_clut224;
@@ -46,7 +44,6 @@ extern const struct linux_logo logo_sun_clut224;
extern const struct linux_logo logo_superh_mono;
extern const struct linux_logo logo_superh_vga16;
extern const struct linux_logo logo_superh_clut224;
-extern const struct linux_logo logo_m32r_clut224;
extern const struct linux_logo logo_spe_clut224;
extern const struct linux_logo *fb_find_logo(int depth);
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index bb8129a3474d..96def9d15b1b 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -32,6 +32,7 @@ struct list_lru_one {
};
struct list_lru_memcg {
+ struct rcu_head rcu;
/* array of per cgroup lists, indexed by memcg_cache_id */
struct list_lru_one *lru[0];
};
@@ -43,7 +44,7 @@ struct list_lru_node {
struct list_lru_one lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
- struct list_lru_memcg *memcg_lrus;
+ struct list_lru_memcg __rcu *memcg_lrus;
#endif
long nr_items;
} ____cacheline_aligned_in_smp;
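Marking memcg_lrus as __rcu means lookups must go through an RCU accessor. A read-side sketch, assuming CONFIG_MEMCG && !CONFIG_SLOB and a caller that holds nlru->lock (the spinlock in struct list_lru_node), which is also what serializes updates:

/* Sketch: resolve the per-memcg list for a memcg cache index. */
static struct list_lru_one *lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}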
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index fc5c1be3f6f4..4754f01c1abb 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -40,7 +40,6 @@
* @new_func: pointer to the patched function code
* @old_sympos: a hint indicating which symbol position the old function
* can be found (optional)
- * @immediate: patch the func immediately, bypassing safety mechanisms
* @old_addr: the address of the function being patched
* @kobj: kobject for sysfs resources
* @stack_node: list node for klp_ops func_stack list
@@ -76,7 +75,6 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
- bool immediate;
/* internal */
unsigned long old_addr;
@@ -137,7 +135,6 @@ struct klp_object {
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
- * @immediate: patch all funcs immediately, bypassing safety mechanisms
* @list: list node for global list of registered patches
* @kobj: kobject for sysfs resources
* @enabled: the patch is enabled (but operation may be incomplete)
@@ -147,7 +144,6 @@ struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
- bool immediate;
/* internal */
struct list_head list;
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index d7d313fb9cd4..4fd95dbeb52f 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -17,6 +17,7 @@
#include <net/ipv6.h>
#include <linux/fs.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/utsname.h>
#include <linux/lockd/bind.h>
#include <linux/lockd/xdr.h>
@@ -58,7 +59,7 @@ struct nlm_host {
u32 h_state; /* pseudo-state counter */
u32 h_nsmstate; /* true remote NSM state */
u32 h_pidcount; /* Pseudopids */
- atomic_t h_count; /* reference count */
+ refcount_t h_count; /* reference count */
struct mutex h_mutex; /* mutex for pmap binding */
unsigned long h_nextrebind; /* next portmap call */
unsigned long h_expires; /* eligible for GC */
@@ -83,7 +84,7 @@ struct nlm_host {
struct nsm_handle {
struct list_head sm_link;
- atomic_t sm_count;
+ refcount_t sm_count;
char *sm_mon_name;
char *sm_name;
struct sockaddr_storage sm_addr;
@@ -122,7 +123,7 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
*/
struct nlm_lockowner {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct nlm_host *host;
fl_owner_t owner;
@@ -136,7 +137,7 @@ struct nlm_wait;
*/
#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
struct nlm_rqst {
- atomic_t a_count;
+ refcount_t a_count;
unsigned int a_flags; /* initial RPC task flags */
struct nlm_host * a_host; /* host handle */
struct nlm_args a_args; /* arguments */
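Switching h_count, sm_count, a_count and the lockowner count to refcount_t gives saturation instead of silent overflow; call sites keep the familiar get/put shape. A sketch for nsm_handle (the helper names and the kfree() release are illustrative):

/* Sketch: refcount_t get/put pattern. */
static struct nsm_handle *nsm_get(struct nsm_handle *nsm)
{
	refcount_inc(&nsm->sm_count);
	return nsm;
}

static void nsm_put(struct nsm_handle *nsm)
{
	if (refcount_dec_and_test(&nsm->sm_count))
		kfree(nsm);
}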
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 3251d9c0d313..6fc77d4dbdcd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
{
return lock_is_held_type(lock, -1);
}
@@ -367,8 +367,6 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
-# define INIT_LOCKDEP .lockdep_recursion = 0,
-
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) do { \
@@ -426,7 +424,6 @@ static inline void lockdep_on(void)
* #ifdef the call himself.
*/
-# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index ef3c9342e119..2eac32095113 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -44,7 +44,7 @@ extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);
/* Must be called under spinlock for reliable results */
-static inline int __lockref_is_dead(const struct lockref *l)
+static inline bool __lockref_is_dead(const struct lockref *l)
{
return ((int)l->count < 0);
}
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
new file mode 100644
index 000000000000..cbd9d8495690
--- /dev/null
+++ b/include/linux/logic_pio.h
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
+ * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ */
+
+#ifndef __LINUX_LOGIC_PIO_H
+#define __LINUX_LOGIC_PIO_H
+
+#include <linux/fwnode.h>
+
+enum {
+ LOGIC_PIO_INDIRECT, /* Indirect IO flag */
+ LOGIC_PIO_CPU_MMIO, /* Memory-mapped IO flag */
+};
+
+struct logic_pio_hwaddr {
+ struct list_head list;
+ struct fwnode_handle *fwnode;
+ resource_size_t hw_start;
+ resource_size_t io_start;
+ resource_size_t size; /* range size populated */
+ unsigned long flags;
+
+ void *hostdata;
+ const struct logic_pio_host_ops *ops;
+};
+
+struct logic_pio_host_ops {
+ u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth);
+ void (*out)(void *hostdata, unsigned long addr, u32 val,
+ size_t dwidth);
+ u32 (*ins)(void *hostdata, unsigned long addr, void *buffer,
+ size_t dwidth, unsigned int count);
+ void (*outs)(void *hostdata, unsigned long addr, const void *buffer,
+ size_t dwidth, unsigned int count);
+};
+
+#ifdef CONFIG_INDIRECT_PIO
+u8 logic_inb(unsigned long addr);
+void logic_outb(u8 value, unsigned long addr);
+void logic_outw(u16 value, unsigned long addr);
+void logic_outl(u32 value, unsigned long addr);
+u16 logic_inw(unsigned long addr);
+u32 logic_inl(unsigned long addr);
+void logic_insb(unsigned long addr, void *buffer, unsigned int count);
+void logic_insl(unsigned long addr, void *buffer, unsigned int count);
+void logic_insw(unsigned long addr, void *buffer, unsigned int count);
+void logic_outsb(unsigned long addr, const void *buffer, unsigned int count);
+void logic_outsw(unsigned long addr, const void *buffer, unsigned int count);
+void logic_outsl(unsigned long addr, const void *buffer, unsigned int count);
+
+#ifndef inb
+#define inb logic_inb
+#endif
+
+#ifndef inw
+#define inw logic_inw
+#endif
+
+#ifndef inl
+#define inl logic_inl
+#endif
+
+#ifndef outb
+#define outb logic_outb
+#endif
+
+#ifndef outw
+#define outw logic_outw
+#endif
+
+#ifndef outl
+#define outl logic_outl
+#endif
+
+#ifndef insb
+#define insb logic_insb
+#endif
+
+#ifndef insw
+#define insw logic_insw
+#endif
+
+#ifndef insl
+#define insl logic_insl
+#endif
+
+#ifndef outsb
+#define outsb logic_outsb
+#endif
+
+#ifndef outsw
+#define outsw logic_outsw
+#endif
+
+#ifndef outsl
+#define outsl logic_outsl
+#endif
+
+/*
+ * We reserve 0x4000 bytes for Indirect IO as so far this library is only
+ * used by the HiSilicon LPC Host. If needed, we can reserve a wider IO
+ * area by redefining the macro below.
+ */
+#define PIO_INDIRECT_SIZE 0x4000
+#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
+#else
+#define MMIO_UPPER_LIMIT IO_SPACE_LIMIT
+#endif /* CONFIG_INDIRECT_PIO */
+
+struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
+unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+ resource_size_t hw_addr, resource_size_t size);
+int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+resource_size_t logic_pio_to_hwaddr(unsigned long pio);
+unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
+
+#endif /* __LINUX_LOGIC_PIO_H */
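A host controller driver describes its window with a struct logic_pio_hwaddr and, for LOGIC_PIO_INDIRECT ranges, supplies in/out accessors; after logic_pio_register_range() the range is reachable through the inb()/outb() wrappers above. A registration sketch with hypothetical callbacks (bodies omitted) and a statically allocated range:

/* Sketch: register an indirect-IO window. */
static u32 my_lpc_in(void *hostdata, unsigned long addr, size_t dwidth);
static void my_lpc_out(void *hostdata, unsigned long addr, u32 val, size_t dwidth);

static const struct logic_pio_host_ops my_lpc_ops = {
	.in = my_lpc_in,
	.out = my_lpc_out,
	/* .ins/.outs would go here if string accessors are supported */
};

static int my_lpc_register(struct device *dev, void *hostdata)
{
	static struct logic_pio_hwaddr range;	/* must outlive the registration */

	range.fwnode = dev->fwnode;
	range.flags = LOGIC_PIO_INDIRECT;
	range.size = PIO_INDIRECT_SIZE;
	range.hw_start = 0;			/* bus-local base address, illustrative */
	range.hostdata = hostdata;
	range.ops = &my_lpc_ops;

	return logic_pio_register_range(&range);
}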
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 7161d8e7ee79..9d0b286f3dba 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -554,6 +554,10 @@
* @new points to the new credentials.
* @old points to the original credentials.
* Transfer data from original creds to new creds
+ * @cred_getsecid:
+ * Retrieve the security identifier of the cred structure @c
+ * @c contains the credentials, secid will be placed into @secid.
+ * In case of failure, @secid will be set to zero.
* @kernel_act_as:
* Set the credentials for a kernel service to act as (subjective context).
* @new points to the credentials to be modified.
@@ -672,7 +676,8 @@
* @p contains the task_struct for process.
* @info contains the signal information.
* @sig contains the signal value.
- * @secid contains the sid of the process where the signal originated
+ * @cred contains the cred of the process where the signal originated, or
+ * NULL if the current task is the originator.
* Return 0 if permission is granted.
* @task_prctl:
* Check permission before performing a process control operation on the
@@ -906,6 +911,33 @@
* associated with the TUN device's security structure.
* @security pointer to the TUN devices's security structure.
*
+ * Security hooks for SCTP
+ *
+ * @sctp_assoc_request:
+ * Passes the @ep and @chunk->skb of the association INIT packet to
+ * the security module.
+ * @ep pointer to sctp endpoint structure.
+ * @skb pointer to skbuff of association packet.
+ * Return 0 on success, error on failure.
+ * @sctp_bind_connect:
+ * Validate permissions required for each address associated with sock
+ * @sk. Depending on @optname, the addresses will be treated as either
+ * for a connect or bind service. The @addrlen is calculated on each
+ * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or
+ * sizeof(struct sockaddr_in6).
+ * @sk pointer to sock structure.
+ * @optname name of the option to validate.
+ * @address list containing one or more ipv4/ipv6 addresses.
+ * @addrlen total length of address(es).
+ * Return 0 on success, error on failure.
+ * @sctp_sk_clone:
+ * Called whenever a new socket is created by accept(2) (i.e. a TCP
+ * style socket) or when a socket is 'peeled off', e.g. when userspace
+ * calls sctp_peeloff(3).
+ * @ep pointer to current sctp endpoint structure.
+ * @sk pointer to current sock structure.
+ * @newsk pointer to new sock structure.
+ *
* Security hooks for Infiniband
*
* @ib_pkey_access:
@@ -1541,6 +1573,7 @@ union security_list_options {
int (*cred_prepare)(struct cred *new, const struct cred *old,
gfp_t gfp);
void (*cred_transfer)(struct cred *new, const struct cred *old);
+ void (*cred_getsecid)(const struct cred *c, u32 *secid);
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
@@ -1564,7 +1597,7 @@ union security_list_options {
int (*task_getscheduler)(struct task_struct *p);
int (*task_movememory)(struct task_struct *p);
int (*task_kill)(struct task_struct *p, struct siginfo *info,
- int sig, u32 secid);
+ int sig, const struct cred *cred);
int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void (*task_to_inode)(struct task_struct *p, struct inode *inode);
@@ -1575,28 +1608,28 @@ union security_list_options {
int (*msg_msg_alloc_security)(struct msg_msg *msg);
void (*msg_msg_free_security)(struct msg_msg *msg);
- int (*msg_queue_alloc_security)(struct msg_queue *msq);
- void (*msg_queue_free_security)(struct msg_queue *msq);
- int (*msg_queue_associate)(struct msg_queue *msq, int msqflg);
- int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd);
- int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg,
+ int (*msg_queue_alloc_security)(struct kern_ipc_perm *msq);
+ void (*msg_queue_free_security)(struct kern_ipc_perm *msq);
+ int (*msg_queue_associate)(struct kern_ipc_perm *msq, int msqflg);
+ int (*msg_queue_msgctl)(struct kern_ipc_perm *msq, int cmd);
+ int (*msg_queue_msgsnd)(struct kern_ipc_perm *msq, struct msg_msg *msg,
int msqflg);
- int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg,
+ int (*msg_queue_msgrcv)(struct kern_ipc_perm *msq, struct msg_msg *msg,
struct task_struct *target, long type,
int mode);
- int (*shm_alloc_security)(struct shmid_kernel *shp);
- void (*shm_free_security)(struct shmid_kernel *shp);
- int (*shm_associate)(struct shmid_kernel *shp, int shmflg);
- int (*shm_shmctl)(struct shmid_kernel *shp, int cmd);
- int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr,
+ int (*shm_alloc_security)(struct kern_ipc_perm *shp);
+ void (*shm_free_security)(struct kern_ipc_perm *shp);
+ int (*shm_associate)(struct kern_ipc_perm *shp, int shmflg);
+ int (*shm_shmctl)(struct kern_ipc_perm *shp, int cmd);
+ int (*shm_shmat)(struct kern_ipc_perm *shp, char __user *shmaddr,
int shmflg);
- int (*sem_alloc_security)(struct sem_array *sma);
- void (*sem_free_security)(struct sem_array *sma);
- int (*sem_associate)(struct sem_array *sma, int semflg);
- int (*sem_semctl)(struct sem_array *sma, int cmd);
- int (*sem_semop)(struct sem_array *sma, struct sembuf *sops,
+ int (*sem_alloc_security)(struct kern_ipc_perm *sma);
+ void (*sem_free_security)(struct kern_ipc_perm *sma);
+ int (*sem_associate)(struct kern_ipc_perm *sma, int semflg);
+ int (*sem_semctl)(struct kern_ipc_perm *sma, int cmd);
+ int (*sem_semop)(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
@@ -1665,6 +1698,12 @@ union security_list_options {
int (*tun_dev_attach_queue)(void *security);
int (*tun_dev_attach)(struct sock *sk, void *security);
int (*tun_dev_open)(void *security);
+ int (*sctp_assoc_request)(struct sctp_endpoint *ep,
+ struct sk_buff *skb);
+ int (*sctp_bind_connect)(struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen);
+ void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk,
+ struct sock *newsk);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
@@ -1730,230 +1769,234 @@ union security_list_options {
};
struct security_hook_heads {
- struct list_head binder_set_context_mgr;
- struct list_head binder_transaction;
- struct list_head binder_transfer_binder;
- struct list_head binder_transfer_file;
- struct list_head ptrace_access_check;
- struct list_head ptrace_traceme;
- struct list_head capget;
- struct list_head capset;
- struct list_head capable;
- struct list_head quotactl;
- struct list_head quota_on;
- struct list_head syslog;
- struct list_head settime;
- struct list_head vm_enough_memory;
- struct list_head bprm_set_creds;
- struct list_head bprm_check_security;
- struct list_head bprm_committing_creds;
- struct list_head bprm_committed_creds;
- struct list_head sb_alloc_security;
- struct list_head sb_free_security;
- struct list_head sb_copy_data;
- struct list_head sb_remount;
- struct list_head sb_kern_mount;
- struct list_head sb_show_options;
- struct list_head sb_statfs;
- struct list_head sb_mount;
- struct list_head sb_umount;
- struct list_head sb_pivotroot;
- struct list_head sb_set_mnt_opts;
- struct list_head sb_clone_mnt_opts;
- struct list_head sb_parse_opts_str;
- struct list_head dentry_init_security;
- struct list_head dentry_create_files_as;
+ struct hlist_head binder_set_context_mgr;
+ struct hlist_head binder_transaction;
+ struct hlist_head binder_transfer_binder;
+ struct hlist_head binder_transfer_file;
+ struct hlist_head ptrace_access_check;
+ struct hlist_head ptrace_traceme;
+ struct hlist_head capget;
+ struct hlist_head capset;
+ struct hlist_head capable;
+ struct hlist_head quotactl;
+ struct hlist_head quota_on;
+ struct hlist_head syslog;
+ struct hlist_head settime;
+ struct hlist_head vm_enough_memory;
+ struct hlist_head bprm_set_creds;
+ struct hlist_head bprm_check_security;
+ struct hlist_head bprm_committing_creds;
+ struct hlist_head bprm_committed_creds;
+ struct hlist_head sb_alloc_security;
+ struct hlist_head sb_free_security;
+ struct hlist_head sb_copy_data;
+ struct hlist_head sb_remount;
+ struct hlist_head sb_kern_mount;
+ struct hlist_head sb_show_options;
+ struct hlist_head sb_statfs;
+ struct hlist_head sb_mount;
+ struct hlist_head sb_umount;
+ struct hlist_head sb_pivotroot;
+ struct hlist_head sb_set_mnt_opts;
+ struct hlist_head sb_clone_mnt_opts;
+ struct hlist_head sb_parse_opts_str;
+ struct hlist_head dentry_init_security;
+ struct hlist_head dentry_create_files_as;
#ifdef CONFIG_SECURITY_PATH
- struct list_head path_unlink;
- struct list_head path_mkdir;
- struct list_head path_rmdir;
- struct list_head path_mknod;
- struct list_head path_truncate;
- struct list_head path_symlink;
- struct list_head path_link;
- struct list_head path_rename;
- struct list_head path_chmod;
- struct list_head path_chown;
- struct list_head path_chroot;
+ struct hlist_head path_unlink;
+ struct hlist_head path_mkdir;
+ struct hlist_head path_rmdir;
+ struct hlist_head path_mknod;
+ struct hlist_head path_truncate;
+ struct hlist_head path_symlink;
+ struct hlist_head path_link;
+ struct hlist_head path_rename;
+ struct hlist_head path_chmod;
+ struct hlist_head path_chown;
+ struct hlist_head path_chroot;
#endif
- struct list_head inode_alloc_security;
- struct list_head inode_free_security;
- struct list_head inode_init_security;
- struct list_head inode_create;
- struct list_head inode_link;
- struct list_head inode_unlink;
- struct list_head inode_symlink;
- struct list_head inode_mkdir;
- struct list_head inode_rmdir;
- struct list_head inode_mknod;
- struct list_head inode_rename;
- struct list_head inode_readlink;
- struct list_head inode_follow_link;
- struct list_head inode_permission;
- struct list_head inode_setattr;
- struct list_head inode_getattr;
- struct list_head inode_setxattr;
- struct list_head inode_post_setxattr;
- struct list_head inode_getxattr;
- struct list_head inode_listxattr;
- struct list_head inode_removexattr;
- struct list_head inode_need_killpriv;
- struct list_head inode_killpriv;
- struct list_head inode_getsecurity;
- struct list_head inode_setsecurity;
- struct list_head inode_listsecurity;
- struct list_head inode_getsecid;
- struct list_head inode_copy_up;
- struct list_head inode_copy_up_xattr;
- struct list_head file_permission;
- struct list_head file_alloc_security;
- struct list_head file_free_security;
- struct list_head file_ioctl;
- struct list_head mmap_addr;
- struct list_head mmap_file;
- struct list_head file_mprotect;
- struct list_head file_lock;
- struct list_head file_fcntl;
- struct list_head file_set_fowner;
- struct list_head file_send_sigiotask;
- struct list_head file_receive;
- struct list_head file_open;
- struct list_head task_alloc;
- struct list_head task_free;
- struct list_head cred_alloc_blank;
- struct list_head cred_free;
- struct list_head cred_prepare;
- struct list_head cred_transfer;
- struct list_head kernel_act_as;
- struct list_head kernel_create_files_as;
- struct list_head kernel_read_file;
- struct list_head kernel_post_read_file;
- struct list_head kernel_module_request;
- struct list_head task_fix_setuid;
- struct list_head task_setpgid;
- struct list_head task_getpgid;
- struct list_head task_getsid;
- struct list_head task_getsecid;
- struct list_head task_setnice;
- struct list_head task_setioprio;
- struct list_head task_getioprio;
- struct list_head task_prlimit;
- struct list_head task_setrlimit;
- struct list_head task_setscheduler;
- struct list_head task_getscheduler;
- struct list_head task_movememory;
- struct list_head task_kill;
- struct list_head task_prctl;
- struct list_head task_to_inode;
- struct list_head ipc_permission;
- struct list_head ipc_getsecid;
- struct list_head msg_msg_alloc_security;
- struct list_head msg_msg_free_security;
- struct list_head msg_queue_alloc_security;
- struct list_head msg_queue_free_security;
- struct list_head msg_queue_associate;
- struct list_head msg_queue_msgctl;
- struct list_head msg_queue_msgsnd;
- struct list_head msg_queue_msgrcv;
- struct list_head shm_alloc_security;
- struct list_head shm_free_security;
- struct list_head shm_associate;
- struct list_head shm_shmctl;
- struct list_head shm_shmat;
- struct list_head sem_alloc_security;
- struct list_head sem_free_security;
- struct list_head sem_associate;
- struct list_head sem_semctl;
- struct list_head sem_semop;
- struct list_head netlink_send;
- struct list_head d_instantiate;
- struct list_head getprocattr;
- struct list_head setprocattr;
- struct list_head ismaclabel;
- struct list_head secid_to_secctx;
- struct list_head secctx_to_secid;
- struct list_head release_secctx;
- struct list_head inode_invalidate_secctx;
- struct list_head inode_notifysecctx;
- struct list_head inode_setsecctx;
- struct list_head inode_getsecctx;
+ struct hlist_head inode_alloc_security;
+ struct hlist_head inode_free_security;
+ struct hlist_head inode_init_security;
+ struct hlist_head inode_create;
+ struct hlist_head inode_link;
+ struct hlist_head inode_unlink;
+ struct hlist_head inode_symlink;
+ struct hlist_head inode_mkdir;
+ struct hlist_head inode_rmdir;
+ struct hlist_head inode_mknod;
+ struct hlist_head inode_rename;
+ struct hlist_head inode_readlink;
+ struct hlist_head inode_follow_link;
+ struct hlist_head inode_permission;
+ struct hlist_head inode_setattr;
+ struct hlist_head inode_getattr;
+ struct hlist_head inode_setxattr;
+ struct hlist_head inode_post_setxattr;
+ struct hlist_head inode_getxattr;
+ struct hlist_head inode_listxattr;
+ struct hlist_head inode_removexattr;
+ struct hlist_head inode_need_killpriv;
+ struct hlist_head inode_killpriv;
+ struct hlist_head inode_getsecurity;
+ struct hlist_head inode_setsecurity;
+ struct hlist_head inode_listsecurity;
+ struct hlist_head inode_getsecid;
+ struct hlist_head inode_copy_up;
+ struct hlist_head inode_copy_up_xattr;
+ struct hlist_head file_permission;
+ struct hlist_head file_alloc_security;
+ struct hlist_head file_free_security;
+ struct hlist_head file_ioctl;
+ struct hlist_head mmap_addr;
+ struct hlist_head mmap_file;
+ struct hlist_head file_mprotect;
+ struct hlist_head file_lock;
+ struct hlist_head file_fcntl;
+ struct hlist_head file_set_fowner;
+ struct hlist_head file_send_sigiotask;
+ struct hlist_head file_receive;
+ struct hlist_head file_open;
+ struct hlist_head task_alloc;
+ struct hlist_head task_free;
+ struct hlist_head cred_alloc_blank;
+ struct hlist_head cred_free;
+ struct hlist_head cred_prepare;
+ struct hlist_head cred_transfer;
+ struct hlist_head cred_getsecid;
+ struct hlist_head kernel_act_as;
+ struct hlist_head kernel_create_files_as;
+ struct hlist_head kernel_read_file;
+ struct hlist_head kernel_post_read_file;
+ struct hlist_head kernel_module_request;
+ struct hlist_head task_fix_setuid;
+ struct hlist_head task_setpgid;
+ struct hlist_head task_getpgid;
+ struct hlist_head task_getsid;
+ struct hlist_head task_getsecid;
+ struct hlist_head task_setnice;
+ struct hlist_head task_setioprio;
+ struct hlist_head task_getioprio;
+ struct hlist_head task_prlimit;
+ struct hlist_head task_setrlimit;
+ struct hlist_head task_setscheduler;
+ struct hlist_head task_getscheduler;
+ struct hlist_head task_movememory;
+ struct hlist_head task_kill;
+ struct hlist_head task_prctl;
+ struct hlist_head task_to_inode;
+ struct hlist_head ipc_permission;
+ struct hlist_head ipc_getsecid;
+ struct hlist_head msg_msg_alloc_security;
+ struct hlist_head msg_msg_free_security;
+ struct hlist_head msg_queue_alloc_security;
+ struct hlist_head msg_queue_free_security;
+ struct hlist_head msg_queue_associate;
+ struct hlist_head msg_queue_msgctl;
+ struct hlist_head msg_queue_msgsnd;
+ struct hlist_head msg_queue_msgrcv;
+ struct hlist_head shm_alloc_security;
+ struct hlist_head shm_free_security;
+ struct hlist_head shm_associate;
+ struct hlist_head shm_shmctl;
+ struct hlist_head shm_shmat;
+ struct hlist_head sem_alloc_security;
+ struct hlist_head sem_free_security;
+ struct hlist_head sem_associate;
+ struct hlist_head sem_semctl;
+ struct hlist_head sem_semop;
+ struct hlist_head netlink_send;
+ struct hlist_head d_instantiate;
+ struct hlist_head getprocattr;
+ struct hlist_head setprocattr;
+ struct hlist_head ismaclabel;
+ struct hlist_head secid_to_secctx;
+ struct hlist_head secctx_to_secid;
+ struct hlist_head release_secctx;
+ struct hlist_head inode_invalidate_secctx;
+ struct hlist_head inode_notifysecctx;
+ struct hlist_head inode_setsecctx;
+ struct hlist_head inode_getsecctx;
#ifdef CONFIG_SECURITY_NETWORK
- struct list_head unix_stream_connect;
- struct list_head unix_may_send;
- struct list_head socket_create;
- struct list_head socket_post_create;
- struct list_head socket_bind;
- struct list_head socket_connect;
- struct list_head socket_listen;
- struct list_head socket_accept;
- struct list_head socket_sendmsg;
- struct list_head socket_recvmsg;
- struct list_head socket_getsockname;
- struct list_head socket_getpeername;
- struct list_head socket_getsockopt;
- struct list_head socket_setsockopt;
- struct list_head socket_shutdown;
- struct list_head socket_sock_rcv_skb;
- struct list_head socket_getpeersec_stream;
- struct list_head socket_getpeersec_dgram;
- struct list_head sk_alloc_security;
- struct list_head sk_free_security;
- struct list_head sk_clone_security;
- struct list_head sk_getsecid;
- struct list_head sock_graft;
- struct list_head inet_conn_request;
- struct list_head inet_csk_clone;
- struct list_head inet_conn_established;
- struct list_head secmark_relabel_packet;
- struct list_head secmark_refcount_inc;
- struct list_head secmark_refcount_dec;
- struct list_head req_classify_flow;
- struct list_head tun_dev_alloc_security;
- struct list_head tun_dev_free_security;
- struct list_head tun_dev_create;
- struct list_head tun_dev_attach_queue;
- struct list_head tun_dev_attach;
- struct list_head tun_dev_open;
+ struct hlist_head unix_stream_connect;
+ struct hlist_head unix_may_send;
+ struct hlist_head socket_create;
+ struct hlist_head socket_post_create;
+ struct hlist_head socket_bind;
+ struct hlist_head socket_connect;
+ struct hlist_head socket_listen;
+ struct hlist_head socket_accept;
+ struct hlist_head socket_sendmsg;
+ struct hlist_head socket_recvmsg;
+ struct hlist_head socket_getsockname;
+ struct hlist_head socket_getpeername;
+ struct hlist_head socket_getsockopt;
+ struct hlist_head socket_setsockopt;
+ struct hlist_head socket_shutdown;
+ struct hlist_head socket_sock_rcv_skb;
+ struct hlist_head socket_getpeersec_stream;
+ struct hlist_head socket_getpeersec_dgram;
+ struct hlist_head sk_alloc_security;
+ struct hlist_head sk_free_security;
+ struct hlist_head sk_clone_security;
+ struct hlist_head sk_getsecid;
+ struct hlist_head sock_graft;
+ struct hlist_head inet_conn_request;
+ struct hlist_head inet_csk_clone;
+ struct hlist_head inet_conn_established;
+ struct hlist_head secmark_relabel_packet;
+ struct hlist_head secmark_refcount_inc;
+ struct hlist_head secmark_refcount_dec;
+ struct hlist_head req_classify_flow;
+ struct hlist_head tun_dev_alloc_security;
+ struct hlist_head tun_dev_free_security;
+ struct hlist_head tun_dev_create;
+ struct hlist_head tun_dev_attach_queue;
+ struct hlist_head tun_dev_attach;
+ struct hlist_head tun_dev_open;
+ struct hlist_head sctp_assoc_request;
+ struct hlist_head sctp_bind_connect;
+ struct hlist_head sctp_sk_clone;
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
- struct list_head ib_pkey_access;
- struct list_head ib_endport_manage_subnet;
- struct list_head ib_alloc_security;
- struct list_head ib_free_security;
+ struct hlist_head ib_pkey_access;
+ struct hlist_head ib_endport_manage_subnet;
+ struct hlist_head ib_alloc_security;
+ struct hlist_head ib_free_security;
#endif /* CONFIG_SECURITY_INFINIBAND */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
- struct list_head xfrm_policy_alloc_security;
- struct list_head xfrm_policy_clone_security;
- struct list_head xfrm_policy_free_security;
- struct list_head xfrm_policy_delete_security;
- struct list_head xfrm_state_alloc;
- struct list_head xfrm_state_alloc_acquire;
- struct list_head xfrm_state_free_security;
- struct list_head xfrm_state_delete_security;
- struct list_head xfrm_policy_lookup;
- struct list_head xfrm_state_pol_flow_match;
- struct list_head xfrm_decode_session;
+ struct hlist_head xfrm_policy_alloc_security;
+ struct hlist_head xfrm_policy_clone_security;
+ struct hlist_head xfrm_policy_free_security;
+ struct hlist_head xfrm_policy_delete_security;
+ struct hlist_head xfrm_state_alloc;
+ struct hlist_head xfrm_state_alloc_acquire;
+ struct hlist_head xfrm_state_free_security;
+ struct hlist_head xfrm_state_delete_security;
+ struct hlist_head xfrm_policy_lookup;
+ struct hlist_head xfrm_state_pol_flow_match;
+ struct hlist_head xfrm_decode_session;
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
#ifdef CONFIG_KEYS
- struct list_head key_alloc;
- struct list_head key_free;
- struct list_head key_permission;
- struct list_head key_getsecurity;
+ struct hlist_head key_alloc;
+ struct hlist_head key_free;
+ struct hlist_head key_permission;
+ struct hlist_head key_getsecurity;
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
- struct list_head audit_rule_init;
- struct list_head audit_rule_known;
- struct list_head audit_rule_match;
- struct list_head audit_rule_free;
+ struct hlist_head audit_rule_init;
+ struct hlist_head audit_rule_known;
+ struct hlist_head audit_rule_match;
+ struct hlist_head audit_rule_free;
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
- struct list_head bpf;
- struct list_head bpf_map;
- struct list_head bpf_prog;
- struct list_head bpf_map_alloc_security;
- struct list_head bpf_map_free_security;
- struct list_head bpf_prog_alloc_security;
- struct list_head bpf_prog_free_security;
+ struct hlist_head bpf;
+ struct hlist_head bpf_map;
+ struct hlist_head bpf_prog;
+ struct hlist_head bpf_map_alloc_security;
+ struct hlist_head bpf_map_free_security;
+ struct hlist_head bpf_prog_alloc_security;
+ struct hlist_head bpf_prog_free_security;
#endif /* CONFIG_BPF_SYSCALL */
} __randomize_layout;
@@ -1962,8 +2005,8 @@ struct security_hook_heads {
* For use with generic list macros for common operations.
*/
struct security_hook_list {
- struct list_head list;
- struct list_head *head;
+ struct hlist_node list;
+ struct hlist_head *head;
union security_list_options hook;
char *lsm;
} __randomize_layout;
@@ -2002,7 +2045,7 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
int i;
for (i = 0; i < count; i++)
- list_del_rcu(&hooks[i].list);
+ hlist_del_rcu(&hooks[i].list);
}
#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
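The move from list_head to hlist_head does not change how an LSM registers: it still builds a security_hook_list array with LSM_HOOK_INIT() and passes it to security_add_hooks(). A minimal sketch wiring up the new cred_getsecid hook for a hypothetical "mylsm" module:

/* Sketch: registering one hook from an LSM. */
static void mylsm_cred_getsecid(const struct cred *c, u32 *secid)
{
	*secid = 0;	/* illustrative: this LSM does not assign secids */
}

static struct security_hook_list mylsm_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(cred_getsecid, mylsm_cred_getsecid),
};

static int __init mylsm_init(void)
{
	security_add_hooks(mylsm_hooks, ARRAY_SIZE(mylsm_hooks), "mylsm");
	return 0;
}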
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ca08ab16ecdc..2cfffe586885 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -12,6 +12,7 @@
#include <uapi/linux/mdio.h>
#include <linux/mod_devicetable.h>
+struct gpio_desc;
struct mii_bus;
/* Multiple levels of nesting are possible. However typically this is
@@ -39,6 +40,9 @@ struct mdio_device {
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
+ struct gpio_desc *reset;
+ unsigned int reset_assert_delay;
+ unsigned int reset_deassert_delay;
};
#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
@@ -71,6 +75,7 @@ void mdio_device_free(struct mdio_device *mdiodev);
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
int mdio_device_register(struct mdio_device *mdiodev);
void mdio_device_remove(struct mdio_device *mdiodev);
+void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
@@ -257,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
return reg;
}
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
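__mdiobus_read()/__mdiobus_write() are the unlocked variants for callers that already hold bus->mdio_lock, which makes multi-access sequences atomic with respect to other bus users. A read-modify-write sketch (the register number and set mask are illustrative):

/* Sketch: set bits in one PHY register without dropping the bus lock. */
static int mdio_set_bits(struct mii_bus *bus, int addr, u32 regnum, u16 set)
{
	int val;

	mutex_lock(&bus->mdio_lock);
	val = __mdiobus_read(bus, addr, regnum);
	if (val >= 0)
		val = __mdiobus_write(bus, addr, regnum, val | set);
	mutex_unlock(&bus->mdio_lock);

	return val < 0 ? val : 0;
}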
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 7ed0f7782d16..ca59883c8364 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -187,7 +187,6 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
-unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
/**
* for_each_mem_pfn_range - early memory pfn range iterator
@@ -319,6 +318,9 @@ static inline bool memblock_bottom_up(void)
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
ulong flags);
+phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
+ phys_addr_t align, phys_addr_t max_addr,
+ int nid, ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -332,8 +334,8 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
-int memblock_is_map_memory(phys_addr_t addr);
-int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_is_map_memory(phys_addr_t addr);
+bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
@@ -417,21 +419,11 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
-
-extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
- phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return 0;
}
-
-static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
- phys_addr_t end_addr)
-{
- return 0;
-}
-
#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */
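memblock_alloc_base_nid() is the node-aware sibling of memblock_alloc_base(); it hands back a physical address and is expected to return 0 rather than panic when the request cannot be satisfied, so callers should check and fall back. An early-boot sketch under that assumption:

/* Sketch: node-local early allocation with a fallback to any node. */
static phys_addr_t __init alloc_pernode_buf(int nid, phys_addr_t size)
{
	phys_addr_t pa;

	pa = memblock_alloc_base_nid(size, SMP_CACHE_BYTES,
				     MEMBLOCK_ALLOC_ACCESSIBLE, nid, MEMBLOCK_NONE);
	if (!pa)
		pa = memblock_alloc_base_nid(size, SMP_CACHE_BYTES,
					     MEMBLOCK_ALLOC_ACCESSIBLE,
					     NUMA_NO_NODE, MEMBLOCK_NONE);
	return pa;
}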
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 69966c461d1c..d99b71bc2c66 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -48,13 +48,12 @@ enum memcg_stat_item {
MEMCG_NR_STAT,
};
-/* Cgroup-specific events, on top of universal VM events */
-enum memcg_event_item {
- MEMCG_LOW = NR_VM_EVENT_ITEMS,
+enum memcg_memory_event {
+ MEMCG_LOW,
MEMCG_HIGH,
MEMCG_MAX,
MEMCG_OOM,
- MEMCG_NR_EVENTS,
+ MEMCG_NR_MEMORY_EVENTS,
};
struct mem_cgroup_reclaim_cookie {
@@ -88,7 +87,7 @@ enum mem_cgroup_events_target {
struct mem_cgroup_stat_cpu {
long count[MEMCG_NR_STAT];
- unsigned long events[MEMCG_NR_EVENTS];
+ unsigned long events[NR_VM_EVENT_ITEMS];
unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
};
@@ -108,7 +107,10 @@ struct lruvec_stat {
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
- struct lruvec_stat __percpu *lruvec_stat;
+
+ struct lruvec_stat __percpu *lruvec_stat_cpu;
+ atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
+
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -117,6 +119,9 @@ struct mem_cgroup_per_node {
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
bool on_tree;
+ bool congested; /* memcg has many dirty pages */
+ /* backed by a congested BDI */
+
struct mem_cgroup *memcg; /* Back pointer, we cannot */
/* use container_of */
};
@@ -199,7 +204,8 @@ struct mem_cgroup {
/* OOM-Killer disable */
int oom_kill_disable;
- /* handle for "memory.events" */
+ /* memory.events */
+ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
struct cgroup_file events_file;
/* protect arrays of thresholds */
@@ -227,10 +233,11 @@ struct mem_cgroup {
spinlock_t move_lock;
struct task_struct *move_lock_task;
unsigned long move_lock_flags;
- /*
- * percpu counter.
- */
- struct mem_cgroup_stat_cpu __percpu *stat;
+
+ /* memory.stat */
+ struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+ atomic_long_t stat[MEMCG_NR_STAT];
+ atomic_long_t events[NR_VM_EVENT_ITEMS];
unsigned long socket_pressure;
@@ -265,6 +272,12 @@ struct mem_cgroup {
/* WARNING: nodeinfo must be the last member here */
};
+/*
+ * size of first charge trial. "32" comes from vmscan.c's magic value.
+ * TODO: it may be necessary to use bigger batches on very large systems.
+ */
+#define MEMCG_CHARGE_BATCH 32U
+
extern struct mem_cgroup *root_mem_cgroup;
static inline bool mem_cgroup_disabled(void)
@@ -272,13 +285,6 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
- enum memcg_event_item event)
-{
- this_cpu_inc(memcg->stat->events[event]);
- cgroup_file_notify(&memcg->events_file);
-}
-
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
@@ -492,32 +498,40 @@ void unlock_page_memcg(struct page *page);
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
int idx)
{
- long val = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- val += per_cpu(memcg->stat->count[idx], cpu);
-
- if (val < 0)
- val = 0;
-
- return val;
+ long x = atomic_long_read(&memcg->stat[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
{
- if (!mem_cgroup_disabled())
- __this_cpu_add(memcg->stat->count[idx], val);
+ long x;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &memcg->stat[idx]);
+ x = 0;
+ }
+ __this_cpu_write(memcg->stat_cpu->count[idx], x);
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->count[idx], val);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __mod_memcg_state(memcg, idx, val);
+ local_irq_restore(flags);
}
/**
@@ -555,89 +569,116 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
- long val = 0;
- int cpu;
+ long x;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- val += per_cpu(pn->lruvec_stat->count[idx], cpu);
-
- if (val < 0)
- val = 0;
-
- return val;
+ x = atomic_long_read(&pn->lruvec_stat[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
static inline void __mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
struct mem_cgroup_per_node *pn;
+ long x;
+ /* Update node */
__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+
if (mem_cgroup_disabled())
return;
+
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+
+ /* Update memcg */
__mod_memcg_state(pn->memcg, idx, val);
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+
+ /* Update lruvec */
+ x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &pn->lruvec_stat[idx]);
+ x = 0;
+ }
+ __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}
static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
+ unsigned long flags;
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
- if (mem_cgroup_disabled())
- return;
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- mod_memcg_state(pn->memcg, idx, val);
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+ local_irq_save(flags);
+ __mod_lruvec_state(lruvec, idx, val);
+ local_irq_restore(flags);
}
static inline void __mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
+ pg_data_t *pgdat = page_pgdat(page);
+ struct lruvec *lruvec;
- __mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
+ /* Untracked pages have no memcg, no lruvec. Update only the node */
+ if (!page->mem_cgroup) {
+ __mod_node_page_state(pgdat, idx, val);
return;
- __mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+ }
+
+ lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+ __mod_lruvec_state(lruvec, idx, val);
}
static inline void mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
+ unsigned long flags;
- mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
- return;
- mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+ local_irq_save(flags);
+ __mod_lruvec_page_state(page, idx, val);
+ local_irq_restore(flags);
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
+static inline void __count_memcg_events(struct mem_cgroup *memcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+ unsigned long x;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
+ if (unlikely(x > MEMCG_CHARGE_BATCH)) {
+ atomic_long_add(x, &memcg->events[idx]);
+ x = 0;
+ }
+ __this_cpu_write(memcg->stat_cpu->events[idx], x);
+}
+
static inline void count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->events[idx], count);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __count_memcg_events(memcg, idx, count);
+ local_irq_restore(flags);
}
-/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void count_memcg_page_event(struct page *page,
- int idx)
+ enum vm_event_item idx)
{
if (page->mem_cgroup)
count_memcg_events(page->mem_cgroup, idx, 1);
@@ -654,12 +695,20 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (likely(memcg)) {
- this_cpu_inc(memcg->stat->events[idx]);
+ count_memcg_events(memcg, idx, 1);
if (idx == OOM_KILL)
cgroup_file_notify(&memcg->events_file);
}
rcu_read_unlock();
}
+
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
+{
+ atomic_long_inc(&memcg->memory_events[event]);
+ cgroup_file_notify(&memcg->events_file);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
@@ -676,8 +725,8 @@ static inline bool mem_cgroup_disabled(void)
return true;
}
-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
- enum memcg_event_item event)
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
{
}
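
The memcontrol.h changes above replace the read-side loop over all possible CPUs with a split counter: each CPU accumulates a small signed delta and folds it into a shared atomic only once the delta exceeds MEMCG_CHARGE_BATCH, so readers of memcg->stat[] can lag by at most the batch size per CPU. Below is a minimal stand-alone sketch of that flush-on-threshold arithmetic; every identifier in it is illustrative rather than a kernel symbol, and it mirrors __mod_memcg_state() for a single CPU only.

/*
 * Stand-alone illustration of the flush-on-threshold scheme used by
 * __mod_memcg_state() above.  All names here are illustrative; BATCH
 * plays the role of MEMCG_CHARGE_BATCH.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH 32

static atomic_long shared_stat;	/* analogue of memcg->stat[idx] */
static long local_delta;	/* analogue of stat_cpu->count[idx], one "CPU" */

static void mod_stat(long val)
{
	long x = val + local_delta;

	if (labs(x) > BATCH) {			/* delta grew past the batch size */
		atomic_fetch_add(&shared_stat, x);	/* fold into the shared counter */
		x = 0;
	}
	local_delta = x;			/* keep the remainder locally */
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_stat(1);

	/* Readers of the shared counter may lag by at most BATCH per CPU. */
	printf("shared=%ld local=%ld\n",
	       (long)atomic_load(&shared_stat), local_delta);
	return 0;
}

After 100 increments this prints shared=99 local=1: the shared counter lags by exactly the unflushed per-CPU remainder.
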
diff --git a/include/linux/memory.h b/include/linux/memory.h
index f71e732c77b2..31ca3e28b0eb 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -33,6 +33,7 @@ struct memory_block {
void *hw; /* optional pointer to fw/hw data */
int (*phys_callback)(struct memory_block *);
struct device dev;
+ int nid; /* NID for this memory block */
};
int arch_get_memory_phys_device(unsigned long start_pfn);
@@ -109,7 +110,7 @@ extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
-extern int register_new_memory(int, struct mem_section *);
+int hotplug_memory_register(int nid, struct mem_section *section);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int unregister_memory_section(struct mem_section *);
#endif
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 58e110aee7ab..e0e49b5b1ee1 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -13,6 +13,7 @@ struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
+struct vmem_altmap;
#ifdef CONFIG_MEMORY_HOTPLUG
/*
@@ -51,24 +52,6 @@ enum {
};
/*
- * pgdat resizing functions
- */
-static inline
-void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_lock_irqsave(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_init(struct pglist_data *pgdat)
-{
- spin_lock_init(&pgdat->node_size_lock);
-}
-/*
* Zone resizing functions
*
* Note: any attempt to resize a zone should hold pgdat_resize_lock()
@@ -125,24 +108,26 @@ static inline bool movable_node_is_enabled(void)
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
-extern int arch_remove_memory(u64 start, u64 size);
+extern int arch_remove_memory(u64 start, u64 size,
+ struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
+ unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */
/* reasonably generic interface to expand the physical pages */
-extern int __add_pages(int nid, unsigned long start_pfn,
- unsigned long nr_pages, bool want_memblock);
+extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap, bool want_memblock);
#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
- unsigned long nr_pages, bool want_memblock)
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ bool want_memblock)
{
- return __add_pages(nid, start_pfn, nr_pages, want_memblock);
+ return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
-int add_pages(int nid, unsigned long start_pfn,
- unsigned long nr_pages, bool want_memblock);
+int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */
#ifdef CONFIG_NUMA
@@ -231,9 +216,6 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
-
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
({ \
@@ -243,13 +225,6 @@ extern void clear_zone_contiguous(struct zone *zone);
___page; \
})
-/*
- * Stub functions for when hotplug is off
- */
-static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
-
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
@@ -290,6 +265,34 @@ static inline bool movable_node_is_enabled(void)
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
+/*
+ * pgdat resizing functions
+ */
+static inline
+void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_lock_irqsave(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_init(struct pglist_data *pgdat)
+{
+ spin_lock_init(&pgdat->node_size_lock);
+}
+#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+/*
+ * Stub functions for when hotplug is off
+ */
+static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
+#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
@@ -318,15 +321,17 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
-extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock);
+extern int arch_add_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
+ unsigned long nr_pages, struct vmem_altmap *altmap);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn);
+extern int sparse_add_one_section(struct pglist_data *pgdat,
+ unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
- unsigned long map_offset);
+ unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
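
The pgdat resizing helpers are moved rather than changed: they are now also available when only CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. A minimal sketch of the lock/unlock pairing they provide follows; the wrapper function and the field it updates are illustrative, only the pgdat_resize_*() helpers come from this header.

#include <linux/memory_hotplug.h>
#include <linux/mmzone.h>

/* Illustrative caller: grow a node's span under the resize lock. */
static void example_grow_node_span(struct pglist_data *pgdat,
				   unsigned long extra_pages)
{
	unsigned long flags;

	pgdat_resize_lock(pgdat, &flags);	/* spin_lock_irqsave on node_size_lock */
	pgdat->node_spanned_pages += extra_pages;
	pgdat_resize_unlock(pgdat, &flags);
}
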
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 10d23c367048..7b4899c06f49 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -26,18 +26,6 @@ struct vmem_altmap {
unsigned long alloc;
};
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
-
-#ifdef CONFIG_ZONE_DEVICE
-struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
-#else
-static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
-{
- return NULL;
-}
-#endif
-
/*
* Specialize ZONE_DEVICE memory into multiple types, each having a different
* usage.
@@ -125,8 +113,9 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
struct dev_pagemap {
dev_page_fault_t page_fault;
dev_page_free_t page_free;
- struct vmem_altmap *altmap;
- const struct resource *res;
+ struct vmem_altmap altmap;
+ bool altmap_valid;
+ struct resource res;
struct percpu_ref *ref;
struct device *dev;
void *data;
@@ -134,15 +123,17 @@ struct dev_pagemap {
};
#ifdef CONFIG_ZONE_DEVICE
-void *devm_memremap_pages(struct device *dev, struct resource *res,
- struct percpu_ref *ref, struct vmem_altmap *altmap);
-struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
+void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+ struct dev_pagemap *pgmap);
+
+unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
+void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
static inline bool is_zone_device_page(const struct page *page);
#else
static inline void *devm_memremap_pages(struct device *dev,
- struct resource *res, struct percpu_ref *ref,
- struct vmem_altmap *altmap)
+ struct dev_pagemap *pgmap)
{
/*
* Fail attempts to call devm_memremap_pages() without
@@ -153,11 +144,22 @@ static inline void *devm_memremap_pages(struct device *dev,
return ERR_PTR(-ENXIO);
}
-static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
+static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+ struct dev_pagemap *pgmap)
{
return NULL;
}
-#endif
+
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+}
+#endif /* CONFIG_ZONE_DEVICE */
#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
static inline bool is_device_private_page(const struct page *page)
@@ -173,39 +175,6 @@ static inline bool is_device_public_page(const struct page *page)
}
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
-/**
- * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
- * @pfn: page frame number to lookup page_map
- * @pgmap: optional known pgmap that already has a reference
- *
- * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
- * same mapping.
- */
-static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap)
-{
- const struct resource *res = pgmap ? pgmap->res : NULL;
- resource_size_t phys = PFN_PHYS(pfn);
-
- /*
- * In the cached case we're already holding a live reference so
- * we can simply do a blind increment
- */
- if (res && phys >= res->start && phys <= res->end) {
- percpu_ref_get(pgmap->ref);
- return pgmap;
- }
-
- /* fall back to slow path lookup */
- rcu_read_lock();
- pgmap = find_dev_pagemap(phys);
- if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
- pgmap = NULL;
- rcu_read_unlock();
-
- return pgmap;
-}
-
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
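
With this change callers stop passing the resource, percpu ref and altmap separately and instead fill a struct dev_pagemap before handing it to devm_memremap_pages(). A hedged sketch of the new calling convention, assuming a caller-supplied resource and percpu_ref; the wrapper function and its error handling are illustrative, not part of the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/memremap.h>

/* Illustrative wrapper: only the dev_pagemap fields and
 * devm_memremap_pages() come from this diff. */
static void *example_map_device_memory(struct device *dev,
				       struct resource *res,
				       struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->res = *res;		/* the resource is now embedded by value */
	pgmap->ref = ref;
	pgmap->altmap_valid = false;	/* no vmem_altmap carve-out in this sketch */

	return devm_memremap_pages(dev, pgmap);
}
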
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 78dc85365c4f..82bf7747b312 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -266,6 +266,8 @@ enum axp20x_variants {
#define AXP288_RT_BATT_V_H 0xa0
#define AXP288_RT_BATT_V_L 0xa1
+#define AXP813_ADC_RATE 0x85
+
/* Fuel Gauge */
#define AXP288_FG_RDC1_REG 0xba
#define AXP288_FG_RDC0_REG 0xbb
@@ -645,11 +647,6 @@ struct axp20x_dev {
const struct regmap_irq_chip *regmap_irq_chip;
};
-struct axp288_extcon_pdata {
- /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
- struct gpio_desc *gpio_mux_cntl;
-};
-
/* generic helper function for reading 9-16 bit wide regs */
static inline int axp20x_read_variable_width(struct regmap *regmap,
unsigned int reg, unsigned int width)
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 4e887ba22635..c61535979b8f 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -322,6 +322,10 @@ extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
extern struct attribute_group cros_ec_vbc_attr_group;
+/* debugfs stuff */
+int cros_ec_debugfs_init(struct cros_ec_dev *ec);
+void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
+
/* ACPI GPE handler */
#ifdef CONFIG_ACPI
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 2b16e95b9bb8..2b96e630e3b6 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -291,6 +291,9 @@ enum host_event_code {
/* EC desires to change state of host-controlled USB mux */
EC_HOST_EVENT_USB_MUX = 28,
+ /* EC RTC event occurred */
+ EC_HOST_EVENT_RTC = 26,
+
/*
* The high bit of the event mask is not used as a host event code. If
* it reads back as set, then the entire event mask should be
@@ -799,6 +802,8 @@ enum ec_feature_code {
EC_FEATURE_USB_MUX = 23,
/* Motion Sensor code has an internal software FIFO */
EC_FEATURE_MOTION_SENSE_FIFO = 24,
+ /* EC has RTC feature that can be controlled by host commands */
+ EC_FEATURE_RTC = 27,
};
#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
@@ -1709,6 +1714,9 @@ struct ec_response_rtc {
#define EC_CMD_RTC_SET_VALUE 0x46
#define EC_CMD_RTC_SET_ALARM 0x47
+/* Pass as param to SET_ALARM to clear the current alarm */
+#define EC_RTC_ALARM_CLEAR 0
+
/*****************************************************************************/
/* Port80 log access */
@@ -2904,16 +2912,33 @@ enum usb_pd_control_mux {
USB_PD_CTRL_MUX_AUTO = 5,
};
+enum usb_pd_control_swap {
+ USB_PD_CTRL_SWAP_NONE = 0,
+ USB_PD_CTRL_SWAP_DATA = 1,
+ USB_PD_CTRL_SWAP_POWER = 2,
+ USB_PD_CTRL_SWAP_VCONN = 3,
+ USB_PD_CTRL_SWAP_COUNT
+};
+
struct ec_params_usb_pd_control {
uint8_t port;
uint8_t role;
uint8_t mux;
+ uint8_t swap;
} __packed;
#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
+#define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */
+#define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */
+#define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */
+#define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */
+#define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */
+#define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */
+#define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powered */
+
struct ec_response_usb_pd_control_v1 {
uint8_t enabled;
uint8_t role;
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
index 04e092be4b07..1a94fa2ac309 100644
--- a/include/linux/mfd/da9055/pdata.h
+++ b/include/linux/mfd/da9055/pdata.h
@@ -12,6 +12,7 @@
#define DA9055_MAX_REGULATORS 8
struct da9055;
+struct gpio_desc;
enum gpio_select {
NO_GPIO = 0,
@@ -47,7 +48,7 @@ struct da9055_pdata {
* controls the regulator set A/B, 0 if not available.
*/
enum gpio_select *reg_rsel;
- /* GPIOs to enable regulator, 0 if not available */
- int *ena_gpio;
+ /* GPIO descriptors to enable regulator, NULL if not available */
+ struct gpio_desc **ena_gpiods;
};
#endif /* __DA9055_PDATA_H */
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 3c8568aa82a5..75e5c8ff85fc 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -3733,6 +3733,9 @@ enum usb_irq_events {
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
+#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
+
/* Registers for function RESOURCE */
#define TPS65917_REGEN1_CTRL 0x2
#define TPS65917_PLLEN_CTRL 0x3
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
new file mode 100644
index 000000000000..796fb9794c9e
--- /dev/null
+++ b/include/linux/mfd/rave-sp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Core definitions for RAVE SP MFD driver.
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#ifndef _LINUX_RAVE_SP_H_
+#define _LINUX_RAVE_SP_H_
+
+#include <linux/notifier.h>
+
+enum rave_sp_command {
+ RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20,
+ RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21,
+ RAVE_SP_CMD_BOOT_SOURCE = 0x26,
+ RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B,
+ RAVE_SP_CMD_GET_GPIO_STATE = 0x2F,
+
+ RAVE_SP_CMD_STATUS = 0xA0,
+ RAVE_SP_CMD_SW_WDT = 0xA1,
+ RAVE_SP_CMD_PET_WDT = 0xA2,
+ RAVE_SP_CMD_RESET = 0xA7,
+ RAVE_SP_CMD_RESET_REASON = 0xA8,
+
+ RAVE_SP_CMD_REQ_COPPER_REV = 0xB6,
+ RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA,
+ RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9,
+ RAVE_SP_CMD_CONTROL_EVENTS = 0xBB,
+
+ RAVE_SP_EVNT_BASE = 0xE0,
+};
+
+struct rave_sp;
+
+static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
+{
+ return ((unsigned long)value << 8) | event;
+}
+
+static inline u8 rave_sp_action_unpack_event(unsigned long action)
+{
+ return action;
+}
+
+static inline u8 rave_sp_action_unpack_value(unsigned long action)
+{
+ return action >> 8;
+}
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size);
+
+struct device;
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb);
+
+#endif /* _LINUX_RAVE_SP_H_ */
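
The new rave-sp.h packs an event byte and a value byte into the notifier action word. A short sketch of a consumer using only the helpers declared above; the probe function, hook names and log message are hypothetical.

#include <linux/kernel.h>
#include <linux/mfd/rave-sp.h>
#include <linux/notifier.h>

/* Hypothetical consumer; only the rave_sp_* helpers and the notifier
 * registration come from this header. */
static int example_rave_sp_event(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	u8 event = rave_sp_action_unpack_event(action);
	u8 value = rave_sp_action_unpack_value(action);

	pr_info("rave-sp event %#x value %#x\n", event, value);
	return NOTIFY_OK;
}

static struct notifier_block example_rave_sp_nb = {
	.notifier_call = example_rave_sp_event,
};

static int example_probe(struct device *dev)
{
	return devm_rave_sp_register_event_notifier(dev, &example_rave_sp_nb);
}
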
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 48c3c5be7eb1..9ed2871ea335 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -141,15 +141,4 @@ enum s2mps_rtc_reg {
#define WTSR_ENABLE_SHIFT 6
#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT)
-enum {
- RTC_SEC = 0,
- RTC_MIN,
- RTC_HOUR,
- RTC_WEEKDAY,
- RTC_DATE,
- RTC_MONTH,
- RTC_YEAR1,
- RTC_YEAR2,
-};
-
#endif /* __LINUX_MFD_SEC_RTC_H */
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 77c7cf40d9b4..605f62264825 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_LPTIMER_H_
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index ce7346e7f77a..2aadab6f34a1 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -1,9 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_GPTIMER_H_
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index c8e0164c5423..e06f5f79eaef 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -243,6 +243,8 @@
#define IMX6Q_GPR4_IPU_RD_CACHE_CTL BIT(0)
#define IMX6Q_GPR5_L2_CLK_STOP BIT(8)
+#define IMX6Q_GPR5_SATA_SW_PD BIT(10)
+#define IMX6Q_GPR5_SATA_SW_RST BIT(11)
#define IMX6Q_GPR6_IPU1_ID00_WR_QOS_MASK (0xf << 0)
#define IMX6Q_GPR6_IPU1_ID01_WR_QOS_MASK (0xf << 4)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index e1cfe9194129..91f92215ca74 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -25,26 +25,6 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
#define sd_config_write8(base, shift, reg, val) \
tmio_iowrite8((val), (base) + ((reg) << (shift)))
#define sd_config_write16(base, shift, reg, val) \
@@ -56,7 +36,6 @@
} while (0)
/* tmio MMC platform flags */
-#define TMIO_MMC_WRPROTECT_DISABLE BIT(0)
/*
* Some controllers can support a 2-byte block size when the bus width
* is configured in 4-bit mode.
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a2246cf670ba..f2b4abbca55e 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,8 +7,7 @@
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
-typedef struct page *new_page_t(struct page *page, unsigned long private,
- int **reason);
+typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);
/*
@@ -25,7 +24,7 @@ enum migrate_reason {
MR_SYSCALL, /* also applies to cpusets */
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
- MR_CMA,
+ MR_CONTIG_RANGE,
MR_TYPES
};
@@ -43,9 +42,9 @@ static inline struct page *new_page_nodemask(struct page *page,
return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
preferred_nid, nodemask);
- if (thp_migration_supported() && PageTransHuge(page)) {
- order = HPAGE_PMD_ORDER;
+ if (PageTransHuge(page)) {
gfp_mask |= GFP_TRANSHUGE;
+ order = HPAGE_PMD_ORDER;
}
if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
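
new_page_t loses its int **reason argument, so a migration allocation callback now sees just the page and the caller's private cookie. An illustrative callback matching the new typedef; the node-local, order-0 allocation policy shown here is arbitrary.

#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>

/* Illustrative two-argument allocation callback for the new new_page_t. */
static struct page *example_new_page(struct page *page, unsigned long private)
{
	/* Stay on the source page's node; order-0 pages only. */
	return alloc_pages_node(page_to_nid(page), GFP_HIGHUSER_MOVABLE, 0);
}
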
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a9b5fed8f7c6..81d0799b6091 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -257,10 +257,6 @@ enum {
};
enum {
- MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
-};
-
-enum {
MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h
new file mode 100644
index 000000000000..70e7e5673ce9
--- /dev/null
+++ b/include/linux/mlx5/accel.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_ACCEL_H__
+#define __MLX5_ACCEL_H__
+
+#include <linux/mlx5/driver.h>
+
+enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
+ MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
+};
+
+enum mlx5_accel_esp_flags {
+ MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
+ MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
+ MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
+ MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
+};
+
+enum mlx5_accel_esp_action {
+ MLX5_ACCEL_ESP_ACTION_DECRYPT,
+ MLX5_ACCEL_ESP_ACTION_ENCRYPT,
+};
+
+enum mlx5_accel_esp_keymats {
+ MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
+ MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
+};
+
+enum mlx5_accel_esp_replay {
+ MLX5_ACCEL_ESP_REPLAY_NONE,
+ MLX5_ACCEL_ESP_REPLAY_BMP,
+};
+
+struct aes_gcm_keymat {
+ u64 seq_iv;
+ enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;
+
+ u32 salt;
+ u32 icv_len;
+
+ u32 key_len;
+ u32 aes_key[256 / 32];
+};
+
+struct mlx5_accel_esp_xfrm_attrs {
+ enum mlx5_accel_esp_action action;
+ u32 esn;
+ u32 spi;
+ u32 seq;
+ u32 tfc_pad;
+ u32 flags;
+ u32 sa_handle;
+ enum mlx5_accel_esp_replay replay_type;
+ union {
+ struct {
+ u32 size;
+
+ } bmp;
+ } replay;
+ enum mlx5_accel_esp_keymats keymat_type;
+ union {
+ struct aes_gcm_keymat aes_gcm;
+ } keymat;
+};
+
+struct mlx5_accel_esp_xfrm {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
+};
+
+enum {
+ MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
+};
+
+enum mlx5_accel_ipsec_cap {
+ MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
+ MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
+ MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
+ MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
+ MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
+ MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
+ MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
+ MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
+};
+
+#ifdef CONFIG_MLX5_ACCEL
+
+u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
+
+struct mlx5_accel_esp_xfrm *
+mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags);
+void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
+int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs);
+
+#else
+
+static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
+
+static inline struct mlx5_accel_esp_xfrm *
+mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void
+mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
+static inline int
+mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
+
+#endif
+#endif
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 48c181a2acc9..0ef6138eca49 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -60,6 +60,7 @@ struct mlx5_core_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
+ struct mlx5_eq *eq;
};
@@ -171,8 +172,17 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
}
-int mlx5_init_cq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
+{
+ refcount_inc(&cq->refcount);
+}
+
+static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
+{
+ if (refcount_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
+
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
@@ -183,6 +193,12 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
+static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
+ struct mlx5_err_cqe *err_cqe)
+{
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+}
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
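
mlx5_cq_hold()/mlx5_cq_put() expose the CQ refcount directly now that the CQ table bookkeeping has moved out of this header; the final put completes cq->free so the teardown path can wait for outstanding users. A trivial usage sketch, with the surrounding function made up:

#include <linux/mlx5/cq.h>

/* Illustrative only: pin a CQ across some work, then drop the reference. */
static void example_touch_cq(struct mlx5_core_cq *cq)
{
	mlx5_cq_hold(cq);	/* refcount_inc(&cq->refcount) */
	/* ... poll or re-arm the CQ here ... */
	mlx5_cq_put(cq);	/* the final put completes &cq->free */
}
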
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 409ffb14298a..2bc27f8c5b87 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -79,6 +79,11 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
+ MLX5_SET(typ, p, fld[idx], v); \
+} while (0)
+
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
@@ -244,6 +249,8 @@ enum {
MLX5_NON_FP_BFREGS_PER_UAR,
MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+ MLX5_MIN_DYN_BFREGS = 512,
+ MLX5_MAX_DYN_BFREGS = 1024,
};
enum {
@@ -284,6 +291,7 @@ enum {
MLX5_EVENT_QUEUE_TYPE_QP = 0,
MLX5_EVENT_QUEUE_TYPE_RQ = 1,
MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+ MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
enum mlx5_event {
@@ -319,6 +327,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+ MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
};
@@ -611,6 +621,11 @@ struct mlx5_eqe_pps {
u8 rsvd2[12];
} __packed;
+struct mlx5_eqe_dct {
+ __be32 reserved[6];
+ __be32 dctn;
+};
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -626,6 +641,7 @@ union ev_data {
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
+ struct mlx5_eqe_dct dct;
} __packed;
struct mlx5_eqe {
@@ -766,6 +782,9 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
+#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
+#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6)
+
struct mpwrq_cqe_bc {
__be16 filler_consumed_strides;
__be16 byte_cnt;
@@ -997,6 +1016,9 @@ enum mlx5_cap_type {
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
+ MLX5_CAP_DEBUG,
+ MLX5_CAP_RESERVED_14,
+ MLX5_CAP_DEV_MEM,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1124,6 +1146,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_QOS(mdev, cap)\
MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+#define MLX5_CAP_DEBUG(mdev, cap)\
+ MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
+
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
@@ -1145,6 +1170,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP64_FPGA(mdev, cap) \
MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
+#define MLX5_CAP_DEV_MEM(mdev, cap)\
+ MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+
+#define MLX5_CAP64_DEV_MEM(mdev, cap)\
+ MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1188,8 +1219,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
-#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
-#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a0610427e168..767d193c269a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -155,6 +155,13 @@ enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
+enum mlx5_dct_atomic_mode {
+ MLX5_ATOMIC_MODE_DCT_OFF = 20,
+ MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+};
+
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
@@ -231,6 +238,9 @@ struct mlx5_bfreg_info {
u32 ver;
bool lib_uar_4k;
u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
};
struct mlx5_cmd_first {
@@ -335,13 +345,6 @@ struct mlx5_buf_list {
dma_addr_t map;
};
-struct mlx5_buf {
- struct mlx5_buf_list direct;
- int npages;
- int size;
- u8 page_shift;
-};
-
struct mlx5_frag_buf {
struct mlx5_buf_list *frags;
int npages;
@@ -349,6 +352,15 @@ struct mlx5_frag_buf {
u8 page_shift;
};
+struct mlx5_frag_buf_ctrl {
+ struct mlx5_frag_buf frag_buf;
+ u32 sz_m1;
+ u32 frag_sz_m1;
+ u8 log_sz;
+ u8 log_stride;
+ u8 log_frag_strides;
+};
+
struct mlx5_eq_tasklet {
struct list_head list;
struct list_head process_list;
@@ -365,11 +377,18 @@ struct mlx5_eq_pagefault {
mempool_t *pool;
};
+struct mlx5_cq_table {
+ /* protect radix tree */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
struct mlx5_eq {
struct mlx5_core_dev *dev;
+ struct mlx5_cq_table cq_table;
__be32 __iomem *doorbell;
u32 cons_index;
- struct mlx5_buf buf;
+ struct mlx5_frag_buf buf;
int size;
unsigned int irqn;
u8 eqn;
@@ -430,6 +449,7 @@ enum mlx5_res_type {
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
MLX5_RES_XRQ = 5,
+ MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
@@ -442,8 +462,8 @@ struct mlx5_core_srq {
struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
- int max_gs;
- int max_avail_gather;
+ size_t max_gs;
+ size_t max_avail_gather;
int wqe_shift;
void (*event) (struct mlx5_core_srq *, enum mlx5_event);
@@ -515,13 +535,6 @@ struct mlx5_core_health {
struct delayed_work recover_work;
};
-struct mlx5_cq_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
-
struct mlx5_qp_table {
/* protect radix tree
*/
@@ -578,8 +591,14 @@ struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_pagefault;
+struct mlx5_rate_limit {
+ u32 rate;
+ u32 max_burst_sz;
+ u16 typical_pkt_sz;
+};
+
struct mlx5_rl_entry {
- u32 rate;
+ struct mlx5_rate_limit rl;
u16 index;
u16 refcount;
};
@@ -643,10 +662,6 @@ struct mlx5_priv {
struct dentry *cmdif_debugfs;
/* end: qp staff */
- /* start: cq staff */
- struct mlx5_cq_table cq_table;
- /* end: cq staff */
-
/* start: mkey staff */
struct mlx5_mkey_table mkey_table;
/* end: mkey staff */
@@ -788,6 +803,7 @@ struct mlx5_clock {
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+ struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
@@ -826,7 +842,7 @@ struct mlx5_core_dev {
struct mlx5e_resources mlx5e_res;
struct {
struct mlx5_rsvd_gids reserved_gids;
- atomic_t roce_en;
+ u32 roce_en;
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
@@ -835,6 +851,8 @@ struct mlx5_core_dev {
struct cpu_rmap *rmap;
#endif
struct mlx5_clock clock;
+ struct mlx5_ib_clock_info *clock_info;
+ struct page *clock_info_page;
};
struct mlx5_db {
@@ -922,9 +940,9 @@ struct mlx5_hca_vport_context {
bool grh_required;
};
-static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
+static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
- return buf->direct.buf + offset;
+ return buf->frags->buf + offset;
}
#define STRUCT_FIELD(header, field) \
@@ -963,6 +981,25 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
+static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
+ void *cqc)
+{
+ fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ fbc->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ fbc->sz_m1 = (1 << fbc->log_sz) - 1;
+ fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
+ fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+}
+
+static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
+ u32 ix)
+{
+ unsigned int frag = (ix >> fbc->log_frag_strides);
+
+ return fbc->frag_buf.frags[frag].buf +
+ ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+}
+
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -988,9 +1025,10 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_buf *buf, int node);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+ struct mlx5_frag_buf *buf, int node);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev,
+ int size, struct mlx5_frag_buf *buf);
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -1035,22 +1073,12 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
-int mlx5_eq_init(struct mlx5_core_dev *dev);
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+
+void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name,
- enum mlx5_eq_type type);
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_start_eqs(struct mlx5_core_dev *dev);
-void mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1062,14 +1090,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
@@ -1093,9 +1113,12 @@ int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
-void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl);
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
+ struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
@@ -1103,7 +1126,7 @@ void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id);
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
@@ -1210,6 +1233,12 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
+#define MLX5_VPORT_MANAGER(mdev) \
+ (MLX5_CAP_GEN(mdev, vport_group_manager) && \
+ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
+ mlx5_core_is_pf(mdev))
+
static inline int mlx5_get_gid_table_len(u16 param)
{
if (param > 4) {
@@ -1225,6 +1254,31 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
return !!(dev->priv.rl_table.max_size);
}
+static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
+ MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
+}
+
+static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
+}
+
+static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_mp_slave(dev) ||
+ mlx5_core_is_mp_master(dev);
+}
+
+static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_mp_enabled(dev))
+ return 1;
+
+ return MLX5_CAP_GEN(dev, native_port_num);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
@@ -1238,7 +1292,7 @@ mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
int eqn;
int err;
- err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
+ err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
if (err)
return NULL;
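
mlx5_core_init_cq_frag_buf() derives the stride and size logs from the CQ context, and mlx5_frag_buf_get_wqe() then splits an index into a fragment number (ix >> log_frag_strides) and a byte offset within that fragment. A short sketch combining the two helpers; the wrapper function is illustrative.

#include <linux/mlx5/driver.h>

/* Sketch: derive the fragment-buffer geometry from the CQ context, then
 * fetch entry `ix`.  Only the two helpers come from the hunks above. */
static void *example_get_cqe(struct mlx5_frag_buf_ctrl *fbc, void *cqc, u32 ix)
{
	mlx5_core_init_cq_frag_buf(fbc, cqc);	/* log_stride/log_sz from cqc */
	return mlx5_frag_buf_get_wqe(fbc, ix);	/* frag = ix >> log_frag_strides */
}
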
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
new file mode 100644
index 000000000000..d3c9db492b30
--- /dev/null
+++ b/include/linux/mlx5/eswitch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _MLX5_ESWITCH_
+#define _MLX5_ESWITCH_
+
+#include <linux/mlx5/driver.h>
+
+enum {
+ SRIOV_NONE,
+ SRIOV_LEGACY,
+ SRIOV_OFFLOADS
+};
+
+enum {
+ REP_ETH,
+ REP_IB,
+ NUM_REP_TYPES,
+};
+
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+ int (*load)(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ void *priv;
+ bool valid;
+};
+
+struct mlx5_eswitch_rep {
+ struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
+ u16 vport;
+ u8 hw_id[ETH_ALEN];
+ u16 vlan;
+ u32 vlan_refcount;
+};
+
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index,
+ struct mlx5_eswitch_rep_if *rep_if,
+ u8 rep_type);
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index,
+ u8 rep_type);
+void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
+ int vport,
+ u8 rep_type);
+struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
+ int vport);
+void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
+u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
+ int vport, u32 sqn);
+#endif
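
The new eswitch.h lets an interface register per-vport representor callbacks. A sketch of a REP_ETH registration using only the types and functions declared above; the example_* hooks are placeholders.

#include <linux/mlx5/eswitch.h>

/* Hypothetical representor hooks; only the mlx5_eswitch_* API is from the patch. */
static int example_rep_load(struct mlx5_core_dev *dev,
			    struct mlx5_eswitch_rep *rep)
{
	return 0;
}

static void example_rep_unload(struct mlx5_eswitch_rep *rep)
{
}

static void *example_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return rep->rep_if[REP_ETH].priv;
}

static void example_register_rep(struct mlx5_eswitch *esw, int vport)
{
	struct mlx5_eswitch_rep_if rep_if = {
		.load		= example_rep_load,
		.unload		= example_rep_unload,
		.get_proto_dev	= example_rep_get_proto_dev,
		.priv		= NULL,
	};

	mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
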
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7baa273e..47aecc4fa8c2 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -40,6 +40,8 @@
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18,
};
enum {
@@ -69,6 +71,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
+ MLX5_FLOW_NAMESPACE_EGRESS,
};
struct mlx5_flow_table;
@@ -95,6 +98,10 @@ struct mlx5_flow_destination {
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport);
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
@@ -135,11 +142,20 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_fs_vlan {
+ u16 ethtype;
+ u16 vid;
+ u8 prio;
+};
+
struct mlx5_flow_act {
u32 action;
+ bool has_flow_tag;
u32 flow_tag;
u32 encap_id;
u32 modify_id;
+ uintptr_t esp_id;
+ struct mlx5_fs_vlan vlan;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
new file mode 100644
index 000000000000..9db21cd0e92c
--- /dev/null
+++ b/include/linux/mlx5/fs_helpers.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_HELPERS_
+#define _MLX5_FS_HELPERS_
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_IPV4_VERSION 4
+#define MLX5_FS_IPV6_VERSION 6
+
+static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+
+ return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
+}
+
+static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
+ const u32 *match_v, u8 match)
+{
+ const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match;
+}
+
+static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP);
+}
+
+static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP);
+}
+
+static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+
+ return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni);
+}
+
+static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
+ const u32 *match_c,
+ const u32 *match_v, int version)
+{
+ int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+
+ if (!match_ipv) {
+ u16 ethertype;
+
+ switch (version) {
+ case MLX5_FS_IPV4_VERSION:
+ ethertype = ETH_P_IP;
+ break;
+ case MLX5_FS_IPV6_VERSION:
+ ethertype = ETH_P_IPV6;
+ break;
+ default:
+ return false;
+ }
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
+ ethertype) == 0xffff &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v,
+ ethertype) == ethertype;
+ }
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
+ ip_version) == 0xf &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v,
+ ip_version) == version;
+}
+
+static inline bool
+mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
+ MLX5_FS_IPV4_VERSION);
+}
+
+static inline bool
+mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
+ MLX5_FS_IPV6_VERSION);
+}
+
+static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c)
+{
+ void *misc_params_c =
+ MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+
+ return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
+}
+
+#endif
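
The fs_helpers.h predicates inspect a flow-steering match specification (match_c/match_v pairs). A small illustrative combination of them; the wrapper itself is not part of the patch.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs_helpers.h>

/* Sketch: classify a match spec as plain (non-IPsec) IPv4/TCP. */
static bool example_is_plain_ipv4_tcp(struct mlx5_core_dev *mdev,
				      const u32 *match_c, const u32 *match_v)
{
	return mlx5_fs_is_outer_ipv4_flow(mdev, match_c, match_v) &&
	       mlx5_fs_is_outer_tcp_flow(match_c, match_v) &&
	       !mlx5_fs_is_ipsec_flow(match_c);
}
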
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1391a82da98e..1aad455538f4 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -92,6 +92,8 @@ enum {
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_ALLOC_MEMIC = 0x205,
+ MLX5_CMD_OP_DEALLOC_MEMIC = 0x206,
MLX5_CMD_OP_CREATE_EQ = 0x301,
MLX5_CMD_OP_DESTROY_EQ = 0x302,
MLX5_CMD_OP_QUERY_EQ = 0x303,
@@ -143,6 +145,7 @@ enum {
MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
+ MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -295,7 +298,9 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
- u8 reserved_at_40[0x1a];
+ u8 reserved_at_40[0x17];
+ u8 outer_esp_spi[0x1];
+ u8 reserved_at_58[0x2];
u8 bth_dst_qp[0x1];
u8 reserved_at_5b[0x25];
@@ -311,7 +316,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 flow_table_modify[0x1];
u8 encap[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x17];
+ u8 reserved_at_9[0x1];
+ u8 pop_vlan[0x1];
+ u8 push_vlan[0x1];
+ u8 reserved_at_c[0x14];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -437,7 +445,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_120[0x28];
u8 bth_dst_qp[0x18];
- u8 reserved_at_160[0xa0];
+ u8 reserved_at_160[0x20];
+ u8 outer_esp_spi[0x20];
+ u8 reserved_at_1a0[0x60];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -502,7 +512,7 @@ struct mlx5_ifc_ads_bits {
u8 dei_cfi[0x1];
u8 eth_prio[0x3];
u8 sl[0x4];
- u8 port[0x8];
+ u8 vhca_port_num[0x8];
u8 rmac_47_32[0x10];
u8 rmac_31_0[0x20];
@@ -567,7 +577,10 @@ struct mlx5_ifc_qos_cap_bits {
u8 esw_scheduling[0x1];
u8 esw_bw_share[0x1];
u8 esw_rate_limit[0x1];
- u8 reserved_at_4[0x1c];
+ u8 reserved_at_4[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 packet_pacing_typical_size[0x1];
+ u8 reserved_at_7[0x19];
u8 reserved_at_20[0x20];
@@ -589,6 +602,16 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_debug_cap_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x2];
+ u8 stall_detect[0x1];
+ u8 reserved_at_23[0x1d];
+
+ u8 reserved_at_40[0x7c0];
+};
+
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 csum_cap[0x1];
u8 vlan_cap[0x1];
@@ -651,6 +674,24 @@ struct mlx5_ifc_roce_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_device_mem_cap_bits {
+ u8 memic[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0xb];
+ u8 log_min_memic_alloc_size[0x5];
+ u8 reserved_at_30[0x8];
+ u8 log_max_memic_addr_alignment[0x8];
+
+ u8 memic_bar_start_addr[0x40];
+
+ u8 memic_bar_size[0x20];
+
+ u8 max_memic_size[0x20];
+
+ u8 reserved_at_c0[0x740];
+};
+
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -794,7 +835,10 @@ enum {
};
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x80];
+ u8 reserved_at_0[0x30];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x40];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
@@ -848,7 +892,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
u8 retransmission_q_counters[0x1];
- u8 reserved_at_183[0x1];
+ u8 debug[0x1];
u8 modify_rq_counter_set_id[0x1];
u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
@@ -858,11 +902,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vhca_group_manager[0x1];
u8 ib_virt[0x1];
u8 eth_virt[0x1];
- u8 reserved_at_1a4[0x1];
+ u8 vnic_env_queue_counters[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
- u8 early_vf_enable[0x1];
+ u8 device_memory[0x1];
u8 mcam_reg[0x1];
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
@@ -906,7 +950,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_202[0x1];
u8 ipoib_enhanced_offloads[0x1];
u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_205[0x5];
+ u8 reserved_at_205[0x1];
+ u8 repeated_block_disabled[0x1];
+ u8 umr_modify_entity_size_disabled[0x1];
+ u8 umr_modify_atomic_disabled[0x1];
+ u8 umr_indirect_mkey_disabled[0x1];
u8 umr_fence[0x2];
u8 reserved_at_20c[0x3];
u8 drain_sigerr[0x1];
@@ -990,7 +1038,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_330[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_at_340[0x8];
+ u8 nic_receive_steering_discard[0x1];
+ u8 receive_discard_vport_down[0x1];
+ u8 transmit_discard_vport_down[0x1];
+ u8 reserved_at_343[0x5];
u8 log_max_flow_counter_bulk[0x8];
u8 max_flow_counter_15_0[0x10];
@@ -1014,7 +1065,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_398[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_at_3a0[0x3];
+ u8 ext_stride_num_range[0x1];
+ u8 reserved_at_3a1[0x2];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1023,13 +1075,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_3b8[0x3];
u8 log_min_stride_sz_sq[0x5];
- u8 reserved_at_3c0[0x1b];
+ u8 hairpin[0x1];
+ u8 reserved_at_3c1[0x2];
+ u8 log_max_hairpin_queues[0x5];
+ u8 reserved_at_3c8[0x3];
+ u8 log_max_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3d0[0x3];
+ u8 log_max_hairpin_num_packets[0x5];
+ u8 reserved_at_3d8[0x3];
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
- u8 reserved_at_3e3[0x8];
+ u8 log_min_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3e8[0x3];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1067,7 +1127,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
- u8 reserved_at_600[0x200];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 reserved_at_618[0x6];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1e1];
};
enum mlx5_flow_destination_type {
@@ -1075,6 +1140,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
};
@@ -1163,8 +1229,13 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_at_120[0x15];
- u8 log_wqe_num_of_strides[0x3];
+ u8 reserved_at_120[0x3];
+ u8 log_hairpin_num_packets[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_hairpin_data_sz[0x5];
+
+ u8 reserved_at_130[0x4];
+ u8 log_wqe_num_of_strides[0x4];
u8 two_byte_shift_en[0x1];
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
@@ -1546,7 +1617,17 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
u8 rx_pause_transition_low[0x20];
- u8 reserved_at_3c0[0x400];
+ u8 reserved_at_3c0[0x40];
+
+ u8 device_stall_minor_watermark_cnt_high[0x20];
+
+ u8 device_stall_minor_watermark_cnt_low[0x20];
+
+ u8 device_stall_critical_watermark_cnt_high[0x20];
+
+ u8 device_stall_critical_watermark_cnt_low[0x20];
+
+ u8 reserved_at_480[0x340];
};
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -2261,10 +2342,19 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
+};
+
+struct mlx5_ifc_vlan_bits {
+ u8 ethtype[0x10];
+ u8 prio[0x3];
+ u8 cfi[0x1];
+ u8 vid[0xc];
};
struct mlx5_ifc_flow_context_bits {
- u8 reserved_at_0[0x20];
+ struct mlx5_ifc_vlan_bits push_vlan;
u8 group_id[0x20];
@@ -2340,6 +2430,24 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 reserved_at_180[0x80];
};
+struct mlx5_ifc_vnic_diagnostic_statistics_bits {
+ u8 counter_error_queues[0x20];
+
+ u8 total_error_queues[0x20];
+
+ u8 send_queue_priority_update_flow[0x20];
+
+ u8 reserved_at_60[0x20];
+
+ u8 nic_receive_steering_discard[0x40];
+
+ u8 receive_discard_vport_down[0x40];
+
+ u8 transmit_discard_vport_down[0x40];
+
+ u8 reserved_at_140[0xec0];
+};
+
struct mlx5_ifc_traffic_counter_bits {
u8 packets[0x40];
@@ -2483,7 +2591,8 @@ struct mlx5_ifc_sqc_bits {
u8 state[0x4];
u8 reg_umr[0x1];
u8 allow_swp[0x1];
- u8 reserved_at_e[0x12];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0x11];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2491,7 +2600,13 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_at_60[0x90];
+ u8 reserved_at_60[0x8];
+ u8 hairpin_peer_rq[0x18];
+
+ u8 reserved_at_80[0x10];
+ u8 hairpin_peer_vhca[0x10];
+
+ u8 reserved_at_a0[0x50];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
@@ -2563,7 +2678,8 @@ struct mlx5_ifc_rqc_bits {
u8 state[0x4];
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_e[0x12];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0x11];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2577,7 +2693,13 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_80[0x8];
u8 rmpn[0x18];
- u8 reserved_at_a0[0xe0];
+ u8 reserved_at_a0[0x8];
+ u8 hairpin_peer_sq[0x18];
+
+ u8 reserved_at_c0[0x10];
+ u8 hairpin_peer_vhca[0x10];
+
+ u8 reserved_at_e0[0xa0];
struct mlx5_ifc_wq_bits wq;
};
@@ -2616,7 +2738,12 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xf0];
+ u8 reserved_at_40[0xc];
+
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+
+ u8 reserved_at_60[0xd0];
u8 mtu[0x10];
@@ -2648,12 +2775,17 @@ enum {
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
+ MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
};
struct mlx5_ifc_mkc_bits {
u8 reserved_at_0[0x1];
u8 free[0x1];
- u8 reserved_at_2[0xd];
+ u8 reserved_at_2[0x1];
+ u8 access_mode_4_2[0x3];
+ u8 reserved_at_6[0x7];
+ u8 relaxed_ordering_write[0x1];
+ u8 reserved_at_e[0x1];
u8 small_fence_on_rdma_read_response[0x1];
u8 umr_en[0x1];
u8 a[0x1];
@@ -2661,7 +2793,7 @@ struct mlx5_ifc_mkc_bits {
u8 rr[0x1];
u8 lw[0x1];
u8 lr[0x1];
- u8 access_mode[0x2];
+ u8 access_mode_1_0[0x2];
u8 reserved_at_18[0x8];
u8 qpn[0x18];
@@ -3259,7 +3391,8 @@ struct mlx5_ifc_set_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
@@ -3595,6 +3728,35 @@ struct mlx5_ifc_query_vport_state_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_vnic_env_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env;
+};
+
+enum {
+ MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0,
+};
+
+struct mlx5_ifc_query_vnic_env_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_query_vport_counter_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3879,7 +4041,8 @@ struct mlx5_ifc_query_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
};
@@ -5311,7 +5474,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x14];
+ u8 reserved_at_0[0x12];
+ u8 affiliation[0x1];
+ u8 reserved_at_13[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -5532,6 +5697,7 @@ struct mlx5_ifc_init_hca_in_bits {
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
+ u8 sw_owner_id[4][0x20];
};
struct mlx5_ifc_init2rtr_qp_out_bits {
@@ -7263,7 +7429,12 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 rate_limit[0x20];
- u8 reserved_at_a0[0x160];
+ u8 burst_upper_bound[0x20];
+
+ u8 reserved_at_c0[0x10];
+ u8 typical_packet_size[0x10];
+
+ u8 reserved_at_e0[0x120];
};
struct mlx5_ifc_access_register_out_bits {
@@ -7763,7 +7934,11 @@ struct mlx5_ifc_pifr_reg_bits {
struct mlx5_ifc_pfcc_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 reserved_at_10[0xb];
+ u8 ppan_mask_n[0x1];
+ u8 minor_stall_mask[0x1];
+ u8 critical_stall_mask[0x1];
+ u8 reserved_at_1e[0x2];
u8 ppan[0x4];
u8 reserved_at_24[0x4];
@@ -7773,17 +7948,22 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 pptx[0x1];
u8 aptx[0x1];
- u8 reserved_at_42[0x6];
+ u8 pptx_mask_n[0x1];
+ u8 reserved_at_43[0x5];
u8 pfctx[0x8];
u8 reserved_at_50[0x10];
u8 pprx[0x1];
u8 aprx[0x1];
- u8 reserved_at_62[0x6];
+ u8 pprx_mask_n[0x1];
+ u8 reserved_at_63[0x5];
u8 pfcrx[0x8];
u8 reserved_at_70[0x10];
- u8 reserved_at_80[0x80];
+ u8 device_stall_minor_watermark[0x10];
+ u8 device_stall_critical_watermark[0x10];
+
+ u8 reserved_at_a0[0x60];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -7824,8 +8004,10 @@ struct mlx5_ifc_peir_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x7b];
+ u8 reserved_at_0[0x76];
+ u8 pfcc_mask[0x1];
+ u8 reserved_at_77[0x4];
u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
@@ -8806,4 +8988,57 @@ struct mlx5_ifc_destroy_vport_lag_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_alloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_30[0x20];
+
+ u8 reserved_at_40[0x18];
+ u8 log_memic_addr_alignment[0x8];
+
+ u8 range_start_addr[0x40];
+
+ u8 range_size[0x20];
+
+ u8 memic_size[0x20];
+};
+
+struct mlx5_ifc_alloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 memic_start_addr[0x40];
+};
+
+struct mlx5_ifc_dealloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_start_addr[0x40];
+
+ u8 memic_size[0x20];
+
+ u8 reserved_at_e0[0x20];
+};
+
+struct mlx5_ifc_dealloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
#endif /* MLX5_IFC_H */
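
The new ALLOC_MEMIC/DEALLOC_MEMIC opcodes and mailbox layouts above can be driven through the existing MLX5_SET/MLX5_GET accessors and mlx5_cmd_exec(). The sketch below is illustrative rather than part of the patch; the range and alignment values are the caller's choice, typically taken from the device_mem_cap fields.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative only: allocate 'size' bytes of device memory (MEMIC) inside
 * [range_start, range_start + range_size) and return the device address.
 */
static int example_alloc_memic(struct mlx5_core_dev *dev, u64 range_start,
			       u32 range_size, u32 size, u8 log_align,
			       u64 *addr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	int err;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment, log_align);
	MLX5_SET64(alloc_memic_in, in, range_start_addr, range_start);
	MLX5_SET(alloc_memic_in, in, range_size, range_size);
	MLX5_SET(alloc_memic_in, in, memic_size, size);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*addr = MLX5_GET64(alloc_memic_out, out, memic_start_addr);
	return 0;
}

Freeing would go the same way through dealloc_memic_in, handing back memic_start_addr and memic_size.
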
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 255a88d08078..ec052491ba3d 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -373,7 +373,10 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
struct mlx5_ifc_ipsec_extended_cap_bits {
u8 encapsulation[0x20];
- u8 reserved_0[0x15];
+ u8 reserved_0[0x12];
+ u8 v2_command[0x1];
+ u8 udp_encap[0x1];
+ u8 rx_no_trailer[0x1];
u8 ipv4_fragment[0x1];
u8 ipv6[0x1];
u8 esn[0x1];
@@ -429,4 +432,91 @@ struct mlx5_ifc_ipsec_counters_bits {
u8 dropped_cmd[0x40];
};
+enum mlx5_ifc_fpga_ipsec_response_syndrome {
+ MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
+ MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
+ MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
+ MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
+};
+
+struct mlx5_ifc_fpga_ipsec_cmd_resp {
+ __be32 syndrome;
+ union {
+ __be32 sw_sa_handle;
+ __be32 flags;
+ };
+ u8 reserved[24];
+} __packed;
+
+enum mlx5_ifc_fpga_ipsec_cmd_opcode {
+ MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
+ MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
+ MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
+ MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
+ MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
+ MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
+};
+
+enum mlx5_ifc_fpga_ipsec_cap {
+ MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
+};
+
+struct mlx5_ifc_fpga_ipsec_cmd_cap {
+ __be32 cmd;
+ __be32 flags;
+ u8 reserved[24];
+} __packed;
+
+enum mlx5_ifc_fpga_ipsec_sa_flags {
+ MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
+ MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
+ MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
+ MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
+ MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
+ MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
+ MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
+ MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
+};
+
+enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
+ MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
+ MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
+ MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
+};
+
+struct mlx5_ifc_fpga_ipsec_sa_v1 {
+ __be32 cmd;
+ u8 key_enc[32];
+ u8 key_auth[32];
+ __be32 sip[4];
+ __be32 dip[4];
+ union {
+ struct {
+ __be32 reserved;
+ u8 salt_iv[8];
+ __be32 salt;
+ } __packed gcm;
+ struct {
+ u8 salt[16];
+ } __packed cbc;
+ };
+ __be32 spi;
+ __be32 sw_sa_handle;
+ __be16 tfclen;
+ u8 enc_mode;
+ u8 reserved1[2];
+ u8 flags;
+ u8 reserved2[2];
+};
+
+struct mlx5_ifc_fpga_ipsec_sa {
+ struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
+ __be16 udp_sp;
+ __be16 udp_dp;
+ u8 reserved1[4];
+ __be32 esn;
+ __be16 vid; /* only 12 bits, rest is reserved */
+ __be16 reserved2;
+} __packed;
+
#endif /* MLX5_IFC_FPGA_H */
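
Not part of the patch: a small sketch of filling the SET_CAP command with the new no-trailer capability bit. Byte-order handling (cpu_to_be32) is the only subtlety, since these FPGA command structures are plain big-endian words rather than mlx5_ifc bit layouts.

#include <linux/string.h>
#include <asm/byteorder.h>
#include <linux/mlx5/mlx5_ifc_fpga.h>

/* Illustrative only: request RX no-trailer mode if the device offers it. */
static void example_fill_set_cap(struct mlx5_ifc_fpga_ipsec_cmd_cap *cmd,
				 u32 device_flags)
{
	u32 flags = 0;

	if (device_flags & MLX5_FPGA_IPSEC_CAP_NO_TRAILER)
		flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;

	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd = cpu_to_be32(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
	cmd->flags = cpu_to_be32(flags);
}
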
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 035f0d4dc9fe..34aed6032f86 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -151,6 +151,12 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
u8 *pfc_en_rx);
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark, u16 *stall_minor_watermark);
+
int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
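
Not part of the patch: a short sketch of adjusting only the minor stall watermark with the two helpers declared above; the 16-bit units are assumed to match the PFCC register fields added earlier in this series.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>

/* Illustrative only: change the minor watermark, preserving the critical one. */
static int example_set_minor_watermark(struct mlx5_core_dev *dev, u16 minor)
{
	u16 critical, old_minor;
	int err;

	err = mlx5_query_port_stall_watermark(dev, &critical, &old_minor);
	if (err)
		return err;

	return mlx5_set_port_stall_watermark(dev, critical, minor);
}
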
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 62af7512dabb..4778d41085d4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -473,6 +473,11 @@ struct mlx5_core_qp {
int pid;
};
+struct mlx5_core_dct {
+ struct mlx5_core_qp mqp;
+ struct completion drained;
+};
+
struct mlx5_qp_path {
u8 fl_free_ar;
u8 rsvd3;
@@ -549,6 +554,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *qp,
+ u32 *in, int inlen);
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in,
@@ -558,8 +566,12 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
u32 timeout_usec);
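
A hedged sketch of the new DCT lifecycle follows; it is not part of the patch and assumes the pre-existing create_dct_in/dctc layouts from mlx5_ifc.h (not shown in this hunk). Real callers set many more dctc fields (access key, MTU, port, and so on).

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/qp.h>

/* Illustrative only: create a DCT bound to an existing CQ, SRQ/XRQ and PD. */
static int example_create_dct(struct mlx5_core_dev *dev,
			      struct mlx5_core_dct *dct,
			      u32 cqn, u32 srqn, u32 pdn)
{
	u32 in[MLX5_ST_SZ_DW(create_dct_in)] = {};
	void *dctc = MLX5_ADDR_OF(create_dct_in, in, dct_context_entry);

	MLX5_SET(dctc, dctc, cqn, cqn);
	MLX5_SET(dctc, dctc, srqn_xrqn, srqn);
	MLX5_SET(dctc, dctc, pd, pdn);

	return mlx5_core_create_dct(dev, dct, in, sizeof(in));
}

Teardown goes through mlx5_core_destroy_dct(); the drained completion embedded in struct mlx5_core_dct is there so the core can wait for the DCT to drain before it is freed.
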
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 88441f5ece25..83a33a1873a6 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -47,6 +47,7 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
+int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
@@ -66,7 +67,6 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rmpn);
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
@@ -75,4 +75,27 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen);
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+struct mlx5_hairpin_params {
+ u8 log_data_size;
+ u8 log_num_packets;
+ u16 q_counter;
+ int num_channels;
+};
+
+struct mlx5_hairpin {
+ struct mlx5_core_dev *func_mdev;
+ struct mlx5_core_dev *peer_mdev;
+
+ int num_channels;
+
+ u32 *rqn;
+ u32 *sqn;
+};
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params);
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
#endif /* __TRANSOBJ_H__ */
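
Not part of the patch: a minimal sketch of pairing two functions with the new hairpin API. The log sizes below are arbitrary, and ERR_PTR()-style error returns are assumed since the declarations alone do not say.

#include <linux/err.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

/* Illustrative only: create a single-channel hairpin between two devices. */
static struct mlx5_hairpin *example_hairpin(struct mlx5_core_dev *func_mdev,
					    struct mlx5_core_dev *peer_mdev,
					    u16 q_counter)
{
	struct mlx5_hairpin_params params = {
		.log_data_size   = 16,	/* arbitrary example sizes */
		.log_num_packets = 7,
		.q_counter       = q_counter,
		.num_channels    = 1,
	};
	struct mlx5_hairpin *hp;

	hp = mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
	if (IS_ERR(hp))
		return hp;

	/* hp->rqn[0] / hp->sqn[0] can now be wired into TIRs and flow rules;
	 * mlx5_core_hairpin_destroy(hp) releases everything.
	 */
	return hp;
}
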
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aaa0bb9e7655..9208cb8809ac 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -107,6 +107,9 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+ u64 *rx_discard_vport_down,
+ u64 *tx_discard_vport_down);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
int vf, u8 port_num, void *out,
size_t out_sz);
@@ -116,4 +119,8 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *req);
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev);
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
#endif /* __MLX5_VPORT_H__ */
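
A brief, illustrative sketch (not part of the patch) of reading the new vport-down discard counters; passing vport 0 to mean the caller's own vport is an assumption borrowed from the other mlx5 vport queries.

#include <linux/printk.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

/* Illustrative only: report packets discarded while the vport was down. */
static void example_log_vport_down_drops(struct mlx5_core_dev *mdev)
{
	u64 rx_dropped, tx_dropped;

	if (mlx5_query_vport_down_stats(mdev, 0, &rx_dropped, &tx_dropped))
		return;

	pr_info("vport down discards: rx %llu tx %llu\n",
		rx_dropped, tx_dropped);
}
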
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ea818ff739cd..1ac1f06a4be6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -241,10 +241,11 @@ extern unsigned int kobjsize(const void *objp);
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
-#elif defined(CONFIG_METAG)
-# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP VM_ARCH_1
+#elif defined(CONFIG_SPARC64)
+# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
+# define VM_ARCH_CLEAR VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
@@ -287,6 +288,12 @@ extern unsigned int kobjsize(const void *objp);
/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
+/* Arch-specific flags to clear when updating VM flags on protection change */
+#ifndef VM_ARCH_CLEAR
+# define VM_ARCH_CLEAR VM_NONE
+#endif
+#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
+
/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
@@ -379,17 +386,19 @@ struct vm_operations_struct {
void (*close)(struct vm_area_struct * area);
int (*split)(struct vm_area_struct * area, unsigned long addr);
int (*mremap)(struct vm_area_struct * area);
- int (*fault)(struct vm_fault *vmf);
- int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
+ vm_fault_t (*fault)(struct vm_fault *vmf);
+ vm_fault_t (*huge_fault)(struct vm_fault *vmf,
+ enum page_entry_size pe_size);
void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
+ unsigned long (*pagesize)(struct vm_area_struct * area);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
- int (*page_mkwrite)(struct vm_fault *vmf);
+ vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
- int (*pfn_mkwrite)(struct vm_fault *vmf);
+ vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
@@ -738,7 +747,7 @@ int finish_mkwrite_fault(struct vm_fault *vmf);
* refcount. The each user mapping also has a reference to the page.
*
* The pagecache pages are stored in a per-mapping radix tree, which is
- * rooted at mapping->page_tree, and indexed by offset.
+ * rooted at mapping->i_pages, and indexed by offset.
* Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
* lists, we instead now tag pages as dirty/writeback in the radix tree.
*
@@ -896,7 +905,9 @@ extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
- return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+ struct page *p = (struct page *)page;
+
+ return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
@@ -1145,6 +1156,7 @@ static inline pgoff_t page_index(struct page *page)
bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
+struct address_space *page_mapping_file(struct page *page);
/*
* Return true only if the page has been allocated with
@@ -1312,8 +1324,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
-void unmap_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
unsigned long *start, unsigned long *end,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
@@ -1324,12 +1334,6 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address,
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
-static inline void unmap_shared_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen)
-{
- unmap_mapping_range(mapping, holebegin, holelen, 0);
-}
-
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -1344,6 +1348,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
+void unmap_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t nr, bool even_cows);
+void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline int handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
@@ -1360,10 +1368,20 @@ static inline int fixup_user_fault(struct task_struct *tsk,
BUG();
return -EFAULT;
}
+static inline void unmap_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t nr, bool even_cows) { }
+static inline void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
- unsigned int gup_flags);
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen)
+{
+ unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+ void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
@@ -1448,6 +1466,7 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
+void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
@@ -2069,8 +2088,8 @@ static inline void zero_resv_unavail(void) {}
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long,
- unsigned long, enum memmap_context);
+extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
+ enum memmap_context, struct vmem_altmap *);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
@@ -2090,6 +2109,7 @@ extern void setup_per_cpu_pageset(void);
extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);
+extern void setup_zone_pageset(struct zone *zone);
/* page_alloc.c */
extern int min_free_kbytes;
@@ -2407,6 +2427,44 @@ int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
+ unsigned long addr, struct page *page)
+{
+ int err = vm_insert_page(vma, addr, page);
+
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn)
+{
+ int err = vm_insert_mixed(vma, addr, pfn);
+
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ int err = vm_insert_pfn(vma, addr, pfn);
+
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int foll_flags,
@@ -2538,7 +2596,8 @@ void sparse_mem_maps_populate_node(struct page **map_map,
unsigned long map_count,
int nodeid);
-struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
+ struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
@@ -2546,20 +2605,17 @@ pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
-void *__vmemmap_alloc_block_buf(unsigned long size, int node,
- struct vmem_altmap *altmap);
-static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
-{
- return __vmemmap_alloc_block_buf(size, node, NULL);
-}
-
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
+void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node);
-int vmemmap_populate(unsigned long start, unsigned long end, int node);
+int vmemmap_populate(unsigned long start, unsigned long end, int node,
+ struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
-void vmemmap_free(unsigned long start, unsigned long end);
+void vmemmap_free(unsigned long start, unsigned long end,
+ struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
unsigned long nr_pages);
@@ -2570,15 +2626,15 @@ enum mf_flags {
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
};
-extern int memory_failure(unsigned long pfn, int trapno, int flags);
-extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
+extern int memory_failure(unsigned long pfn, int flags);
+extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page) put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
-extern atomic_long_t num_poisoned_pages;
+extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);
@@ -2600,6 +2656,7 @@ enum mf_action_page_type {
MF_MSG_POISONED_HUGE,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
+ MF_MSG_NON_PMD_HUGE,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
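
The vmf_insert_*() wrappers above let fault handlers return vm_fault_t codes directly instead of translating errnos by hand. Below is an illustrative driver ->fault handler written against the updated vm_operations_struct; the zero-page-per-fault policy is only an example, and a real driver would keep track of the page so it can drop its own reference at teardown.

#include <linux/mm.h>
#include <linux/gfp.h>

/* Illustrative only: hand out a freshly zeroed page on first touch. */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	vm_fault_t ret;

	if (!page)
		return VM_FAULT_OOM;

	/* Maps -ENOMEM, -EBUSY and friends onto VM_FAULT_* for us. */
	ret = vmf_insert_page(vmf->vma, vmf->address, page);
	if (ret != VM_FAULT_NOPAGE)
		__free_page(page);

	return ret;
}

static const struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
};
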
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index c30b32e3c862..10191c28fc04 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
-#ifdef arch_unmap_kpfn
-extern void arch_unmap_kpfn(unsigned long pfn);
-#else
-static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
-#endif
-
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cfd0ac4e5e0e..21612347d311 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -22,6 +22,8 @@
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
+typedef int vm_fault_t;
+
struct address_space;
struct mem_cgroup;
struct hmm;
@@ -31,28 +33,56 @@ struct hmm;
* it to keep track of whatever it is we are using the page for at the
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
- * who is mapping it.
+ * who is mapping it. If you allocate the page using alloc_pages(), you
+ * can use some of the space in struct page for your own purposes.
+ *
+ * Pages that were once in the page cache may be found under the RCU lock
+ * even after they have been recycled to a different purpose. The page
+ * cache reads and writes some of the fields in struct page to pin the
+ * page before checking that it's still in the page cache. It is vital
+ * that all users of struct page:
+ * 1. Use the first word as PageFlags.
+ * 2. Clear or preserve bit 0 of page->compound_head. It is used as
+ * PageTail for compound pages, and the page cache must not see false
+ * positives. Some users put a pointer here (guaranteed to be at least
+ * 4-byte aligned), other users avoid using the field altogether.
+ * 3. page->_refcount must either not be used, or must be used in such a
+ * way that other CPUs temporarily incrementing and then decrementing the
+ * refcount does not cause problems. On receiving the page from
+ * alloc_pages(), the refcount will be positive.
+ * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
*
- * The objects in struct page are organized in double word blocks in
- * order to allows us to use atomic double word operations on portions
- * of struct page. That is currently only used by slub but the arrangement
- * allows the use of atomic double word operations on the flags/mapping
- * and lru list pointers also.
+ * If you allocate pages of order > 0, you can use the fields in the struct
+ * page associated with each page, but bear in mind that the pages may have
+ * been inserted individually into the page cache, so you must use the above
+ * four fields in a compatible way for each struct page.
+ *
+ * SLUB uses cmpxchg_double() to atomically update its freelist and
+ * counters. That requires that freelist & counters be adjacent and
+ * double-word aligned. We align all struct pages to double-word
+ * boundaries, and ensure that 'freelist' is aligned within the
+ * struct.
*/
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
+#define _slub_counter_t unsigned long
+#else
+#define _slub_counter_t unsigned int
+#endif
+#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#define _struct_page_alignment
+#define _slub_counter_t unsigned int
+#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+
struct page {
/* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
union {
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object
- * or KSM private structure. See
- * PAGE_MAPPING_ANON and
- * PAGE_MAPPING_KSM.
- */
+ /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
+ struct address_space *mapping;
+
void *s_mem; /* slab first object */
atomic_t compound_mapcount; /* first tail page */
/* page_deferred_list().next -- second tail page */
@@ -66,40 +96,27 @@ struct page {
};
union {
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
- defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
- /* Used for cmpxchg_double in slub */
- unsigned long counters;
-#else
- /*
- * Keep _refcount separate from slub cmpxchg_double data.
- * As the rest of the double word is protected by slab_lock
- * but _refcount is not.
- */
- unsigned counters;
-#endif
- struct {
+ _slub_counter_t counters;
+ unsigned int active; /* SLAB */
+ struct { /* SLUB */
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ int units; /* SLOB */
+
+ struct { /* Page cache */
+ /*
+ * Count of ptes mapped in mms, to show when
+ * page is mapped & limit reverse map searches.
+ *
+ * Extra information about page type may be
+ * stored here for pages that are never mapped,
+ * in which case the value MUST BE <= -2.
+ * See page-flags.h for more details.
+ */
+ atomic_t _mapcount;
- union {
- /*
- * Count of ptes mapped in mms, to show when
- * page is mapped & limit reverse map searches.
- *
- * Extra information about page type may be
- * stored here for pages that are never mapped,
- * in which case the value MUST BE <= -2.
- * See page-flags.h for more details.
- */
- atomic_t _mapcount;
-
- unsigned int active; /* SLAB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- int units; /* SLOB */
- };
/*
* Usage count, *USE WRAPPER FUNCTION* when manual
* accounting. See page_ref.h
@@ -109,8 +126,6 @@ struct page {
};
/*
- * Third double word block
- *
* WARNING: bit 0 of the first word encode PageTail(). That means
* the rest users of the storage space MUST NOT use the bit to
* avoid collision and false-positive PageTail().
@@ -145,19 +160,9 @@ struct page {
unsigned long compound_head; /* If bit zero is set */
/* First tail page only */
-#ifdef CONFIG_64BIT
- /*
- * On 64 bit system we have enough space in struct page
- * to encode compound_dtor and compound_order with
- * unsigned int. It can help compiler generate better or
- * smaller code on some archtectures.
- */
- unsigned int compound_dtor;
- unsigned int compound_order;
-#else
- unsigned short int compound_dtor;
- unsigned short int compound_order;
-#endif
+ unsigned char compound_dtor;
+ unsigned char compound_order;
+ /* two/six bytes available here */
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
@@ -171,15 +176,14 @@ struct page {
#endif
};
- /* Remainder is not double word aligned */
union {
- unsigned long private; /* Mapping-private opaque data:
- * usually used for buffer_heads
- * if PagePrivate set; used for
- * swp_entry_t if PageSwapCache;
- * indicates order in the buddy
- * system if PG_buddy is set.
- */
+ /*
+ * Mapping-private opaque data:
+ * Usually used for buffer_heads if PagePrivate
+ * Used for swp_entry_t if PageSwapCache
+ * Indicates order in the buddy system if PageBuddy
+ */
+ unsigned long private;
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;
@@ -212,15 +216,7 @@ struct page {
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
-}
-/*
- * The struct page can be forced to be double word aligned so that atomic ops
- * on double words work. The SLUB allocator can make use of such a feature.
- */
-#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
- __aligned(2 * sizeof(unsigned long))
-#endif
-;
+} _struct_page_alignment;
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 6a4d1caaff5c..4b08e9c9c538 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -92,7 +92,7 @@ static inline void vm_unacct_memory(long pages)
*
* Returns true if the prot flags are valid
*/
-static inline bool arch_validate_prot(unsigned long prot)
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index e7743eca1021..85146235231e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -324,6 +324,7 @@ struct mmc_host {
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */
#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
@@ -380,6 +381,7 @@ struct mmc_host {
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
+ unsigned int use_blk_mq:1; /* use blk-mq */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -422,9 +424,6 @@ struct mmc_host {
struct dentry *debugfs_root;
- struct mmc_async_req *areq; /* active async req */
- struct mmc_context_info context_info; /* async synchronization info */
-
/* Ongoing data transfer that allows commands during transfer */
struct mmc_request *ongoing_mrq;
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 82f0d289f110..06607c59c4d0 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -31,7 +31,9 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int debounce, bool *gpio_invert);
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
+int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
bool mmc_can_gpio_cd(struct mmc_host *host);
+bool mmc_can_gpio_ro(struct mmc_host *host);
#endif
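
Illustrative only, not part of the patch: how a host driver's suspend path might arm the card-detect GPIO as a wake source with the new helper, keyed on the existing MMC_CAP_CD_WAKE capability.

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

/* Illustrative only: arm CD wake on entry to system suspend. */
static int example_host_suspend(struct mmc_host *host)
{
	if (host->caps & MMC_CAP_CD_WAKE)
		return mmc_gpio_set_cd_wake(host, true);

	return 0;
}
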
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 57b0030d3800..2ad72d2c8cc5 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -37,10 +37,10 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
-#define VM_WARN_ON(cond) WARN_ON(cond)
-#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
-#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
-#define VM_WARN(cond, format...) WARN(cond, format)
+#define VM_WARN_ON(cond) (void)WARN_ON(cond)
+#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
+#define VM_WARN(cond, format...) (void)WARN(cond, format)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index b25dc9db19fc..2d07a1ed5a31 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H
+#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
@@ -10,6 +11,9 @@
struct mmu_notifier;
struct mmu_notifier_ops;
+/* mmu_notifier_ops flags */
+#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
+
#ifdef CONFIG_MMU_NOTIFIER
/*
@@ -27,6 +31,15 @@ struct mmu_notifier_mm {
struct mmu_notifier_ops {
/*
+ * Flags to specify behavior of callbacks for this MMU notifier.
+ * Used to determine in which context an operation may be called.
+ *
+ * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not
+ * block
+ */
+ int flags;
+
+ /*
* Called either by mmu_notifier_unregister or when the mm is
* being destroyed by exit_mmap, always before all pages are
* freed. This can run concurrently with other mmu notifier
@@ -137,6 +150,10 @@ struct mmu_notifier_ops {
* page. Pages will no longer be referenced by the linux
* address space but may still be referenced by sptes until
* the last refcount is dropped.
+ *
+ * If neither of these callbacks can block, and invalidate_range
+ * does not block either, mmu_notifier_ops.flags should have
+ * MMU_INVALIDATE_DOES_NOT_BLOCK set.
*/
void (*invalidate_range_start)(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -159,12 +176,13 @@ struct mmu_notifier_ops {
* external TLB range needs to be flushed. For more in depth
* discussion on this see Documentation/vm/mmu_notifier.txt
*
- * The invalidate_range() function is called under the ptl
- * spin-lock and not allowed to sleep.
- *
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
+ *
+ * If this callback cannot block, and invalidate_range_{start,end}
+ * cannot block, mmu_notifier_ops.flags should have
+ * MMU_INVALIDATE_DOES_NOT_BLOCK set.
*/
void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
unsigned long start, unsigned long end);
@@ -218,6 +236,7 @@ extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -457,6 +476,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
{
}
+static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
+{
+ return false;
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}
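
Not part of the patch: a minimal sketch of a notifier that opts in to the new non-blocking contract, so mm_has_blockable_invalidate_notifiers() can report false for an mm whose notifiers all look like this one. The callback body is a placeholder.

#include <linux/mmu_notifier.h>

/* Must not sleep: the ops below declare MMU_INVALIDATE_DOES_NOT_BLOCK. */
static void example_invalidate_range(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	/* e.g. kick off an asynchronous secondary-TLB invalidation here */
}

static const struct mmu_notifier_ops example_mn_ops = {
	.flags			= MMU_INVALIDATE_DOES_NOT_BLOCK,
	.invalidate_range	= example_invalidate_range,
};
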
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 67f2e3c38939..32699b2dc52a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -180,6 +180,7 @@ enum node_stat_item {
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
NR_VM_NODE_STAT_ITEMS
};
@@ -633,14 +634,15 @@ typedef struct pglist_data {
#ifndef CONFIG_NO_BOOTMEM
struct bootmem_data *bdata;
#endif
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* Must be held any time you expect node_start_pfn, node_present_pages
* or node_spanned_pages stay constant. Holding this will also
* guarantee that any pfn_valid() stays that way.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
- * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
+ * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
+ * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
*
* Nests above zone->lock and zone->span_seqlock
*/
@@ -775,7 +777,8 @@ static inline bool is_dev_zone(const struct zone *zone)
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
-void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
+ enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int classzone_idx, unsigned int alloc_flags,
long free_pages);
@@ -816,10 +819,6 @@ int local_memory_node(int node_id);
static inline int local_memory_node(int node_id) { return node_id; };
#endif
-#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
-unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
-#endif
-
/*
* zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
*/
@@ -886,7 +885,7 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
+extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
@@ -1166,8 +1165,16 @@ extern unsigned long usemap_size(void);
/*
* We use the lower bits of the mem_map pointer to store
- * a little bit of information. There should be at least
- * 3 bits here due to 32-bit alignment.
+ * a little bit of information. The pointer is calculated
+ * as mem_map - section_nr_to_pfn(pnum). The result is
+ * aligned to the minimum alignment of the two values:
+ * 1. All mem_map arrays are page-aligned.
+ * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
+ * lowest bits. PFN_SECTION_SHIFT is arch-specific
+ * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
+ * worst combination is powerpc with 256k pages,
+ * which results in PFN_SECTION_SHIFT equal 6.
+ * To sum it up, at least 6 bits are available.
*/
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
@@ -1281,7 +1288,6 @@ struct mminit_pfnnid_cache {
#endif
void memory_present(int nid, unsigned long start, unsigned long end);
-unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
/*
* If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index abb6dc2ebbf8..48fb2b43c35a 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -229,6 +229,12 @@ struct hda_device_id {
unsigned long driver_data;
};
+struct sdw_device_id {
+ __u16 mfg_id;
+ __u16 part_id;
+ kernel_ulong_t driver_data;
+};
+
/*
* Struct used for matching a device
*/
@@ -452,6 +458,19 @@ struct spi_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */
};
+/* SLIMbus */
+
+#define SLIMBUS_NAME_SIZE 32
+#define SLIMBUS_MODULE_PREFIX "slim:"
+
+struct slim_device_id {
+ __u16 manf_id, prod_code;
+ __u16 dev_index, instance;
+
+ /* Data private to the driver */
+ kernel_ulong_t driver_data;
+};
+
#define SPMI_NAME_SIZE 32
#define SPMI_MODULE_PREFIX "spmi:"
diff --git a/include/linux/module.h b/include/linux/module.h
index c69b49abe877..d44df9b2c131 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -19,6 +19,7 @@
#include <linux/jump_label.h>
#include <linux/export.h>
#include <linux/rbtree_latch.h>
+#include <linux/error-injection.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -475,6 +476,11 @@ struct module {
ctor_fn_t *ctors;
unsigned int num_ctors;
#endif
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+ struct error_injection_entry *ei_funcs;
+ unsigned int num_ei_funcs;
+#endif
} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
@@ -485,7 +491,7 @@ extern struct mutex module_mutex;
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
(IDE & SCSI) require entry into the module during init.*/
-static inline int module_is_live(struct module *mod)
+static inline bool module_is_live(struct module *mod)
{
return mod->state != MODULE_STATE_GOING;
}
@@ -606,6 +612,9 @@ int ref_module(struct module *a, struct module *b);
__mod ? __mod->name : "kernel"; \
})
+/* Dereference module function descriptor */
+void *dereference_module_function_descriptor(struct module *mod, void *ptr);
+
/* For kallsyms to ask for address resolution. namebuf should be at
* least KSYM_NAME_LEN long: a pointer to namebuf is returned if
* found, otherwise NULL. */
@@ -760,6 +769,13 @@ static inline bool is_module_sig_enforced(void)
return false;
}
+/* Dereference module function descriptor */
+static inline
+void *dereference_module_function_descriptor(struct module *mod, void *ptr)
+{
+ return ptr;
+}
+
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
@@ -801,6 +817,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef RETPOLINE
+extern bool retpoline_module_ok(bool has_retpoline);
+#else
+static inline bool retpoline_module_ok(bool has_retpoline)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_MODULE_SIG
static inline bool module_sig_ok(struct module *module)
{
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 5396521a776a..9a36fad9e068 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -4,11 +4,10 @@
#include <linux/in.h>
#include <linux/pim.h>
-#include <linux/rhashtable.h>
-#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <uapi/linux/mroute.h>
+#include <linux/mroute_base.h>
#ifdef CONFIG_IP_MROUTE
static inline int ip_mroute_opt(int opt)
@@ -56,56 +55,8 @@ static inline bool ipmr_rule_default(const struct fib_rule *rule)
}
#endif
-struct vif_device {
- struct net_device *dev; /* Device we are using */
- struct netdev_phys_item_id dev_parent_id; /* Device parent ID */
- unsigned long bytes_in,bytes_out;
- unsigned long pkt_in,pkt_out; /* Statistics */
- unsigned long rate_limit; /* Traffic shaping (NI) */
- unsigned char threshold; /* TTL threshold */
- unsigned short flags; /* Control flags */
- __be32 local,remote; /* Addresses(remote for tunnels)*/
- int link; /* Physical interface index */
-};
-
-struct vif_entry_notifier_info {
- struct fib_notifier_info info;
- struct net_device *dev;
- vifi_t vif_index;
- unsigned short vif_flags;
- u32 tb_id;
-};
-
#define VIFF_STATIC 0x8000
-#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
-
-struct mr_table {
- struct list_head list;
- possible_net_t net;
- u32 id;
- struct sock __rcu *mroute_sk;
- struct timer_list ipmr_expire_timer;
- struct list_head mfc_unres_queue;
- struct vif_device vif_table[MAXVIFS];
- struct rhltable mfc_hash;
- struct list_head mfc_cache_list;
- int maxvif;
- atomic_t cache_resolve_queue_len;
- bool mroute_do_assert;
- bool mroute_do_pim;
- int mroute_reg_vif_num;
-};
-
-/* mfc_flags:
- * MFC_STATIC - the entry was added statically (not by a routing daemon)
- * MFC_OFFLOAD - the entry was offloaded to the hardware
- */
-enum {
- MFC_STATIC = BIT(0),
- MFC_OFFLOAD = BIT(1),
-};
-
struct mfc_cache_cmp_arg {
__be32 mfc_mcastgrp;
__be32 mfc_origin;
@@ -113,28 +64,13 @@ struct mfc_cache_cmp_arg {
/**
* struct mfc_cache - multicast routing entries
- * @mnode: rhashtable list
+ * @_c: Common multicast routing information; has to be first [for casting]
* @mfc_mcastgrp: destination multicast group address
* @mfc_origin: source address
* @cmparg: used for rhashtable comparisons
- * @mfc_parent: source interface (iif)
- * @mfc_flags: entry flags
- * @expires: unresolved entry expire time
- * @unresolved: unresolved cached skbs
- * @last_assert: time of last assert
- * @minvif: minimum VIF id
- * @maxvif: maximum VIF id
- * @bytes: bytes that have passed for this entry
- * @pkt: packets that have passed for this entry
- * @wrong_if: number of wrong source interface hits
- * @lastuse: time of last use of the group (traffic or update)
- * @ttls: OIF TTL threshold array
- * @refcount: reference count for this entry
- * @list: global entry list
- * @rcu: used for entry destruction
*/
struct mfc_cache {
- struct rhlist_head mnode;
+ struct mr_mfc _c;
union {
struct {
__be32 mfc_mcastgrp;
@@ -142,57 +78,10 @@ struct mfc_cache {
};
struct mfc_cache_cmp_arg cmparg;
};
- vifi_t mfc_parent;
- int mfc_flags;
-
- union {
- struct {
- unsigned long expires;
- struct sk_buff_head unresolved;
- } unres;
- struct {
- unsigned long last_assert;
- int minvif;
- int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
- unsigned long lastuse;
- unsigned char ttls[MAXVIFS];
- refcount_t refcount;
- } res;
- } mfc_un;
- struct list_head list;
- struct rcu_head rcu;
-};
-
-struct mfc_entry_notifier_info {
- struct fib_notifier_info info;
- struct mfc_cache *mfc;
- u32 tb_id;
};
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
struct rtmsg *rtm, u32 portid);
-
-#ifdef CONFIG_IP_MROUTE
-void ipmr_cache_free(struct mfc_cache *mfc_cache);
-#else
-static inline void ipmr_cache_free(struct mfc_cache *mfc_cache)
-{
-}
-#endif
-
-static inline void ipmr_cache_put(struct mfc_cache *c)
-{
- if (refcount_dec_and_test(&c->mfc_un.res.refcount))
- ipmr_cache_free(c);
-}
-static inline void ipmr_cache_hold(struct mfc_cache *c)
-{
- refcount_inc(&c->mfc_un.res.refcount);
-}
-
#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 3014c52bfd86..c4a45859f586 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -7,6 +7,8 @@
#include <linux/skbuff.h> /* for struct sk_buff_head */
#include <net/net_namespace.h>
#include <uapi/linux/mroute6.h>
+#include <linux/mroute_base.h>
+#include <net/fib_rules.h>
#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
@@ -62,57 +64,33 @@ static inline void ip6_mr_cleanup(void)
}
#endif
-struct mif_device {
- struct net_device *dev; /* Device we are using */
- unsigned long bytes_in,bytes_out;
- unsigned long pkt_in,pkt_out; /* Statistics */
- unsigned long rate_limit; /* Traffic shaping (NI) */
- unsigned char threshold; /* TTL threshold */
- unsigned short flags; /* Control flags */
- int link; /* Physical interface index */
-};
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+bool ip6mr_rule_default(const struct fib_rule *rule);
+#else
+static inline bool ip6mr_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+#endif
#define VIFF_STATIC 0x8000
-struct mfc6_cache {
- struct list_head list;
- struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
- struct in6_addr mf6c_origin; /* Source of packet */
- mifi_t mf6c_parent; /* Source interface */
- int mfc_flags; /* Flags on line */
+struct mfc6_cache_cmp_arg {
+ struct in6_addr mf6c_mcastgrp;
+ struct in6_addr mf6c_origin;
+};
+struct mfc6_cache {
+ struct mr_mfc _c;
union {
struct {
- unsigned long expires;
- struct sk_buff_head unresolved; /* Unresolved buffers */
- } unres;
- struct {
- unsigned long last_assert;
- int minvif;
- int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
- unsigned long lastuse;
- unsigned char ttls[MAXMIFS]; /* TTL thresholds */
- } res;
- } mfc_un;
+ struct in6_addr mf6c_mcastgrp;
+ struct in6_addr mf6c_origin;
+ };
+ struct mfc6_cache_cmp_arg cmparg;
+ };
};
-#define MFC_STATIC 1
-#define MFC_NOTIFY 2
-
-#define MFC6_LINES 64
-
-#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \
- (__force u32)(a)->s6_addr32[1] ^ \
- (__force u32)(a)->s6_addr32[2] ^ \
- (__force u32)(a)->s6_addr32[3] ^ \
- (__force u32)(g)->s6_addr32[0] ^ \
- (__force u32)(g)->s6_addr32[1] ^ \
- (__force u32)(g)->s6_addr32[2] ^ \
- (__force u32)(g)->s6_addr32[3]) % MFC6_LINES)
-
#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
struct rtmsg;
@@ -120,12 +98,12 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
struct rtmsg *rtm, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
-extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
+bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
extern int ip6mr_sk_done(struct sock *sk);
#else
-static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
+static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
- return NULL;
+ return false;
}
static inline int ip6mr_sk_done(struct sock *sk)
{
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
new file mode 100644
index 000000000000..d617fe45543e
--- /dev/null
+++ b/include/linux/mroute_base.h
@@ -0,0 +1,474 @@
+#ifndef __LINUX_MROUTE_BASE_H
+#define __LINUX_MROUTE_BASE_H
+
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/spinlock.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/fib_notifier.h>
+
+/**
+ * struct vif_device - interface representor for multicast routing
+ * @dev: network device being used
+ * @bytes_in: statistic; bytes ingressing
+ * @bytes_out: statistic; bytes egressing
+ * @pkt_in: statistic; packets ingressing
+ * @pkt_out: statistic; packets egressing
+ * @rate_limit: Traffic shaping (NI)
+ * @threshold: TTL threshold
+ * @flags: Control flags
+ * @link: Physical interface index
+ * @dev_parent_id: device parent id
+ * @local: Local address
+ * @remote: Remote address for tunnels
+ */
+struct vif_device {
+ struct net_device *dev;
+ unsigned long bytes_in, bytes_out;
+ unsigned long pkt_in, pkt_out;
+ unsigned long rate_limit;
+ unsigned char threshold;
+ unsigned short flags;
+ int link;
+
+ /* Currently only used by ipmr */
+ struct netdev_phys_item_id dev_parent_id;
+ __be32 local, remote;
+};
+
+struct vif_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct net_device *dev;
+ unsigned short vif_index;
+ unsigned short vif_flags;
+ u32 tb_id;
+};
+
+static inline int mr_call_vif_notifier(struct notifier_block *nb,
+ struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ unsigned short vif_index, u32 tb_id)
+{
+ struct vif_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .dev = vif->dev,
+ .vif_index = vif_index,
+ .vif_flags = vif->flags,
+ .tb_id = tb_id,
+ };
+
+ return call_fib_notifier(nb, net, event_type, &info.info);
+}
+
+static inline int mr_call_vif_notifiers(struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ unsigned short vif_index, u32 tb_id,
+ unsigned int *ipmr_seq)
+{
+ struct vif_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .dev = vif->dev,
+ .vif_index = vif_index,
+ .vif_flags = vif->flags,
+ .tb_id = tb_id,
+ };
+
+ ASSERT_RTNL();
+ (*ipmr_seq)++;
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
+#ifndef MAXVIFS
+/* This one is nasty; the value is defined in uapi using different symbols for
+ * mroute and mroute6, but both map to the same value (32).
+ */
+#define MAXVIFS 32
+#endif
+
+#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev))
+
+/* mfc_flags:
+ * MFC_STATIC - the entry was added statically (not by a routing daemon)
+ * MFC_OFFLOAD - the entry was offloaded to the hardware
+ */
+enum {
+ MFC_STATIC = BIT(0),
+ MFC_OFFLOAD = BIT(1),
+};
+
+/**
+ * struct mr_mfc - common multicast routing entries
+ * @mnode: rhashtable list
+ * @mfc_parent: source interface (iif)
+ * @mfc_flags: entry flags
+ * @expires: unresolved entry expire time
+ * @unresolved: unresolved cached skbs
+ * @last_assert: time of last assert
+ * @minvif: minimum VIF id
+ * @maxvif: maximum VIF id
+ * @bytes: bytes that have passed for this entry
+ * @pkt: packets that have passed for this entry
+ * @wrong_if: number of wrong source interface hits
+ * @lastuse: time of last use of the group (traffic or update)
+ * @ttls: OIF TTL threshold array
+ * @refcount: reference count for this entry
+ * @list: global entry list
+ * @rcu: used for entry destruction
+ * @free: Operation used for freeing an entry under RCU
+ */
+struct mr_mfc {
+ struct rhlist_head mnode;
+ unsigned short mfc_parent;
+ int mfc_flags;
+
+ union {
+ struct {
+ unsigned long expires;
+ struct sk_buff_head unresolved;
+ } unres;
+ struct {
+ unsigned long last_assert;
+ int minvif;
+ int maxvif;
+ unsigned long bytes;
+ unsigned long pkt;
+ unsigned long wrong_if;
+ unsigned long lastuse;
+ unsigned char ttls[MAXVIFS];
+ refcount_t refcount;
+ } res;
+ } mfc_un;
+ struct list_head list;
+ struct rcu_head rcu;
+ void (*free)(struct rcu_head *head);
+};
+
+static inline void mr_cache_put(struct mr_mfc *c)
+{
+ if (refcount_dec_and_test(&c->mfc_un.res.refcount))
+ call_rcu(&c->rcu, c->free);
+}
+
+static inline void mr_cache_hold(struct mr_mfc *c)
+{
+ refcount_inc(&c->mfc_un.res.refcount);
+}
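/*
 * Editor's sketch (not part of the patch): the refcount pattern the two
 * helpers above are meant for -- pin a resolved entry while it is used
 * outside the table locks, drop it when done. example_use_route() is a
 * hypothetical caller.
 */
static inline void example_use_route(struct mr_mfc *c)
{
	mr_cache_hold(c);	/* pin the entry while we look at it */
	/* ... read c->mfc_un.res statistics ... */
	mr_cache_put(c);	/* last reference triggers call_rcu(c->free) */
}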
+
+struct mfc_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct mr_mfc *mfc;
+ u32 tb_id;
+};
+
+static inline int mr_call_mfc_notifier(struct notifier_block *nb,
+ struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct mr_mfc *mfc, u32 tb_id)
+{
+ struct mfc_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .mfc = mfc,
+ .tb_id = tb_id
+ };
+
+ return call_fib_notifier(nb, net, event_type, &info.info);
+}
+
+static inline int mr_call_mfc_notifiers(struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct mr_mfc *mfc, u32 tb_id,
+ unsigned int *ipmr_seq)
+{
+ struct mfc_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .mfc = mfc,
+ .tb_id = tb_id
+ };
+
+ ASSERT_RTNL();
+ (*ipmr_seq)++;
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
+struct mr_table;
+
+/**
+ * struct mr_table_ops - callbacks and info for protocol-specific ops
+ * @rht_params: parameters for accessing the MFC hash
+ * @cmparg_any: a hash key to be used for matching on (*,*) routes
+ */
+struct mr_table_ops {
+ const struct rhashtable_params *rht_params;
+ void *cmparg_any;
+};
+
+/**
+ * struct mr_table - a multicast routing table
+ * @list: entry within a list of multicast routing tables
+ * @net: net where this table belongs
+ * @ops: protocol specific operations
+ * @id: identifier of the table
+ * @mroute_sk: socket associated with the table
+ * @ipmr_expire_timer: timer for handling unresolved routes
+ * @mfc_unres_queue: list of unresolved MFC entries
+ * @vif_table: array containing all possible vifs
+ * @mfc_hash: Hash table of all resolved routes for easy lookup
+ * @mfc_cache_list: list of resolved routes for possible traversal
+ * @maxvif: Identifier of highest value vif currently in use
+ * @cache_resolve_queue_len: current size of unresolved queue
+ * @mroute_do_assert: Whether to inform userspace on wrong ingress
+ * @mroute_do_pim: Whether to receive IGMP PIMv1
+ * @mroute_reg_vif_num: PIM-device vif index
+ */
+struct mr_table {
+ struct list_head list;
+ possible_net_t net;
+ struct mr_table_ops ops;
+ u32 id;
+ struct sock __rcu *mroute_sk;
+ struct timer_list ipmr_expire_timer;
+ struct list_head mfc_unres_queue;
+ struct vif_device vif_table[MAXVIFS];
+ struct rhltable mfc_hash;
+ struct list_head mfc_cache_list;
+ int maxvif;
+ atomic_t cache_resolve_queue_len;
+ bool mroute_do_assert;
+ bool mroute_do_pim;
+ int mroute_reg_vif_num;
+};
+
+#ifdef CONFIG_IP_MROUTE_COMMON
+void vif_device_init(struct vif_device *v,
+ struct net_device *dev,
+ unsigned long rate_limit,
+ unsigned char threshold,
+ unsigned short flags,
+ unsigned short get_iflink_mask);
+
+struct mr_table *
+mr_table_alloc(struct net *net, u32 id,
+ struct mr_table_ops *ops,
+ void (*expire_func)(struct timer_list *t),
+ void (*table_set)(struct mr_table *mrt,
+ struct net *net));
+
+/* These actually return 'struct mr_mfc *', but to avoid the need for explicit
+ * casts they simply return 'void *'.
+ */
+void *mr_mfc_find_parent(struct mr_table *mrt,
+ void *hasharg, int parent);
+void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi);
+void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
+
+int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mr_mfc *c, struct rtmsg *rtm);
+int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+ struct mr_table *(*iter)(struct net *net,
+ struct mr_table *mrt),
+ int (*fill)(struct mr_table *mrt,
+ struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock);
+
+int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ rwlock_t *mrt_lock);
+#else
+static inline void vif_device_init(struct vif_device *v,
+ struct net_device *dev,
+ unsigned long rate_limit,
+ unsigned char threshold,
+ unsigned short flags,
+ unsigned short get_iflink_mask)
+{
+}
+
+static inline void *
+mr_table_alloc(struct net *net, u32 id,
+ struct mr_table_ops *ops,
+ void (*expire_func)(struct timer_list *t),
+ void (*table_set)(struct mr_table *mrt,
+ struct net *net))
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_find_parent(struct mr_table *mrt,
+ void *hasharg, int parent)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_find_any_parent(struct mr_table *mrt,
+ int vifi)
+{
+ return NULL;
+}
+
+static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt,
+ int vifi, void *hasharg)
+{
+ return NULL;
+}
+
+static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mr_mfc *c, struct rtmsg *rtm)
+{
+ return -EINVAL;
+}
+
+static inline int
+mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+ struct mr_table *(*iter)(struct net *net,
+ struct mr_table *mrt),
+ int (*fill)(struct mr_table *mrt,
+ struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock)
+{
+ return -EINVAL;
+}
+
+static inline int mr_dump(struct net *net, struct notifier_block *nb,
+ unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ rwlock_t *mrt_lock)
+{
+ return -EINVAL;
+}
+#endif
+
+static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
+{
+ return mr_mfc_find_parent(mrt, hasharg, -1);
+}
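/*
 * Editor's sketch (not part of the patch): protocol code would build its
 * own hash key and use the common lookup helper. struct mfc6_cache_cmp_arg
 * comes from mroute6.h (shown earlier in this patch);
 * example_ip6mr_cache_find() is hypothetical.
 */
static struct mfc6_cache *example_ip6mr_cache_find(struct mr_table *mrt,
						   const struct in6_addr *origin,
						   const struct in6_addr *mcastgrp)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	/* mr_mfc_find() returns 'void *', so no cast is needed here */
	return mr_mfc_find(mrt, &arg);
}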
+
+#ifdef CONFIG_PROC_FS
+struct mr_vif_iter {
+ struct seq_net_private p;
+ struct mr_table *mrt;
+ int ct;
+};
+
+struct mr_mfc_iter {
+ struct seq_net_private p;
+ struct mr_table *mrt;
+ struct list_head *cache;
+
+ /* Lock protecting the mr_table's unresolved queue */
+ spinlock_t *lock;
+};
+
+#ifdef CONFIG_IP_MROUTE_COMMON
+void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos);
+void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+
+static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mr_vif_seq_idx(seq_file_net(seq),
+ seq->private, *pos - 1)
+ : SEQ_START_TOKEN;
+}
+
+/* These actually return 'struct mr_mfc *', but to avoid the need for explicit
+ * casts they simply return 'void *'.
+ */
+void *mr_mfc_seq_idx(struct net *net,
+ struct mr_mfc_iter *it, loff_t pos);
+void *mr_mfc_seq_next(struct seq_file *seq, void *v,
+ loff_t *pos);
+
+static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
+ struct mr_table *mrt, spinlock_t *lock)
+{
+ struct mr_mfc_iter *it = seq->private;
+
+ it->mrt = mrt;
+ it->cache = NULL;
+ it->lock = lock;
+
+ return *pos ? mr_mfc_seq_idx(seq_file_net(seq),
+ seq->private, *pos - 1)
+ : SEQ_START_TOKEN;
+}
+
+static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
+{
+ struct mr_mfc_iter *it = seq->private;
+ struct mr_table *mrt = it->mrt;
+
+ if (it->cache == &mrt->mfc_unres_queue)
+ spin_unlock_bh(it->lock);
+ else if (it->cache == &mrt->mfc_cache_list)
+ rcu_read_unlock();
+}
+#else
+static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
+ loff_t pos)
+{
+ return NULL;
+}
+
+static inline void *mr_vif_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_idx(struct net *net,
+ struct mr_mfc_iter *it, loff_t pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v,
+ loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
+ struct mr_table *mrt, spinlock_t *lock)
+{
+ return NULL;
+}
+
+static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
+{
+}
+#endif
+#endif
+#endif
diff --git a/include/linux/msg.h b/include/linux/msg.h
index 0a7eefeee0d1..9a972a296b95 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -3,7 +3,6 @@
#define _LINUX_MSG_H
#include <linux/list.h>
-#include <linux/time64.h>
#include <uapi/linux/msg.h>
/* one msg_msg structure for each message */
@@ -16,21 +15,4 @@ struct msg_msg {
/* the actual message follows immediately */
};
-/* one msq_queue structure for each present queue on the system */
-struct msg_queue {
- struct kern_ipc_perm q_perm;
- time64_t q_stime; /* last msgsnd time */
- time64_t q_rtime; /* last msgrcv time */
- time64_t q_ctime; /* last change time */
- unsigned long q_cbytes; /* current number of bytes on queue */
- unsigned long q_qnum; /* number of messages in queue */
- unsigned long q_qbytes; /* max number of bytes on queue */
- pid_t q_lspid; /* pid of last msgsnd */
- pid_t q_lrpid; /* last receive pid */
-
- struct list_head q_messages;
- struct list_head q_receivers;
- struct list_head q_senders;
-} __randomize_layout;
-
#endif /* _LINUX_MSG_H */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 3bf8f954b642..3102bd754d18 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/mtd/bbm.h
- *
* NAND family Bad Block Management (BBM) header file
* - Bad Block Table (BBT) implementation
*
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 3aa56e3104bb..b5b43f94f311 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd);
#define INVALIDATE_CACHED_RANGE(map, from, size) \
do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
-
-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] != val2.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & ~val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] | val2.x[i];
-
- return r;
-}
-
-static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if ((val1.x[i] & val2.x[i]) != val3.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] & val2.x[i])
- return 1;
- }
-
- return 0;
-}
+#define map_word_equal(map, val1, val2) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) \
+ if ((val1).x[i] != (val2).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ ret; \
+})
+
+#define map_word_and(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & (val2).x[i]; \
+ r; \
+})
+
+#define map_word_clr(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & ~(val2).x[i]; \
+ r; \
+})
+
+#define map_word_or(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] | (val2).x[i]; \
+ r; \
+})
+
+#define map_word_andequal(map, val1, val2, val3) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) { \
+		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) {	\
+ ret = 0; \
+ break; \
+ } \
+ } \
+ ret; \
+})
+
+#define map_word_bitsset(map, val1, val2) \
+({ \
+ int i, ret = 0; \
+ for (i = 0; i < map_words(map); i++) { \
+ if ((val1).x[i] & (val2).x[i]) { \
+ ret = 1; \
+ break; \
+ } \
+ } \
+ ret; \
+})
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cd55bf14ad51..a86c4fa93115 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -30,32 +30,19 @@
#include <asm/div64.h>
-#define MTD_ERASE_PENDING 0x01
-#define MTD_ERASING 0x02
-#define MTD_ERASE_SUSPEND 0x04
-#define MTD_ERASE_DONE 0x08
-#define MTD_ERASE_FAILED 0x10
-
#define MTD_FAIL_ADDR_UNKNOWN -1LL
+struct mtd_info;
+
/*
* If the erase fails, fail_addr might indicate exactly which block failed. If
* fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
* or was not specific to any particular block.
*/
struct erase_info {
- struct mtd_info *mtd;
uint64_t addr;
uint64_t len;
uint64_t fail_addr;
- u_long time;
- u_long retries;
- unsigned dev;
- unsigned cell;
- void (*callback) (struct erase_info *self);
- u_long priv;
- u_char state;
- struct erase_info *next;
};
struct mtd_erase_region_info {
@@ -489,6 +476,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->erasesize);
}
+/**
+ * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
+ * boundaries.
+ * @mtd: the MTD device this erase request applies on
+ * @req: the erase request to adjust
+ *
+ * This function will adjust @req->addr and @req->len to align them on
+ * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
+ */
+static inline void mtd_align_erase_req(struct mtd_info *mtd,
+ struct erase_info *req)
+{
+ u32 mod;
+
+ if (WARN_ON(!mtd->erasesize))
+ return;
+
+ mod = mtd_mod_by_eb(req->addr, mtd);
+ if (mod) {
+ req->addr -= mod;
+ req->len += mod;
+ }
+
+ mod = mtd_mod_by_eb(req->addr + req->len, mtd);
+ if (mod)
+ req->len += mtd->erasesize - mod;
+}
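/*
 * Editor's sketch (not part of the patch): a hypothetical caller aligning a
 * partial-block request before passing it to mtd_erase(). With a 128KiB
 * erasesize, addr=0x21000/len=0x1000 becomes addr=0x20000/len=0x20000.
 */
static inline int example_erase_aligned(struct mtd_info *mtd,
					uint64_t addr, uint64_t len)
{
	struct erase_info req = {
		.addr = addr,
		.len = len,
	};

	mtd_align_erase_req(mtd, &req);	/* round addr down, len up */
	return mtd_erase(mtd, &req);
}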
+
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)
@@ -567,8 +582,6 @@ extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
-void mtd_erase_callback(struct erase_info *instr);
-
static inline int mtd_is_bitflip(int err) {
return err == -EUCLEAN;
}
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
new file mode 100644
index 000000000000..792ea5c26329
--- /dev/null
+++ b/include/linux/mtd/nand.h
@@ -0,0 +1,731 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 - Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#ifndef __LINUX_MTD_NAND_H
+#define __LINUX_MTD_NAND_H
+
+#include <linux/mtd/mtd.h>
+
+/**
+ * struct nand_memory_organization - Memory organization structure
+ * @bits_per_cell: number of bits per NAND cell
+ * @pagesize: page size
+ * @oobsize: OOB area size
+ * @pages_per_eraseblock: number of pages per eraseblock
+ * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
+ * @planes_per_lun: number of planes per LUN
+ * @luns_per_target: number of LUNs per target (target is a synonym for die)
+ * @ntargets: total number of targets exposed by the NAND device
+ */
+struct nand_memory_organization {
+ unsigned int bits_per_cell;
+ unsigned int pagesize;
+ unsigned int oobsize;
+ unsigned int pages_per_eraseblock;
+ unsigned int eraseblocks_per_lun;
+ unsigned int planes_per_lun;
+ unsigned int luns_per_target;
+ unsigned int ntargets;
+};
+
+#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \
+ { \
+ .bits_per_cell = (bpc), \
+ .pagesize = (ps), \
+ .oobsize = (os), \
+ .pages_per_eraseblock = (ppe), \
+ .eraseblocks_per_lun = (epl), \
+ .planes_per_lun = (ppl), \
+ .luns_per_target = (lpt), \
+ .ntargets = (nt), \
+ }
+
+/**
+ * struct nand_row_converter - Information needed to convert an absolute offset
+ * into a row address
+ * @lun_addr_shift: position of the LUN identifier in the row address
+ * @eraseblock_addr_shift: position of the eraseblock identifier in the row
+ * address
+ */
+struct nand_row_converter {
+ unsigned int lun_addr_shift;
+ unsigned int eraseblock_addr_shift;
+};
+
+/**
+ * struct nand_pos - NAND position object
+ * @target: the NAND target/die
+ * @lun: the LUN identifier
+ * @plane: the plane within the LUN
+ * @eraseblock: the eraseblock within the LUN
+ * @page: the page within the LUN
+ *
+ * This information is usually used by specific sub-layers to select the
+ * appropriate target/die and generate a row address to pass to the device.
+ */
+struct nand_pos {
+ unsigned int target;
+ unsigned int lun;
+ unsigned int plane;
+ unsigned int eraseblock;
+ unsigned int page;
+};
+
+/**
+ * struct nand_page_io_req - NAND I/O request object
+ * @pos: the position this I/O request is targeting
+ * @dataoffs: the offset within the page
+ * @datalen: number of data bytes to read from/write to this page
+ * @databuf: buffer to store data in or get data from
+ * @ooboffs: the OOB offset within the page
+ * @ooblen: the number of OOB bytes to read from/write to this page
+ * @oobbuf: buffer to store OOB data in or get OOB data from
+ *
+ * This object is used to pass per-page I/O requests to NAND sub-layers. This
+ * way all the useful information is already formatted in a convenient way and
+ * specific NAND layers can focus on translating this information into
+ * device-specific commands/operations.
+ */
+struct nand_page_io_req {
+ struct nand_pos pos;
+ unsigned int dataoffs;
+ unsigned int datalen;
+ union {
+ const void *out;
+ void *in;
+ } databuf;
+ unsigned int ooboffs;
+ unsigned int ooblen;
+ union {
+ const void *out;
+ void *in;
+ } oobbuf;
+};
+
+/**
+ * struct nand_ecc_req - NAND ECC requirements
+ * @strength: ECC strength
+ * @step_size: ECC step/block size
+ */
+struct nand_ecc_req {
+ unsigned int strength;
+ unsigned int step_size;
+};
+
+#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
+
+/**
+ * struct nand_bbt - bad block table object
+ * @cache: in memory BBT cache
+ */
+struct nand_bbt {
+ unsigned long *cache;
+};
+
+struct nand_device;
+
+/**
+ * struct nand_ops - NAND operations
+ * @erase: erase a specific block. No need to check if the block is bad before
+ * erasing, this has been taken care of by the generic NAND layer
+ * @markbad: mark a specific block bad. No need to check if the block is
+ * already marked bad, this has been taken care of by the generic
+ * NAND layer. This method should just write the BBM (Bad Block
+ *	      Marker) so that future calls to struct_nand_ops->isbad() return
+ * true
+ * @isbad: check whether a block is bad or not. This method should just read
+ * the BBM and return whether the block is bad or not based on what it
+ * reads
+ *
+ * These are all low level operations that should be implemented by specialized
+ * NAND layers (SPI NAND, raw NAND, ...).
+ */
+struct nand_ops {
+ int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
+ int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
+ bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
+};
+
+/**
+ * struct nand_device - NAND device
+ * @mtd: MTD instance attached to the NAND device
+ * @memorg: memory layout
+ * @eccreq: ECC requirements
+ * @rowconv: position to row address converter
+ * @bbt: bad block table info
+ * @ops: NAND operations attached to the NAND device
+ *
+ * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
+ * should declare their own NAND object embedding a nand_device struct (that's
+ * how inheritance is done).
+ * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
+ * at device detection time to reflect the NAND device
+ * capabilities/requirements. Once this is done nanddev_init() can be called.
+ * It will take care of converting NAND information into MTD ones, which means
+ * the specialized NAND layers should never manually tweak
+ * struct_nand_device->mtd except for the ->_read/write() hooks.
+ */
+struct nand_device {
+ struct mtd_info mtd;
+ struct nand_memory_organization memorg;
+ struct nand_ecc_req eccreq;
+ struct nand_row_converter rowconv;
+ struct nand_bbt bbt;
+ const struct nand_ops *ops;
+};
+
+/**
+ * struct nand_io_iter - NAND I/O iterator
+ * @req: current I/O request
+ * @oobbytes_per_page: maximum number of OOB bytes per page
+ * @dataleft: remaining number of data bytes to read/write
+ * @oobleft: remaining number of OOB bytes to read/write
+ *
+ * Can be used by specialized NAND layers to iterate over all pages covered
+ * by an MTD I/O request, which should greatly simplify the boilerplate
+ * code needed to read/write data from/to a NAND device.
+ */
+struct nand_io_iter {
+ struct nand_page_io_req req;
+ unsigned int oobbytes_per_page;
+ unsigned int dataleft;
+ unsigned int oobleft;
+};
+
+/**
+ * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the NAND device embedding @mtd.
+ */
+static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct nand_device, mtd);
+}
+
+/**
+ * nanddev_to_mtd() - Get the MTD device attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the MTD device embedded in @nand.
+ */
+static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
+{
+ return &nand->mtd;
+}
+
+/**
+ * nanddev_bits_per_cell() - Get the number of bits per cell
+ * @nand: NAND device
+ *
+ * Return: the number of bits per cell.
+ */
+static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
+{
+ return nand->memorg.bits_per_cell;
+}
+
+/**
+ * nanddev_page_size() - Get NAND page size
+ * @nand: NAND device
+ *
+ * Return: the page size.
+ */
+static inline size_t nanddev_page_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_per_page_oobsize() - Get NAND OOB size
+ * @nand: NAND device
+ *
+ * Return: the OOB size.
+ */
+static inline unsigned int
+nanddev_per_page_oobsize(const struct nand_device *nand)
+{
+ return nand->memorg.oobsize;
+}
+
+/**
+ * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
+ * @nand: NAND device
+ *
+ * Return: the number of pages per eraseblock.
+ */
+static inline unsigned int
+nanddev_pages_per_eraseblock(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_eraseblock_size() - Get NAND eraseblock size
+ * @nand: NAND device
+ *
+ * Return: the eraseblock size.
+ */
+static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per LUN.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_lun(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun;
+}
+
+/**
+ * nanddev_target_size() - Get the total size provided by a single target/die
+ * @nand: NAND device
+ *
+ * Return: the total size exposed by a single target/die in bytes.
+ */
+static inline u64 nanddev_target_size(const struct nand_device *nand)
+{
+ return (u64)nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.pages_per_eraseblock *
+ nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_ntargets() - Get the total number of targets
+ * @nand: NAND device
+ *
+ * Return: the number of targets/dies exposed by @nand.
+ */
+static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
+{
+ return nand->memorg.ntargets;
+}
+
+/**
+ * nanddev_neraseblocks() - Get the total number of eraseblocks
+ * @nand: NAND device
+ *
+ * Return: the total number of eraseblocks exposed by @nand.
+ */
+static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
+{
+ return (u64)nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_size() - Get NAND size
+ * @nand: NAND device
+ *
+ * Return: the total size (in bytes) exposed by @nand.
+ */
+static inline u64 nanddev_size(const struct nand_device *nand)
+{
+ return nanddev_target_size(nand) * nanddev_ntargets(nand);
+}
+
+/**
+ * nanddev_get_memorg() - Extract memory organization info from a NAND device
+ * @nand: NAND device
+ *
+ * This can be used by the upper layer to fill the memorg info before calling
+ * nanddev_init().
+ *
+ * Return: the memorg object embedded in the NAND device.
+ */
+static inline struct nand_memory_organization *
+nanddev_get_memorg(struct nand_device *nand)
+{
+ return &nand->memorg;
+}
+
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+ struct module *owner);
+void nanddev_cleanup(struct nand_device *nand);
+
+/**
+ * nanddev_register() - Register a NAND device
+ * @nand: NAND device
+ *
+ * Register a NAND device.
+ * This function is just a wrapper around mtd_device_register()
+ * registering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_register(struct nand_device *nand)
+{
+ return mtd_device_register(&nand->mtd, NULL, 0);
+}
+
+/**
+ * nanddev_unregister() - Unregister a NAND device
+ * @nand: NAND device
+ *
+ * Unregister a NAND device.
+ * This function is just a wrapper around mtd_device_unregister()
+ * unregistering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_unregister(struct nand_device *nand)
+{
+ return mtd_device_unregister(&nand->mtd);
+}
+
+/**
+ * nanddev_set_of_node() - Attach a DT node to a NAND device
+ * @nand: NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a NAND device.
+ */
+static inline void nanddev_set_of_node(struct nand_device *nand,
+ struct device_node *np)
+{
+ mtd_set_of_node(&nand->mtd, np);
+}
+
+/**
+ * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the DT node attached to @nand.
+ */
+static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
+{
+ return mtd_get_of_node(&nand->mtd);
+}
+
+/**
+ * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
+ * @nand: NAND device
+ * @offs: absolute NAND offset (usually passed by the MTD layer)
+ * @pos: a NAND position object to fill in
+ *
+ * Converts @offs into a nand_pos representation.
+ *
+ * Return: the offset within the NAND page pointed by @pos.
+ */
+static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
+ loff_t offs,
+ struct nand_pos *pos)
+{
+ unsigned int pageoffs;
+ u64 tmp = offs;
+
+ pageoffs = do_div(tmp, nand->memorg.pagesize);
+ pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
+ pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+ pos->lun = do_div(tmp, nand->memorg.luns_per_target);
+ pos->target = tmp;
+
+ return pageoffs;
+}
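/*
 * Editor's sketch (not part of the patch): using the converter to check the
 * eraseblock backing an MTD offset. nanddev_isbad() is declared further down
 * in this header; example_mtd_offs_isbad() is hypothetical.
 */
static inline bool example_mtd_offs_isbad(struct nand_device *nand, loff_t offs)
{
	struct nand_pos pos;

	nanddev_offs_to_pos(nand, offs, &pos);
	return nanddev_isbad(nand, &pos);
}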
+
+/**
+ * nanddev_pos_cmp() - Compare two NAND positions
+ * @a: First NAND position
+ * @b: Second NAND position
+ *
+ * Compares two NAND positions.
+ *
+ * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
+ */
+static inline int nanddev_pos_cmp(const struct nand_pos *a,
+ const struct nand_pos *b)
+{
+ if (a->target != b->target)
+ return a->target < b->target ? -1 : 1;
+
+ if (a->lun != b->lun)
+ return a->lun < b->lun ? -1 : 1;
+
+ if (a->eraseblock != b->eraseblock)
+ return a->eraseblock < b->eraseblock ? -1 : 1;
+
+ if (a->page != b->page)
+ return a->page < b->page ? -1 : 1;
+
+ return 0;
+}
+
+/**
+ * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
+ * @nand: NAND device
+ * @pos: the NAND position to convert
+ *
+ * Converts @pos NAND position into an absolute offset.
+ *
+ * Return: the absolute offset. Note that @pos points to the beginning of a
+ *	   page; if one wants to point to a specific offset within this page
+ * the returned offset has to be adjusted manually.
+ */
+static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ unsigned int npages;
+
+ npages = pos->page +
+ ((pos->eraseblock +
+ (pos->lun +
+ (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun) *
+ nand->memorg.pages_per_eraseblock);
+
+ return (loff_t)npages * nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_pos_to_row() - Extract a row address from a NAND position
+ * @nand: NAND device
+ * @pos: the position to convert
+ *
+ * Converts a NAND position into a row address that can then be passed to the
+ * device.
+ *
+ * Return: the row address extracted from @pos.
+ */
+static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return (pos->lun << nand->rowconv.lun_addr_shift) |
+ (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
+ pos->page;
+}
+
+/**
+ * nanddev_pos_next_target() - Move a position to the next target/die
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next target/die. Useful when you
+ * want to iterate over all targets/dies of a NAND device.
+ */
+static inline void nanddev_pos_next_target(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+ pos->lun = 0;
+ pos->target++;
+}
+
+/**
+ * nanddev_pos_next_lun() - Move a position to the next LUN
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next LUN. Useful when you want to
+ * iterate over all LUNs of a NAND device.
+ */
+static inline void nanddev_pos_next_lun(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->lun >= nand->memorg.luns_per_target - 1)
+ return nanddev_pos_next_target(nand, pos);
+
+ pos->lun++;
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+}
+
+/**
+ * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next eraseblock. Useful when you
+ * want to iterate over all eraseblocks of a NAND device.
+ */
+static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
+ return nanddev_pos_next_lun(nand, pos);
+
+ pos->eraseblock++;
+ pos->page = 0;
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+}
+
+/**
+ * nanddev_pos_next_page() - Move a position to the next page
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next page. Useful when you want to
+ * iterate over all pages of a NAND device.
+ */
+static inline void nanddev_pos_next_page(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
+ return nanddev_pos_next_eraseblock(nand, pos);
+
+ pos->page++;
+}
+
+/**
+ * nanddev_io_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer.
+ */
+static inline void nanddev_io_iter_init(struct nand_device *nand,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = req->ooboffs;
+ iter->oobbytes_per_page = mtd_oobavail(mtd, req);
+ iter->dataleft = req->len;
+ iter->oobleft = req->ooblen;
+ iter->req.databuf.in = req->datbuf;
+ iter->req.datalen = min_t(unsigned int,
+ nand->memorg.pagesize - iter->req.dataoffs,
+ iter->dataleft);
+ iter->req.oobbuf.in = req->oobbuf;
+ iter->req.ooblen = min_t(unsigned int,
+ iter->oobbytes_per_page - iter->req.ooboffs,
+ iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_next_page - Move to the next page
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next page.
+ */
+static inline void nanddev_io_iter_next_page(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_page(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->oobleft -= iter->req.ooblen;
+ iter->req.oobbuf.in += iter->req.ooblen;
+ iter->req.dataoffs = 0;
+ iter->req.ooboffs = 0;
+ iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
+ iter->dataleft);
+ iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
+ iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_end - Check whether the iteration should end or not
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Check whether @iter has reached the end of the NAND portion it was asked to
+ * iterate on or not.
+ *
+ * Return: true if @iter has reached the end of the iteration request, false
+ * otherwise.
+ */
+static inline bool nanddev_io_iter_end(struct nand_device *nand,
+ const struct nand_io_iter *iter)
+{
+ if (iter->dataleft || iter->oobleft)
+ return false;
+
+ return true;
+}
+
+/**
+ * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O
+ * request
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used to iterate over the pages contained in an MTD request.
+ */
+#define nanddev_io_for_each_page(nand, start, req, iter) \
+ for (nanddev_io_iter_init(nand, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_page(nand, iter))
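/*
 * Editor's sketch (not part of the patch): how a specialized layer might
 * walk an MTD request page by page with the iterator above.
 * example_read_page() is a hypothetical per-page helper provided by that
 * layer; the retlen/oobretlen updates assume the whole request succeeded.
 */
static int example_read_page(struct nand_device *nand,
			     const struct nand_page_io_req *req);

static int example_read_pages(struct nand_device *nand, loff_t from,
			      struct mtd_oob_ops *ops)
{
	struct nand_io_iter iter;
	int ret;

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = example_read_page(nand, &iter.req);
		if (ret)
			return ret;
	}

	ops->retlen = ops->len;
	ops->oobretlen = ops->ooblen;
	return 0;
}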
+
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
+
+/* BBT related functions */
+enum nand_bbt_block_status {
+ NAND_BBT_BLOCK_STATUS_UNKNOWN,
+ NAND_BBT_BLOCK_GOOD,
+ NAND_BBT_BLOCK_WORN,
+ NAND_BBT_BLOCK_RESERVED,
+ NAND_BBT_BLOCK_FACTORY_BAD,
+ NAND_BBT_BLOCK_NUM_STATUS,
+};
+
+int nanddev_bbt_init(struct nand_device *nand);
+void nanddev_bbt_cleanup(struct nand_device *nand);
+int nanddev_bbt_update(struct nand_device *nand);
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+ unsigned int entry);
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+ enum nand_bbt_block_status status);
+int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
+
+/**
+ * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
+ * @nand: NAND device
+ * @pos: the NAND position we want to get BBT entry for
+ *
+ * Return the BBT entry used to store information about the eraseblock pointed
+ * by @pos.
+ *
+ * Return: the BBT entry storing information about eraseblock pointed by @pos.
+ */
+static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return pos->eraseblock +
+ ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun);
+}
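/*
 * Editor's sketch (not part of the patch): combining the position-to-entry
 * conversion with the BBT status accessors declared above.
 * example_pos_is_bad() is hypothetical.
 */
static inline bool example_pos_is_bad(struct nand_device *nand,
				      const struct nand_pos *pos)
{
	unsigned int entry = nanddev_bbt_pos_to_entry(nand, pos);
	int status = nanddev_bbt_get_block_status(nand, entry);

	return status == NAND_BBT_BLOCK_WORN ||
	       status == NAND_BBT_BLOCK_FACTORY_BAD;
}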
+
+/**
+ * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
+ * @nand: NAND device
+ *
+ * Return: true if the BBT has been initialized, false otherwise.
+ */
+static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
+{
+ return !!nand->bbt.cache;
+}
+
+/* MTD -> NAND helper functions. */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
+
+#endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 4d8406c81652..8a2decf7462c 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -1,6 +1,4 @@
/*
- * drivers/mtd/nand_ecc.h
- *
* Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
* David Woodhouse <dwmw2@infradead.org>
* Thomas Gleixner <tglx@linutronix.de>
diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h
index d0558a982628..357e88b3263a 100644
--- a/include/linux/mtd/ndfc.h
+++ b/include/linux/mtd/ndfc.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/mtd/ndfc.h
- *
* Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index c4beb70dacbd..11cb0c50cd84 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -77,6 +77,7 @@ struct mtd_part_parser {
struct list_head list;
struct module *owner;
const char *name;
+ const struct of_device_id *of_match_table;
int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
struct mtd_part_parser_data *);
void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 749bb08c4772..5dad59b31244 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,6 +21,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
+#include <linux/types.h>
struct mtd_info;
struct nand_flash_dev;
@@ -133,12 +134,6 @@ enum nand_ecc_algo {
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
#define NAND_ECC_MAXIMIZE BIT(1)
-/*
- * If your controller already sends the required NAND commands when
- * reading or writing a page, then the framework is not supposed to
- * send READ0 and SEQIN/PAGEPROG respectively.
- */
-#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
/* Bit mask for flags passed to do_nand_read_ecc */
#define NAND_GET_DEVICE 0x80
@@ -191,11 +186,6 @@ enum nand_ecc_algo {
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
#define NAND_SKIP_BBTSCAN 0x00010000
-/*
- * This option is defined if the board driver allocates its own buffers
- * (e.g. because it needs them DMA-coherent).
- */
-#define NAND_OWN_BUFFERS 0x00020000
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00040000
/*
@@ -246,7 +236,8 @@ struct nand_chip;
#define ONFI_TIMING_MODE_5 (1 << 5)
#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
-/* ONFI feature address */
+/* ONFI feature number/address */
+#define ONFI_FEATURE_NUMBER 256
#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
/* Vendor-specific feature address (Micron) */
@@ -440,6 +431,47 @@ struct nand_jedec_params {
__le16 crc;
} __packed;
+/**
+ * struct onfi_params - ONFI specific parameters that will be reused
+ * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
+ * @tPROG: Page program time
+ * @tBERS: Block erase time
+ * @tR: Page read time
+ * @tCCS: Change column setup time
+ * @async_timing_mode: Supported asynchronous timing mode
+ * @vendor_revision: Vendor specific revision number
+ * @vendor: Vendor specific data
+ */
+struct onfi_params {
+ int version;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ u16 async_timing_mode;
+ u16 vendor_revision;
+ u8 vendor[88];
+};
+
+/**
+ * struct nand_parameters - NAND generic parameters from the parameter page
+ * @model: Model name
+ * @supports_set_get_features: The NAND chip supports setting/getting features
+ * @set_feature_list: Bitmap of features that can be set
+ * @get_feature_list: Bitmap of features that can be retrieved
+ * @onfi: ONFI specific parameters
+ */
+struct nand_parameters {
+ /* Generic parameters */
+ char model[100];
+ bool supports_set_get_features;
+ DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
+ DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
+
+ /* ONFI parameters */
+ struct onfi_params onfi;
+};
+
/* The maximum expected count of bytes in the NAND ID sequence */
#define NAND_MAX_ID_LEN 8
@@ -525,6 +557,8 @@ static const struct nand_ecc_caps __name = { \
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
* @priv: pointer to private ECC control data
+ * @calc_buf: buffer for calculated ECC, size is oobsize.
+ * @code_buf: buffer for ECC read from flash, size is oobsize.
* @hwctl: function to control hardware ECC generator. Must only
* be provided if an hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
@@ -575,6 +609,8 @@ struct nand_ecc_ctrl {
int postpad;
unsigned int options;
void *priv;
+ u8 *calc_buf;
+ u8 *code_buf;
void (*hwctl)(struct mtd_info *mtd, int mode);
int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code);
@@ -602,26 +638,6 @@ struct nand_ecc_ctrl {
int page);
};
-static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
-{
- return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
-}
-
-/**
- * struct nand_buffers - buffer structure for read/write
- * @ecccalc: buffer pointer for calculated ECC, size is oobsize.
- * @ecccode: buffer pointer for ECC read from flash, size is oobsize.
- * @databuf: buffer pointer for data, size is (page size + oobsize).
- *
- * Do not change the order of buffers. databuf and oobrbuf must be in
- * consecutive order.
- */
-struct nand_buffers {
- uint8_t *ecccalc;
- uint8_t *ecccode;
- uint8_t *databuf;
-};
-
/**
* struct nand_sdr_timings - SDR NAND chip timings
*
@@ -762,6 +778,350 @@ struct nand_manufacturer_ops {
};
/**
+ * struct nand_op_cmd_instr - Definition of a command instruction
+ * @opcode: the command to issue in one cycle
+ */
+struct nand_op_cmd_instr {
+ u8 opcode;
+};
+
+/**
+ * struct nand_op_addr_instr - Definition of an address instruction
+ * @naddrs: length of the @addrs array
+ * @addrs: array containing the address cycles to issue
+ */
+struct nand_op_addr_instr {
+ unsigned int naddrs;
+ const u8 *addrs;
+};
+
+/**
+ * struct nand_op_data_instr - Definition of a data instruction
+ * @len: number of data bytes to move
+ * @in: buffer to fill when reading from the NAND chip
+ * @out: buffer to read from when writing to the NAND chip
+ * @force_8bit: force 8-bit access
+ *
+ * Please note that "in" and "out" are inverted from the ONFI specification
+ * and are from the controller perspective, so an "in" is a read from the NAND
+ * chip while an "out" is a write to the NAND chip.
+ */
+struct nand_op_data_instr {
+ unsigned int len;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ bool force_8bit;
+};
+
+/**
+ * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
+ * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
+ */
+struct nand_op_waitrdy_instr {
+ unsigned int timeout_ms;
+};
+
+/**
+ * enum nand_op_instr_type - Definition of all instruction types
+ * @NAND_OP_CMD_INSTR: command instruction
+ * @NAND_OP_ADDR_INSTR: address instruction
+ * @NAND_OP_DATA_IN_INSTR: data in instruction
+ * @NAND_OP_DATA_OUT_INSTR: data out instruction
+ * @NAND_OP_WAITRDY_INSTR: wait ready instruction
+ */
+enum nand_op_instr_type {
+ NAND_OP_CMD_INSTR,
+ NAND_OP_ADDR_INSTR,
+ NAND_OP_DATA_IN_INSTR,
+ NAND_OP_DATA_OUT_INSTR,
+ NAND_OP_WAITRDY_INSTR,
+};
+
+/**
+ * struct nand_op_instr - Instruction object
+ * @type: the instruction type
+ * @cmd/@addr/@data/@waitrdy: extra data associated to the instruction.
+ * You'll have to use the appropriate element
+ * depending on @type
+ * @delay_ns: delay the controller should apply after the instruction has been
+ * issued on the bus. Most modern controllers have internal timing
+ * control logic, and in this case, the controller driver can ignore
+ * this field.
+ */
+struct nand_op_instr {
+ enum nand_op_instr_type type;
+ union {
+ struct nand_op_cmd_instr cmd;
+ struct nand_op_addr_instr addr;
+ struct nand_op_data_instr data;
+ struct nand_op_waitrdy_instr waitrdy;
+ } ctx;
+ unsigned int delay_ns;
+};
+
+/*
+ * Special handling must be done for the WAITRDY timeout parameter as it usually
+ * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
+ * tBERS (during an erase), all of which are u64 values that cannot be
+ * divided by the usual kernel macros and must be handled with the special
+ * DIV_ROUND_UP_ULL() macro.
+ */
+#define __DIVIDE(dividend, divisor) ({ \
+ sizeof(dividend) == sizeof(u32) ? \
+ DIV_ROUND_UP(dividend, divisor) : \
+ DIV_ROUND_UP_ULL(dividend, divisor); \
+ })
+#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
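/*
 * Editor's sketch (not part of the patch): SDR timings (struct
 * nand_sdr_timings, documented below in this header) are expressed in
 * picoseconds, so a wait-ready timeout in milliseconds would typically be
 * derived like this. The tR_max field name is an assumption about that
 * structure, not something introduced by this patch.
 */
static inline unsigned int
example_read_timeout_ms(const struct nand_sdr_timings *sdr)
{
	return PSEC_TO_MSEC(sdr->tR_max);	/* max page read time, in ms */
}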
+
+#define NAND_OP_CMD(id, ns) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .ctx.cmd.opcode = id, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_ADDR(ncycles, cycles, ns) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .ctx.addr = { \
+ .naddrs = ncycles, \
+ .addrs = cycles, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_WAIT_RDY(tout_ms, ns) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .ctx.waitrdy.timeout_ms = tout_ms, \
+ .delay_ns = ns, \
+ }
+
+/**
+ * struct nand_subop - a sub operation
+ * @instrs: array of instructions
+ * @ninstrs: length of the @instrs array
+ * @first_instr_start_off: offset to start from for the first instruction
+ * of the sub-operation
+ * @last_instr_end_off: offset to end at (excluded) for the last instruction
+ * of the sub-operation
+ *
+ * Both @first_instr_start_off and @last_instr_end_off only apply to data or
+ * address instructions.
+ *
+ * When an operation cannot be handled as is by the NAND controller, it will
+ * be split by the parser into sub-operations which will be passed to the
+ * controller driver.
+ */
+struct nand_subop {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ unsigned int first_instr_start_off;
+ unsigned int last_instr_end_off;
+};
+
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
+
+/**
+ * struct nand_op_parser_addr_constraints - Constraints for address instructions
+ * @maxcycles: maximum number of address cycles the controller can issue in a
+ * single step
+ */
+struct nand_op_parser_addr_constraints {
+ unsigned int maxcycles;
+};
+
+/**
+ * struct nand_op_parser_data_constraints - Constraints for data instructions
+ * @maxlen: maximum data length that the controller can handle in a single step
+ */
+struct nand_op_parser_data_constraints {
+ unsigned int maxlen;
+};
+
+/**
+ * struct nand_op_parser_pattern_elem - One element of a pattern
+ * @type: the instruction type
+ * @optional: whether this element of the pattern is optional or mandatory
+ * @addr/@data: address or data constraint (number of cycles or data length)
+ */
+struct nand_op_parser_pattern_elem {
+ enum nand_op_instr_type type;
+ bool optional;
+ union {
+ struct nand_op_parser_addr_constraints addr;
+ struct nand_op_parser_data_constraints data;
+ } ctx;
+};
+
+#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .optional = _opt, \
+ }
+
+#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .optional = _opt, \
+ .ctx.addr.maxcycles = _maxcycles, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .optional = _opt, \
+ }
+
+/**
+ * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
+ * @elems: array of pattern elements
+ * @nelems: number of pattern elements in @elems array
+ * @exec: the function that will issue a sub-operation
+ *
+ * A pattern is a list of elements, each element representing one instruction
+ * with its constraints. The pattern itself is used by the core to match NAND
+ * chip operations with NAND controller operations.
+ * Once a match between a NAND controller operation pattern and a NAND chip
+ * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
+ * hook is called so that the controller driver can issue the operation on the
+ * bus.
+ *
+ * Controller drivers should declare as many patterns as they support and pass
+ * this list of patterns (created with the help of the following macro) to
+ * the nand_op_parser_exec_op() helper.
+ */
+struct nand_op_parser_pattern {
+ const struct nand_op_parser_pattern_elem *elems;
+ unsigned int nelems;
+ int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
+};
+
+#define NAND_OP_PARSER_PATTERN(_exec, ...) \
+ { \
+ .exec = _exec, \
+ .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
+ .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern_elem), \
+ }
+
+/**
+ * struct nand_op_parser - NAND controller operation parser descriptor
+ * @patterns: array of supported patterns
+ * @npatterns: length of the @patterns array
+ *
+ * The parser descriptor is just an array of supported patterns which will be
+ * iterated over by nand_op_parser_exec_op() every time it tries to execute a
+ * NAND operation (or tries to determine if a specific operation is supported).
+ *
+ * It is worth mentioning that patterns will be tested in their declaration
+ * order, and the first match will be taken, so it's important to order patterns
+ * appropriately so that simple/inefficient patterns are placed at the end of
+ * the list. Usually, this is where you put single instruction patterns.
+ */
+struct nand_op_parser {
+ const struct nand_op_parser_pattern *patterns;
+ unsigned int npatterns;
+};
+
+#define NAND_OP_PARSER(...) \
+ { \
+ .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
+ .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern), \
+ }
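/*
 * Editor's sketch (not part of the patch): a controller driver describing a
 * single supported pattern, CMD [+ ADDR] [+ DATA_IN] [+ WAITRDY].
 * The example_exec_subop() hook and the 5-cycle/2048-byte limits are
 * hypothetical, chosen only to illustrate the macros above.
 */
static int example_exec_subop(struct nand_chip *chip,
			      const struct nand_subop *subop);

static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(example_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2048),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)));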
+
+/**
+ * struct nand_operation - NAND operation descriptor
+ * @instrs: array of instructions to execute
+ * @ninstrs: length of the @instrs array
+ *
+ * The actual operation structure that will be passed to chip->exec_op().
+ */
+struct nand_operation {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+};
+
+#define NAND_OPERATION(_instrs) \
+ { \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
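/*
 * Editor's sketch (not part of the patch): building a RESET (0xff) operation
 * with the instruction macros and submitting it through the new ->exec_op()
 * hook. The 250ms timeout is a placeholder, and the code assumes it lives
 * where struct nand_chip is fully defined (i.e. in core or driver code).
 */
static inline int example_reset_op(struct nand_chip *chip)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(0xff, 0),
		NAND_OP_WAIT_RDY(250, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);

	return chip->exec_op(chip, &op, false);
}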
+
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only);
+
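/*
 * Editor's sketch (not part of the patch): a controller ->exec_op()
 * implementation usually just hands the operation to its parser, here the
 * hypothetical example_op_parser sketched earlier.
 */
static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &example_op_parser, op, check_only);
}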
+/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
* @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
@@ -787,10 +1147,13 @@ struct nand_manufacturer_ops {
* commands to the chip.
* @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
* ready.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces ->cmdfunc(),
+ * ->{read,write}_{buf,byte,word}(), ->dev_ready() and
+ *			->waitfunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
- * @buffers: buffer structure for read/write
* @buf_align: minimum buffer alignment required by a platform
* @hwcontrol: platform-specific hardware control structure
* @erase: [REPLACEABLE] erase function
@@ -830,27 +1193,22 @@ struct nand_manufacturer_ops {
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
+ * @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
* @pagebuf: [INTERN] holds the pagenumber which is currently in
* data_buf.
* @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
* currently in data_buf.
* @subpagesize: [INTERN] holds the subpagesize
* @id: [INTERN] holds NAND ID
- * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded),
- * non 0 if ONFI supported.
- * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded),
- * non 0 if JEDEC supported.
- * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
- * supported, 0 otherwise.
- * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is
- * supported, 0 otherwise.
+ * @parameters:		[INTERN] holds generic parameters in an easily
+ *			readable form.
* @max_bb_per_die: [INTERN] the max number of bad blocks each die of a
* this nand device will encounter their life times.
* @blocks_per_die: [INTERN] The number of PEBs in a die
* @data_interface: [INTERN] NAND interface timing information
* @read_retries: [INTERN] the number of read retry modes supported
- * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
- * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
+ * @set_features: [REPLACEABLE] set the NAND chip features
+ * @get_features: [REPLACEABLE] get the NAND chip features
* @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
* chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
* means the configuration should not be applied but
@@ -886,17 +1244,19 @@ struct nand_chip {
void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
int page_addr);
int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
- int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
+ int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
+ int feature_addr, uint8_t *subfeature_para);
+ int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
+ int feature_addr, uint8_t *subfeature_para);
int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf);
-
int chip_delay;
unsigned int options;
unsigned int bbt_options;
@@ -908,6 +1268,7 @@ struct nand_chip {
int numchips;
uint64_t chipsize;
int pagemask;
+ u8 *data_buf;
int pagebuf;
unsigned int pagebuf_bitflips;
int subpagesize;
@@ -919,16 +1280,11 @@ struct nand_chip {
int badblockbits;
struct nand_id id;
- int onfi_version;
- int jedec_version;
- union {
- struct nand_onfi_params onfi_params;
- struct nand_jedec_params jedec_params;
- };
+ struct nand_parameters parameters;
u16 max_bb_per_die;
u32 blocks_per_die;
- struct nand_data_interface *data_interface;
+ struct nand_data_interface data_interface;
int read_retries;
@@ -938,7 +1294,6 @@ struct nand_chip {
struct nand_hw_control *controller;
struct nand_ecc_ctrl ecc;
- struct nand_buffers *buffers;
unsigned long buf_align;
struct nand_hw_control hwcontrol;
@@ -956,6 +1311,15 @@ struct nand_chip {
} manufacturer;
};
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ return chip->exec_op(chip, op, false);
+}
+
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1203,30 +1567,16 @@ struct platform_nand_data {
struct platform_nand_ctrl ctrl;
};
-/* return the supported features. */
-static inline int onfi_feature(struct nand_chip *chip)
-{
- return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0;
-}
-
/* return the supported asynchronous timing mode. */
static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
{
- if (!chip->onfi_version)
+ if (!chip->parameters.onfi.version)
return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.async_timing_mode);
-}
-/* return the supported synchronous timing mode. */
-static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
-{
- if (!chip->onfi_version)
- return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
+ return chip->parameters.onfi.async_timing_mode;
}
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode);
@@ -1260,17 +1610,8 @@ static inline int nand_opcode_8bits(unsigned int command)
return 0;
}
-/* return the supported JEDEC features. */
-static inline int jedec_feature(struct nand_chip *chip)
-{
- return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features)
- : 0;
-}
-
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
-/* get data interface from ONFI timing mode 0, used after reset. */
-const struct nand_data_interface *nand_get_default_data_interface(void);
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
@@ -1300,10 +1641,12 @@ int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
+/* Wrappers for controller/vendor drivers to issue GET/SET FEATURES operations */
+int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
/* Stub used by drivers that do not support GET/SET FEATURES operations */
-int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
- struct nand_chip *chip, int addr,
- u8 *subfeature_param);
+int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, u8 *subfeature_param);
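A hedged example of the new wrappers, assuming a vendor driver that wants to tweak a chip feature. The feature address used below is a placeholder, not a value defined by this patch; ONFI subfeature payloads are 4 bytes.

static int foo_tune_feature(struct nand_chip *chip)
{
	u8 param[4] = { 1, };	/* ONFI subfeature payloads are 4 bytes */

	/* 0x90 is a placeholder feature address */
	return nand_set_features(chip, 0x90, param);
}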
/* Default read_page_raw implementation */
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1316,9 +1659,45 @@ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
+/* NAND operation helpers */
+int nand_reset_op(struct nand_chip *chip);
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len);
+int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_prog_page_end_op(struct nand_chip *chip);
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit);
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit);
+
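As a minimal sketch of the helpers above, a raw page program can be split into a begin/data/end sequence, which is useful when a driver has to interleave its own controller programming; the foo_* name is a placeholder.

static int foo_prog_page_raw(struct nand_chip *chip, unsigned int page,
			     const u8 *buf, unsigned int len)
{
	int ret;

	/* Issue the program command and address cycles, defer the data. */
	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, buf, len, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}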
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
+
+/*
+ * External helper for controller drivers that have to implement the WAITRDY
+ * instruction and have no physical pin to check it.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+
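A minimal sketch of how a controller without a ready/busy pin might emulate a wait-ready step. The fixed timeout is a placeholder; a real driver would take it from the matched WAITRDY instruction.

static int foo_wait_ready(struct nand_chip *chip)
{
	/* 400 ms is a placeholder timeout */
	return nand_soft_waitrdy(chip, 400);
}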
#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index d0c66a0975cf..de36969eb359 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -61,6 +61,7 @@
#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
+#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -130,7 +131,10 @@
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
/* Flag Status Register bits */
-#define FSR_READY BIT(7)
+#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
+#define FSR_E_ERR BIT(5) /* Erase operation status */
+#define FSR_P_ERR BIT(4) /* Program operation status */
+#define FSR_PT_ERR BIT(1) /* Protection error bit */
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
@@ -399,4 +403,10 @@ struct spi_nor_hwcaps {
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
+/**
+ * spi_nor_restore() - restore the status of SPI NOR
+ * @nor: the spi_nor structure
+ */
+void spi_nor_restore(struct spi_nor *nor);
+
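A hedged usage sketch: a SPI NOR host driver would typically call this from its remove/shutdown path so the flash is left in a state (e.g. addressing mode) that boot firmware expects; struct foo_flash is a placeholder.

struct foo_flash {
	struct spi_nor spi_nor;
	/* ... */
};

static void foo_flash_shutdown(struct foo_flash *flash)
{
	spi_nor_restore(&flash->spi_nor);
}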
#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 153274f78402..14bc0d5d0ee5 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -14,7 +14,6 @@
#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
-#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
@@ -66,6 +65,11 @@ struct mutex {
#endif
};
+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
@@ -138,9 +142,9 @@ extern void __mutex_init(struct mutex *lock, const char *name,
* mutex_is_locked - is the mutex locked
* @lock: the mutex to be queried
*
- * Returns 1 if the mutex is locked, 0 if unlocked.
+ * Returns true if the mutex is locked, false if unlocked.
*/
-static inline int mutex_is_locked(struct mutex *lock)
+static inline bool mutex_is_locked(struct mutex *lock)
{
/*
* XXX think about spin_is_locked
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index ea96d4c82be7..5fc6bb2fefad 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mux/consumer.h - definitions for the multiplexer consumer interface
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_MUX_CONSUMER_H
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
index 35c3579c3304..627a2c6bc02d 100644
--- a/include/linux/mux/driver.h
+++ b/include/linux/mux/driver.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mux/driver.h - definitions for the multiplexer driver interface
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_MUX_DRIVER_H
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 5dc6b695437d..43c181a6add5 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -180,6 +180,12 @@ struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
struct module *module, const char *mod_name);
+static inline void nd_driver_unregister(struct nd_device_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
#define nd_driver_register(driver) \
__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define module_nd_driver(driver) \
+ module_driver(driver, nd_driver_register, nd_driver_unregister)
#endif /* __LINUX_ND_H__ */
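A minimal sketch of the new convenience macro, assuming a trivial libnvdimm bus driver. The foo_* symbols are placeholders, and a real driver would also set .type to the device types it binds to.

static int foo_probe(struct device *dev)
{
	return 0;
}

static int foo_remove(struct device *dev)
{
	return 0;
}

static struct nd_device_driver foo_nd_driver = {
	.probe = foo_probe,
	.remove = foo_remove,
	.drv = {
		.name = "foo_nd",
	},
};
module_nd_driver(foo_nd_driver);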
diff --git a/include/linux/net.h b/include/linux/net.h
index caeb159abda5..2248a052061d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -146,8 +146,8 @@ struct proto_ops {
struct socket *newsock, int flags, bool kern);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
- int *sockaddr_len, int peer);
- unsigned int (*poll) (struct file *file, struct socket *sock,
+ int peer);
+ __poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
@@ -222,6 +222,7 @@ enum {
int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
+bool sock_is_registered(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
@@ -294,10 +295,8 @@ int kernel_listen(struct socket *sock, int backlog);
int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags);
-int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
-int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
int *optlen);
int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
@@ -306,7 +305,6 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
-int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
new file mode 100644
index 000000000000..29ed8fd6379a
--- /dev/null
+++ b/include/linux/net_dim.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NET_DIM_H
+#define NET_DIM_H
+
+#include <linux/module.h>
+
+struct net_dim_cq_moder {
+ u16 usec;
+ u16 pkts;
+ u8 cq_period_mode;
+};
+
+struct net_dim_sample {
+ ktime_t time;
+ u32 pkt_ctr;
+ u32 byte_ctr;
+ u16 event_ctr;
+};
+
+struct net_dim_stats {
+ int ppms; /* packets per msec */
+ int bpms; /* bytes per msec */
+ int epms; /* events per msec */
+};
+
+struct net_dim { /* Adaptive Moderation */
+ u8 state;
+ struct net_dim_stats prev_stats;
+ struct net_dim_sample start_sample;
+ struct work_struct work;
+ u8 profile_ix;
+ u8 mode;
+ u8 tune_state;
+ u8 steps_right;
+ u8 steps_left;
+ u8 tired;
+};
+
+enum {
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+ NET_DIM_CQ_PERIOD_NUM_MODES
+};
+
+/* Adaptive moderation logic */
+enum {
+ NET_DIM_START_MEASURE,
+ NET_DIM_MEASURE_IN_PROGRESS,
+ NET_DIM_APPLY_NEW_PROFILE,
+};
+
+enum {
+ NET_DIM_PARKING_ON_TOP,
+ NET_DIM_PARKING_TIRED,
+ NET_DIM_GOING_RIGHT,
+ NET_DIM_GOING_LEFT,
+};
+
+enum {
+ NET_DIM_STATS_WORSE,
+ NET_DIM_STATS_SAME,
+ NET_DIM_STATS_BETTER,
+};
+
+enum {
+ NET_DIM_STEPPED,
+ NET_DIM_TOO_TIRED,
+ NET_DIM_ON_EDGE,
+};
+
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+/* Adaptive moderation profiles */
+#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */
+#define NET_DIM_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define NET_DIM_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+static const struct net_dim_cq_moder
+profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_EQE_PROFILES,
+ NET_DIM_CQE_PROFILES,
+};
+
+static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode,
+ int ix)
+{
+ struct net_dim_cq_moder cq_moder;
+
+ cq_moder = profile[cq_period_mode][ix];
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+
+static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode)
+{
+ int default_profile_ix;
+
+ if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE)
+ default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
+ else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */
+ default_profile_ix = NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_profile(rx_cq_period_mode, default_profile_ix);
+}
+
+static inline bool net_dim_on_top(struct net_dim *dim)
+{
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ return true;
+ case NET_DIM_GOING_RIGHT:
+ return (dim->steps_left > 1) && (dim->steps_right == 1);
+ default: /* NET_DIM_GOING_LEFT */
+ return (dim->steps_right > 1) && (dim->steps_left == 1);
+ }
+}
+
+static inline void net_dim_turn(struct net_dim *dim)
+{
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ break;
+ case NET_DIM_GOING_RIGHT:
+ dim->tune_state = NET_DIM_GOING_LEFT;
+ dim->steps_left = 0;
+ break;
+ case NET_DIM_GOING_LEFT:
+ dim->tune_state = NET_DIM_GOING_RIGHT;
+ dim->steps_right = 0;
+ break;
+ }
+}
+
+static inline int net_dim_step(struct net_dim *dim)
+{
+ if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+ return NET_DIM_TOO_TIRED;
+
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ case NET_DIM_PARKING_TIRED:
+ break;
+ case NET_DIM_GOING_RIGHT:
+ if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+ return NET_DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ break;
+ case NET_DIM_GOING_LEFT:
+ if (dim->profile_ix == 0)
+ return NET_DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ break;
+ }
+
+ dim->tired++;
+ return NET_DIM_STEPPED;
+}
+
+static inline void net_dim_park_on_top(struct net_dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tired = 0;
+ dim->tune_state = NET_DIM_PARKING_ON_TOP;
+}
+
+static inline void net_dim_park_tired(struct net_dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tune_state = NET_DIM_PARKING_TIRED;
+}
+
+static inline void net_dim_exit_parking(struct net_dim *dim)
+{
+ dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT :
+ NET_DIM_GOING_RIGHT;
+ net_dim_step(dim);
+}
+
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+ (((100UL * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+
+static inline int net_dim_stats_compare(struct net_dim_stats *curr,
+ struct net_dim_stats *prev)
+{
+ if (!prev->bpms)
+ return curr->bpms ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+ return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ if (!prev->ppms)
+ return curr->ppms ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+ return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ if (!prev->epms)
+ return NET_DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+ return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER :
+ NET_DIM_STATS_WORSE;
+
+ return NET_DIM_STATS_SAME;
+}
+
+static inline bool net_dim_decision(struct net_dim_stats *curr_stats,
+ struct net_dim *dim)
+{
+ int prev_state = dim->tune_state;
+ int prev_ix = dim->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (dim->tune_state) {
+ case NET_DIM_PARKING_ON_TOP:
+ stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+ if (stats_res != NET_DIM_STATS_SAME)
+ net_dim_exit_parking(dim);
+ break;
+
+ case NET_DIM_PARKING_TIRED:
+ dim->tired--;
+ if (!dim->tired)
+ net_dim_exit_parking(dim);
+ break;
+
+ case NET_DIM_GOING_RIGHT:
+ case NET_DIM_GOING_LEFT:
+ stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+ if (stats_res != NET_DIM_STATS_BETTER)
+ net_dim_turn(dim);
+
+ if (net_dim_on_top(dim)) {
+ net_dim_park_on_top(dim);
+ break;
+ }
+
+ step_res = net_dim_step(dim);
+ switch (step_res) {
+ case NET_DIM_ON_EDGE:
+ net_dim_park_on_top(dim);
+ break;
+ case NET_DIM_TOO_TIRED:
+ net_dim_park_tired(dim);
+ break;
+ }
+
+ break;
+ }
+
+ if ((prev_state != NET_DIM_PARKING_ON_TOP) ||
+ (dim->tune_state != NET_DIM_PARKING_ON_TOP))
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
+static inline void net_dim_sample(u16 event_ctr,
+ u64 packets,
+ u64 bytes,
+ struct net_dim_sample *s)
+{
+ s->time = ktime_get();
+ s->pkt_ctr = packets;
+ s->byte_ctr = bytes;
+ s->event_ctr = event_ctr;
+}
+
+#define NET_DIM_NEVENTS 64
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
+
+static inline void net_dim_calc_stats(struct net_dim_sample *start,
+ struct net_dim_sample *end,
+ struct net_dim_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+ u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+ start->byte_ctr);
+
+ if (!delta_us)
+ return;
+
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+ curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC,
+ delta_us);
+}
+
+static inline void net_dim(struct net_dim *dim,
+ struct net_dim_sample end_sample)
+{
+ struct net_dim_stats curr_stats;
+ u16 nevents;
+
+ switch (dim->state) {
+ case NET_DIM_MEASURE_IN_PROGRESS:
+ nevents = BIT_GAP(BITS_PER_TYPE(u16),
+ end_sample.event_ctr,
+ dim->start_sample.event_ctr);
+ if (nevents < NET_DIM_NEVENTS)
+ break;
+ net_dim_calc_stats(&dim->start_sample, &end_sample,
+ &curr_stats);
+ if (net_dim_decision(&curr_stats, dim)) {
+ dim->state = NET_DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ /* fall through */
+ case NET_DIM_START_MEASURE:
+ dim->state = NET_DIM_MEASURE_IN_PROGRESS;
+ break;
+ case NET_DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+
+#endif /* NET_DIM_H */
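A hedged integration sketch: a NIC driver feeds a sample to net_dim() at the end of each NAPI poll and applies the newly selected profile from the work handler (the work must have been set up with INIT_WORK(&dim->work, ...) beforehand). The foo_* symbols are placeholders.

struct foo_rx_ring {
	struct net_dim dim;
	u16 events;
	u64 packets;
	u64 bytes;
};

static void foo_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct net_dim_cq_moder cur = net_dim_get_profile(dim->mode,
							  dim->profile_ix);

	/* program cur.usec / cur.pkts into the completion queue here */
	dim->state = NET_DIM_START_MEASURE;
}

static void foo_napi_poll_done(struct foo_rx_ring *ring)
{
	struct net_dim_sample sample;

	net_dim_sample(ring->events, ring->packets, ring->bytes, &sample);
	net_dim(&ring->dim, sample);
}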
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index b1b0ca7ccb2b..35b79f47a13d 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -78,6 +78,9 @@ enum {
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
+ NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
+ NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
+
/*
* Add your fresh new feature above and remember to update
* netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -97,6 +100,7 @@ enum {
#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
#define NETIF_F_FSO __NETIF_F(FSO)
#define NETIF_F_GRO __NETIF_F(GRO)
+#define NETIF_F_GRO_HW __NETIF_F(GRO_HW)
#define NETIF_F_GSO __NETIF_F(GSO)
#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST)
#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA)
@@ -142,6 +146,7 @@ enum {
#define NETIF_F_HW_ESP __NETIF_F(HW_ESP)
#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
+#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ef789e1d679e..cf44503ea81a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -44,6 +44,7 @@
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
+#include <net/xdp.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
@@ -57,6 +58,7 @@ struct device;
struct phy_device;
struct dsa_port;
+struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
@@ -584,6 +586,15 @@ struct netdev_queue {
#endif
} ____cacheline_aligned_in_smp;
+extern int sysctl_fb_tunnels_only_for_init_net;
+
+static inline bool net_has_fallback_tunnels(const struct net *net)
+{
+ return net == &init_net ||
+ !IS_ENABLED(CONFIG_SYSCTL) ||
+ !sysctl_fb_tunnels_only_for_init_net;
+}
+
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -686,6 +697,7 @@ struct netdev_rx_queue {
#endif
struct kobject kobj;
struct net_device *dev;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
/*
@@ -778,6 +790,7 @@ enum tc_setup_type {
TC_SETUP_BLOCK,
TC_SETUP_QDISC_CBS,
TC_SETUP_QDISC_RED,
+ TC_SETUP_QDISC_PRIO,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -802,9 +815,11 @@ enum bpf_netdev_command {
BPF_OFFLOAD_VERIFIER_PREP,
BPF_OFFLOAD_TRANSLATE,
BPF_OFFLOAD_DESTROY,
+ BPF_OFFLOAD_MAP_ALLOC,
+ BPF_OFFLOAD_MAP_FREE,
};
-struct bpf_ext_analyzer_ops;
+struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct netdev_bpf {
@@ -820,16 +835,22 @@ struct netdev_bpf {
struct {
u8 prog_attached;
u32 prog_id;
+ /* flags with which program was installed */
+ u32 prog_flags;
};
/* BPF_OFFLOAD_VERIFIER_PREP */
struct {
struct bpf_prog *prog;
- const struct bpf_ext_analyzer_ops *ops; /* callee set */
+ const struct bpf_prog_offload_ops *ops; /* callee set */
} verifier;
/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
struct {
struct bpf_prog *prog;
} offload;
+ /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
+ struct {
+ struct bpf_offloaded_map *offmap;
+ };
};
};
@@ -840,6 +861,7 @@ struct xfrmdev_ops {
void (*xdo_dev_state_free) (struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
+ void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif
@@ -1369,8 +1391,6 @@ struct net_device_ops {
* @IFF_MACVLAN: Macvlan device
* @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
* underlying stacked devices
- * @IFF_IPVLAN_MASTER: IPvlan master device
- * @IFF_IPVLAN_SLAVE: IPvlan slave device
* @IFF_L3MDEV_MASTER: device is an L3 master device
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
@@ -1380,6 +1400,7 @@ struct net_device_ops {
* @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
* entity (i.e. the master device for bridged veth)
* @IFF_MACSEC: device is a MACsec device
+ * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1400,16 +1421,15 @@ enum netdev_priv_flags {
IFF_LIVE_ADDR_CHANGE = 1<<15,
IFF_MACVLAN = 1<<16,
IFF_XMIT_DST_RELEASE_PERM = 1<<17,
- IFF_IPVLAN_MASTER = 1<<18,
- IFF_IPVLAN_SLAVE = 1<<19,
- IFF_L3MDEV_MASTER = 1<<20,
- IFF_NO_QUEUE = 1<<21,
- IFF_OPENVSWITCH = 1<<22,
- IFF_L3MDEV_SLAVE = 1<<23,
- IFF_TEAM = 1<<24,
- IFF_RXFH_CONFIGURED = 1<<25,
- IFF_PHONY_HEADROOM = 1<<26,
- IFF_MACSEC = 1<<27,
+ IFF_L3MDEV_MASTER = 1<<18,
+ IFF_NO_QUEUE = 1<<19,
+ IFF_OPENVSWITCH = 1<<20,
+ IFF_L3MDEV_SLAVE = 1<<21,
+ IFF_TEAM = 1<<22,
+ IFF_RXFH_CONFIGURED = 1<<23,
+ IFF_PHONY_HEADROOM = 1<<24,
+ IFF_MACSEC = 1<<25,
+ IFF_NO_RX_HANDLER = 1<<26,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1430,8 +1450,6 @@ enum netdev_priv_flags {
#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
-#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
-#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE IFF_NO_QUEUE
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
@@ -1439,6 +1457,7 @@ enum netdev_priv_flags {
#define IFF_TEAM IFF_TEAM
#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
#define IFF_MACSEC IFF_MACSEC
+#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
/**
* struct net_device - The DEVICE structure.
@@ -1458,8 +1477,6 @@ enum netdev_priv_flags {
* @base_addr: Device I/O address
* @irq: Device IRQ number
*
- * @carrier_changes: Stats to monitor carrier on<->off transitions
- *
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
* @napi_list: List entry used for polling NAPI devices
@@ -1495,6 +1512,8 @@ enum netdev_priv_flags {
* do not use this in drivers
* @rx_nohandler: nohandler dropped packets by core network on
* inactive devices, do not use this in drivers
+ * @carrier_up_count: Number of times the carrier has been up
+ * @carrier_down_count: Number of times the carrier has been down
*
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
@@ -1644,6 +1663,7 @@ enum netdev_priv_flags {
* @priomap: XXX: need comments on this one
* @phydev: Physical device may attach itself
* for hardware timestamping
+ * @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
* @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
@@ -1669,8 +1689,6 @@ struct net_device {
unsigned long base_addr;
int irq;
- atomic_t carrier_changes;
-
/*
* Some hardware also needs these fields (state,dev_list,
* napi_list,unreg_list,close_list) but they are not
@@ -1708,6 +1726,10 @@ struct net_device {
atomic_long_t tx_dropped;
atomic_long_t rx_nohandler;
+ /* Stats to monitor link on/off, flapping */
+ atomic_t carrier_up_count;
+ atomic_t carrier_down_count;
+
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def *wireless_handlers;
struct iw_public_data *wireless_data;
@@ -1724,7 +1746,7 @@ struct net_device {
const struct ndisc_ops *ndisc_ops;
#endif
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
const struct xfrmdev_ops *xfrmdev_ops;
#endif
@@ -1784,11 +1806,17 @@ struct net_device {
#if IS_ENABLED(CONFIG_TIPC)
struct tipc_bearer __rcu *tipc_ptr;
#endif
+#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
+#endif
struct in_device __rcu *ip_ptr;
+#if IS_ENABLED(CONFIG_DECNET)
struct dn_dev __rcu *dn_ptr;
+#endif
struct inet6_dev __rcu *ip6_ptr;
+#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
+#endif
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1801,12 +1829,9 @@ struct net_device {
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
-#ifdef CONFIG_SYSFS
struct netdev_rx_queue *_rx;
-
unsigned int num_rx_queues;
unsigned int real_num_rx_queues;
-#endif
struct bpf_prog __rcu *xdp_prog;
unsigned long gro_flush_timeout;
@@ -1922,6 +1947,7 @@ struct net_device {
struct netprio_map __rcu *priomap;
#endif
struct phy_device *phydev;
+ struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
struct lock_class_key *qdisc_running_key;
bool proto_down;
@@ -2289,43 +2315,49 @@ struct netdev_lag_lower_state_info {
#include <linux/notifier.h>
-/* netdevice notifier chain. Please remember to update the rtnetlink
- * notification exclusion list in rtnetlink_event() when adding new
- * types.
+/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
+ * and the rtnetlink notification exclusion list in rtnetlink_event() when
+ * adding new types.
*/
-#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
-#define NETDEV_DOWN 0x0002
-#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+enum netdev_cmd {
+ NETDEV_UP = 1, /* For now you can't veto a device up/down */
+ NETDEV_DOWN,
+ NETDEV_REBOOT, /* Tell a protocol stack a network interface
detected a hardware crash and restarted
- we can use this eg to kick tcp sessions
once done */
-#define NETDEV_CHANGE 0x0004 /* Notify device state change */
-#define NETDEV_REGISTER 0x0005
-#define NETDEV_UNREGISTER 0x0006
-#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
-#define NETDEV_CHANGEADDR 0x0008
-#define NETDEV_GOING_DOWN 0x0009
-#define NETDEV_CHANGENAME 0x000A
-#define NETDEV_FEAT_CHANGE 0x000B
-#define NETDEV_BONDING_FAILOVER 0x000C
-#define NETDEV_PRE_UP 0x000D
-#define NETDEV_PRE_TYPE_CHANGE 0x000E
-#define NETDEV_POST_TYPE_CHANGE 0x000F
-#define NETDEV_POST_INIT 0x0010
-#define NETDEV_UNREGISTER_FINAL 0x0011
-#define NETDEV_RELEASE 0x0012
-#define NETDEV_NOTIFY_PEERS 0x0013
-#define NETDEV_JOIN 0x0014
-#define NETDEV_CHANGEUPPER 0x0015
-#define NETDEV_RESEND_IGMP 0x0016
-#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
-#define NETDEV_CHANGEINFODATA 0x0018
-#define NETDEV_BONDING_INFO 0x0019
-#define NETDEV_PRECHANGEUPPER 0x001A
-#define NETDEV_CHANGELOWERSTATE 0x001B
-#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
-#define NETDEV_UDP_TUNNEL_DROP_INFO 0x001D
-#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
+ NETDEV_CHANGE, /* Notify device state change */
+ NETDEV_REGISTER,
+ NETDEV_UNREGISTER,
+ NETDEV_CHANGEMTU, /* notify after mtu change happened */
+ NETDEV_CHANGEADDR,
+ NETDEV_GOING_DOWN,
+ NETDEV_CHANGENAME,
+ NETDEV_FEAT_CHANGE,
+ NETDEV_BONDING_FAILOVER,
+ NETDEV_PRE_UP,
+ NETDEV_PRE_TYPE_CHANGE,
+ NETDEV_POST_TYPE_CHANGE,
+ NETDEV_POST_INIT,
+ NETDEV_RELEASE,
+ NETDEV_NOTIFY_PEERS,
+ NETDEV_JOIN,
+ NETDEV_CHANGEUPPER,
+ NETDEV_RESEND_IGMP,
+ NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
+ NETDEV_CHANGEINFODATA,
+ NETDEV_BONDING_INFO,
+ NETDEV_PRECHANGEUPPER,
+ NETDEV_CHANGELOWERSTATE,
+ NETDEV_UDP_TUNNEL_PUSH_INFO,
+ NETDEV_UDP_TUNNEL_DROP_INFO,
+ NETDEV_CHANGE_TX_QUEUE_LEN,
+ NETDEV_CVLAN_FILTER_PUSH_INFO,
+ NETDEV_CVLAN_FILTER_DROP_INFO,
+ NETDEV_SVLAN_FILTER_PUSH_INFO,
+ NETDEV_SVLAN_FILTER_DROP_INFO,
+};
+const char *netdev_cmd_to_name(enum netdev_cmd cmd);
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
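With the notifier events now an enum, a callback can log the symbolic name via netdev_cmd_to_name(). A minimal sketch, assuming the usual netdev_notifier_info_to_dev() accessor; the foo_* names are placeholders.

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	pr_debug("%s: %s\n", netdev_name(dev), netdev_cmd_to_name(event));
	return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_nb = {
	.notifier_call = foo_netdev_event,
};
/* register_netdevice_notifier(&foo_netdev_nb) from module init */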
@@ -2751,7 +2783,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
return false;
}
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
+ int len, int size);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
@@ -2791,7 +2824,9 @@ struct softnet_data {
struct Qdisc *output_queue;
struct Qdisc **output_queue_tailp;
struct sk_buff *completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+ struct sk_buff_head xfrm_backlog;
+#endif
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -3214,6 +3249,12 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
}
#endif
+static inline struct netdev_rx_queue *
+__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
+{
+ return dev->_rx + rxq;
+}
+
#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
struct netdev_rx_queue *queue)
@@ -3302,7 +3343,9 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
-int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
+ bool *need_copyout);
+int dev_ifconf(struct net *net, struct ifconf *, int);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *, unsigned int flags);
@@ -3315,6 +3358,7 @@ int dev_get_alias(const struct net_device *, char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
int dev_set_mtu(struct net_device *, int);
+int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
@@ -3323,14 +3367,15 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, u32 flags);
-u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id);
+void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+ struct netdev_bpf *xdp);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
@@ -4193,16 +4238,6 @@ static inline bool netif_is_macvlan_port(const struct net_device *dev)
return dev->priv_flags & IFF_MACVLAN_PORT;
}
-static inline bool netif_is_ipvlan(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_IPVLAN_SLAVE;
-}
-
-static inline bool netif_is_ipvlan_port(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_IPVLAN_MASTER;
-}
-
static inline bool netif_is_bond_master(const struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
@@ -4399,11 +4434,11 @@ do { \
* file/line information and a backtrace.
*/
#define netdev_WARN(dev, format, args...) \
- WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
+ WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
-#define netdev_WARN_ONCE(dev, condition, format, arg...) \
- WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev) \
+#define netdev_WARN_ONCE(dev, format, args...) \
+ WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
/* netif printk helpers, similar to netdev_printk */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b24e9b101651..85a1a0b32c66 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -67,6 +67,7 @@ struct nf_hook_ops {
struct net_device *dev;
void *priv;
u_int8_t pf;
+ bool nat_hook;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
@@ -77,17 +78,28 @@ struct nf_hook_entry {
void *priv;
};
+struct nf_hook_entries_rcu_head {
+ struct rcu_head head;
+ void *allocation;
+};
+
struct nf_hook_entries {
u16 num_hook_entries;
/* padding */
struct nf_hook_entry hooks[];
- /* trailer: pointers to original orig_ops of each hook.
- *
- * This is not part of struct nf_hook_entry since its only
- * needed in slow path (hook register/unregister).
+ /* trailer: pointers to original orig_ops of each hook,
+ * followed by rcu_head and scratch space used for freeing
+ * the structure via call_rcu.
*
+	 * This is not part of struct nf_hook_entry since it's only
+	 * needed in the slow path (hook register/unregister):
* const struct nf_hook_ops *orig_ops[]
+ *
+	 * For the same reason, we store this at the end -- it's
+	 * only needed when a hook is deleted, not during
+	 * packet path processing:
+ * struct nf_hook_entries_rcu_head head
*/
};
@@ -184,7 +196,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- struct nf_hook_entries *hook_head;
+ struct nf_hook_entries *hook_head = NULL;
int ret = 1;
#ifdef HAVE_JUMP_LABEL
@@ -195,7 +207,33 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
#endif
rcu_read_lock();
- hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+ switch (pf) {
+ case NFPROTO_IPV4:
+ hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+ break;
+ case NFPROTO_IPV6:
+ hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+ break;
+ case NFPROTO_ARP:
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
+#endif
+ break;
+ case NFPROTO_BRIDGE:
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
+#endif
+ break;
+#if IS_ENABLED(CONFIG_DECNET)
+ case NFPROTO_DECNET:
+ hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
if (hook_head) {
struct nf_hook_state state;
@@ -271,64 +309,16 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
struct flowi;
struct nf_queue_entry;
-struct nf_afinfo {
- unsigned short family;
- __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
- __sum16 (*checksum_partial)(struct sk_buff *skb,
- unsigned int hook,
- unsigned int dataoff,
- unsigned int len,
- u_int8_t protocol);
- int (*route)(struct net *net, struct dst_entry **dst,
- struct flowi *fl, bool strict);
- void (*saveroute)(const struct sk_buff *skb,
- struct nf_queue_entry *entry);
- int (*reroute)(struct net *net, struct sk_buff *skb,
- const struct nf_queue_entry *entry);
- int route_key_size;
-};
-
-extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
-static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
-{
- return rcu_dereference(nf_afinfo[family]);
-}
-
-static inline __sum16
-nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
- u_int8_t protocol, unsigned short family)
-{
- const struct nf_afinfo *afinfo;
- __sum16 csum = 0;
-
- rcu_read_lock();
- afinfo = nf_get_afinfo(family);
- if (afinfo)
- csum = afinfo->checksum(skb, hook, dataoff, protocol);
- rcu_read_unlock();
- return csum;
-}
-
-static inline __sum16
-nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol, unsigned short family)
-{
- const struct nf_afinfo *afinfo;
- __sum16 csum = 0;
-
- rcu_read_lock();
- afinfo = nf_get_afinfo(family);
- if (afinfo)
- csum = afinfo->checksum_partial(skb, hook, dataoff, len,
- protocol);
- rcu_read_unlock();
- return csum;
-}
+__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol,
+ unsigned short family);
-int nf_register_afinfo(const struct nf_afinfo *afinfo);
-void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol, unsigned short family);
+int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict, unsigned short family);
+int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
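With the nf_afinfo indirection removed, callers invoke these helpers directly and pass the family explicitly. A minimal sketch; foo_csum_ok() is a placeholder.

static bool foo_csum_ok(struct sk_buff *skb, unsigned int hook,
			unsigned int dataoff, u_int8_t protocol,
			unsigned short family)
{
	return nf_checksum(skb, hook, dataoff, protocol, family) == 0;
}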
#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 8e42253e5d4d..34fc80f3eb90 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -122,6 +122,8 @@ struct ip_set_ext {
u64 bytes;
char *comment;
u32 timeout;
+ u8 packets_op;
+ u8 bytes_op;
};
struct ip_set;
@@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
const void *e, bool active);
+extern bool ip_set_match_extensions(struct ip_set *set,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext,
+ u32 flags, void *data);
static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h
index bb6fba480118..3d33a2c3f39f 100644
--- a/include/linux/netfilter/ipset/ip_set_counter.h
+++ b/include/linux/netfilter/ipset/ip_set_counter.h
@@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter)
return (u64)atomic64_read(&(counter)->packets);
}
+static inline bool
+ip_set_match_counter(u64 counter, u64 match, u8 op)
+{
+ switch (op) {
+ case IPSET_COUNTER_NONE:
+ return true;
+ case IPSET_COUNTER_EQ:
+ return counter == match;
+ case IPSET_COUNTER_NE:
+ return counter != match;
+ case IPSET_COUNTER_LT:
+ return counter < match;
+ case IPSET_COUNTER_GT:
+ return counter > match;
+ }
+ return false;
+}
+
static inline void
ip_set_update_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
+ const struct ip_set_ext *ext, u32 flags)
{
if (ext->packets != ULLONG_MAX &&
!(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
ip_set_add_bytes(ext->bytes, counter);
ip_set_add_packets(ext->packets, counter);
}
- if (flags & IPSET_FLAG_MATCH_COUNTERS) {
- mext->packets = ip_set_get_packets(counter);
- mext->bytes = ip_set_get_bytes(counter);
- }
}
static inline bool
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 495ba4dd9da5..34551f8aaf9d 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -67,8 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
* @ss: The nfnetlink subsystem ID
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds the NFNL subsystem mutex.
+ * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
*/
#define nfnl_dereference(p, ss) \
rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index b4d741195c28..beee8bffe49e 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -16,6 +16,5 @@ struct nf_acct;
struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
-int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
- struct nf_acct *nfacct);
+int nfnl_acct_overquota(struct net *net, struct nf_acct *nfacct);
#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 33f7530f96b9..9077b3ebea08 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -281,10 +281,14 @@ int xt_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);
+int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks);
+
unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
unsigned int target, unsigned int size);
+int xt_check_proc_name(const char *name, unsigned int size);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
@@ -299,6 +303,7 @@ int xt_data_to_user(void __user *dst, const void *src,
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
struct xt_counters_info *info, bool compat);
+struct xt_counters *xt_counters_alloc(unsigned int counters);
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
@@ -320,6 +325,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+ const char *name);
void xt_table_unlock(struct xt_table *t);
int xt_proto_init(struct net *net, u_int8_t af);
@@ -505,7 +512,7 @@ void xt_compat_unlock(u_int8_t af);
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
-void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_init_offsets(u8 af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
int xt_compat_match_offset(const struct xt_match *match);
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index dc6111adea06..8dddfb151f00 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -4,7 +4,17 @@
#include <uapi/linux/netfilter.h>
+/* in/out/forward only */
+#define NF_ARP_NUMHOOKS 3
+
+/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
+#define NF_DN_NUMHOOKS 7
+
+#if IS_ENABLED(CONFIG_DECNET)
/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS 8
+#define NF_MAX_HOOKS NF_DN_NUMHOOKS
+#else
+#define NF_MAX_HOOKS NF_INET_NUMHOOKS
+#endif
#endif
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 98c03b2462b5..b31dabfdb453 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,53 @@
#include <uapi/linux/netfilter_ipv4.h>
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip_rt_info {
+ __be32 daddr;
+ __be32 saddr;
+ u_int8_t tos;
+ u_int32_t mark;
+};
+
int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+
+struct nf_queue_entry;
+
+#ifdef CONFIG_INET
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
+__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol);
+int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict);
+int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
+#else
+static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol)
+{
+ return 0;
+}
+static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
+ unsigned int hook,
+ unsigned int dataoff,
+ unsigned int len,
+ u_int8_t protocol)
+{
+ return 0;
+}
+static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
+{
+ return -EOPNOTSUPP;
+}
+static inline int nf_ip_reroute(struct sk_buff *skb,
+ const struct nf_queue_entry *entry)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_INET */
+
#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 47c6b04c28c0..288c597e75b3 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,6 +9,17 @@
#include <uapi/linux/netfilter_ipv6.h>
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip6_rt_info {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ u_int32_t mark;
+};
+
+struct nf_queue_entry;
+
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
* if IPv6 is a module.
@@ -19,6 +30,14 @@ struct nf_ipv6_ops {
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
+ __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+ __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol);
+ int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict);
+ int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
};
#ifdef CONFIG_NETFILTER
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 47adac640191..57ffaa20d564 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -457,7 +457,12 @@ enum lock_type4 {
#define NFS4_DEBUG 1
-/* Index of predefined Linux client operations */
+/*
+ * Index of predefined Linux client operations
+ *
+ * To ensure that /proc/net/rpc/nfs remains correctly ordered, please
+ * append only to this enum when adding new client operations.
+ */
enum {
NFSPROC4_CLNT_NULL = 0, /* Unused */
@@ -480,7 +485,6 @@ enum {
NFSPROC4_CLNT_ACCESS,
NFSPROC4_CLNT_GETATTR,
NFSPROC4_CLNT_LOOKUP,
- NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LOOKUP_ROOT,
NFSPROC4_CLNT_REMOVE,
NFSPROC4_CLNT_RENAME,
@@ -500,7 +504,6 @@ enum {
NFSPROC4_CLNT_SECINFO,
NFSPROC4_CLNT_FSID_PRESENT,
- /* nfs41 */
NFSPROC4_CLNT_EXCHANGE_ID,
NFSPROC4_CLNT_CREATE_SESSION,
NFSPROC4_CLNT_DESTROY_SESSION,
@@ -518,13 +521,14 @@ enum {
NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
NFSPROC4_CLNT_DESTROY_CLIENTID,
- /* nfs42 */
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
+
+ NFSPROC4_CLNT_LOOKUPP,
};
/* nfs41 types */
diff --git a/include/linux/node.h b/include/linux/node.h
index 4ece0fee0ffc..41f171861dcc 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -67,7 +67,7 @@ extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
- int nid);
+ int nid, bool check_nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
unsigned long phys_index);
@@ -97,7 +97,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
return 0;
}
static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
- int nid)
+ int nid, bool check_nid)
{
return 0;
}
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
new file mode 100644
index 000000000000..e791ebc65c9c
--- /dev/null
+++ b/include/linux/nospec.h
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Linus Torvalds. All rights reserved.
+// Copyright(c) 2018 Alexei Starovoitov. All rights reserved.
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+
+#ifndef _LINUX_NOSPEC_H
+#define _LINUX_NOSPEC_H
+#include <asm/barrier.h>
+
+/**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * When @index is out of bounds (@index >= @size), the sign bit will be
+ * set. Extend the sign bit to all bits and invert, giving a result of
+ * zero for an out of bounds index, or ~0 if within bounds [0, @size).
+ */
+#ifndef array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ /*
+ * Always calculate and emit the mask even if the compiler
+ * thinks the mask is not needed. The compiler does not take
+ * into account the value of @index under speculation.
+ */
+ OPTIMIZER_HIDE_VAR(index);
+ return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
+}
+#endif
+
+/*
+ * array_index_nospec - sanitize an array index after a bounds check
+ *
+ * For a code sequence like:
+ *
+ * if (index < size) {
+ * index = array_index_nospec(index, size);
+ * val = array[index];
+ * }
+ *
+ * ...if the CPU speculates past the bounds check then
+ * array_index_nospec() will clamp the index within the range of [0,
+ * size).
+ */
+#define array_index_nospec(index, size) \
+({ \
+ typeof(index) _i = (index); \
+ typeof(size) _s = (size); \
+ unsigned long _mask = array_index_mask_nospec(_i, _s); \
+ \
+ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
+ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
+ \
+ (typeof(_i)) (_i & _mask); \
+})
+#endif /* _LINUX_NOSPEC_H */
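A minimal usage sketch (not from this patch) of the new helper: clamp a user-controlled index after its bounds check so a mispredicted branch cannot be used to read out of bounds under speculation. The table, function name, and error code are illustrative only.

#include <linux/kernel.h>
#include <linux/nospec.h>

static const int example_table[16];

static int example_lookup(unsigned int index)
{
        if (index >= ARRAY_SIZE(example_table))
                return -EINVAL;

        /* Force index into [0, ARRAY_SIZE(example_table)) even under speculation. */
        index = array_index_nospec(index, ARRAY_SIZE(example_table));
        return example_table[index];
}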
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index c308964777eb..181d16601dd9 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -71,6 +71,7 @@ struct pci_dev;
* @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
* @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
* @NTB_TOPO_SWITCH: Connected via a switch which supports ntb.
+ * @NTB_TOPO_CROSSLINK: Connected via two symmetric switches
*/
enum ntb_topo {
NTB_TOPO_NONE = -1,
@@ -79,6 +80,7 @@ enum ntb_topo {
NTB_TOPO_B2B_USD,
NTB_TOPO_B2B_DSD,
NTB_TOPO_SWITCH,
+ NTB_TOPO_CROSSLINK,
};
static inline int ntb_topo_is_b2b(enum ntb_topo topo)
@@ -94,12 +96,13 @@ static inline int ntb_topo_is_b2b(enum ntb_topo topo)
static inline char *ntb_topo_string(enum ntb_topo topo)
{
switch (topo) {
- case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
- case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
- case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
- case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
- case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
- case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
+ case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
+ case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
+ case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
+ case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
+ case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
+ case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
+ case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK";
}
return "NTB_TOPO_INVALID";
}
@@ -250,7 +253,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
* @msg_set_mask: See ntb_msg_set_mask().
* @msg_clear_mask: See ntb_msg_clear_mask().
* @msg_read: See ntb_msg_read().
- * @msg_write: See ntb_msg_write().
+ * @peer_msg_write: See ntb_peer_msg_write().
*/
struct ntb_dev_ops {
int (*port_number)(struct ntb_dev *ntb);
@@ -321,8 +324,8 @@ struct ntb_dev_ops {
int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits);
int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits);
int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
- int (*msg_read)(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg);
- int (*msg_write)(struct ntb_dev *ntb, int midx, int pidx, u32 msg);
+ u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx);
+ int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg);
};
static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
@@ -384,7 +387,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
/* !ops->msg_set_mask == !ops->msg_count && */
/* !ops->msg_clear_mask == !ops->msg_count && */
!ops->msg_read == !ops->msg_count &&
- !ops->msg_write == !ops->msg_count &&
+ !ops->peer_msg_write == !ops->msg_count &&
1;
}
@@ -764,7 +767,7 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
resource_size_t *size_align,
resource_size_t *size_max)
{
- if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx)))
+ if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx)))
return -ENOTCONN;
return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
@@ -1459,31 +1462,29 @@ static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
}
/**
- * ntb_msg_read() - read message register with specified index
+ * ntb_msg_read() - read inbound message register with specified index
* @ntb: NTB device context.
- * @midx: Message register index
* @pidx: OUT - Port index of peer device a message retrieved from
- * @msg: OUT - Data
+ * @midx: Message register index
*
* Read data from the specified message register. Source port index of a
* message is retrieved as well.
*
- * Return: Zero on success, otherwise a negative error number.
+ * Return: The value of the inbound message register.
*/
-static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
- u32 *msg)
+static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
{
if (!ntb->ops->msg_read)
- return -EINVAL;
+ return ~(u32)0;
- return ntb->ops->msg_read(ntb, midx, pidx, msg);
+ return ntb->ops->msg_read(ntb, pidx, midx);
}
/**
- * ntb_msg_write() - write data to the specified message register
+ * ntb_peer_msg_write() - write data to the specified peer message register
* @ntb: NTB device context.
- * @midx: Message register index
* @pidx: Port index of peer device a message being sent to
+ * @midx: Message register index
* @msg: Data to send
*
* Send data to a specified peer device using the defined message register.
@@ -1492,13 +1493,13 @@ static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
*
* Return: Zero on success, otherwise a negative error number.
*/
-static inline int ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx,
- u32 msg)
+static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
+ u32 msg)
{
- if (!ntb->ops->msg_write)
+ if (!ntb->ops->peer_msg_write)
return -EINVAL;
- return ntb->ops->msg_write(ntb, midx, pidx, msg);
+ return ntb->ops->peer_msg_write(ntb, pidx, midx, msg);
}
#endif
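A hedged sketch (not from the patch) of the reworked message API: reads now return the register value directly and report the source port through @pidx, while writes go through ntb_peer_msg_write(). The register index 0 and the echo behaviour are illustrative only.

#include <linux/ntb.h>

static void example_msg_echo(struct ntb_dev *ntb)
{
        int pidx;
        u32 val;

        val = ntb_msg_read(ntb, &pidx, 0);      /* inbound register 0 */
        if (val != ~(u32)0)                     /* ~0 is also the "no ->msg_read" return */
                ntb_peer_msg_write(ntb, pidx, 0, val);
}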
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 11ce6b1117a8..6e8200215321 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -5,20 +5,36 @@
Originally written by Alan Cox.
Hacked to death by C. Scott Ananian and David Huggins-Daines.
-
- Some of the constants in here are from the corresponding
- NetBSD/OpenBSD header file, by Allen Briggs. We figured out the
- rest of them on our own. */
+*/
+
#ifndef LINUX_NUBUS_H
#define LINUX_NUBUS_H
+#include <linux/device.h>
#include <asm/nubus.h>
#include <uapi/linux/nubus.h>
+struct proc_dir_entry;
+struct seq_file;
+
+struct nubus_dir {
+ unsigned char *base;
+ unsigned char *ptr;
+ int done;
+ int mask;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_dirent {
+ unsigned char *base;
+ unsigned char type;
+ __u32 data; /* Actually 24 bits used */
+ int mask;
+};
+
struct nubus_board {
- struct nubus_board* next;
- struct nubus_dev* first_dev;
-
+ struct device dev;
+
/* Only 9-E actually exist, though 0-8 are also theoretically
possible, and 0 is a special case which represents the
motherboard and onboard peripherals (Ethernet, video) */
@@ -27,10 +43,10 @@ struct nubus_board {
char name[64];
/* Format block */
- unsigned char* fblock;
+ unsigned char *fblock;
/* Root directory (does *not* always equal fblock + doffset!) */
- unsigned char* directory;
-
+ unsigned char *directory;
+
unsigned long slot_addr;
/* Offset to root directory (sometimes) */
unsigned long doffset;
@@ -41,15 +57,15 @@ struct nubus_board {
unsigned char rev;
unsigned char format;
unsigned char lanes;
-};
-struct nubus_dev {
- /* Next link in device list */
- struct nubus_dev* next;
/* Directory entry in /proc/bus/nubus */
- struct proc_dir_entry* procdir;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_rsrc {
+ struct list_head list;
- /* The functional resource ID of this device */
+ /* The functional resource ID */
unsigned char resid;
/* These are mostly here for convenience; we could always read
them from the ROMs if we wanted to */
@@ -57,79 +73,116 @@ struct nubus_dev {
unsigned short type;
unsigned short dr_sw;
unsigned short dr_hw;
- /* This is the device's name rather than the board's.
- Sometimes they are different. Usually the board name is
- more correct. */
- char name[64];
- /* MacOS driver (I kid you not) */
- unsigned char* driver;
- /* Actually this is an offset */
- unsigned long iobase;
- unsigned long iosize;
- unsigned char flags, hwdevid;
-
+
/* Functional directory */
- unsigned char* directory;
+ unsigned char *directory;
/* Much of our info comes from here */
- struct nubus_board* board;
+ struct nubus_board *board;
+};
+
+/* List of all NuBus functional resources (used to find devices later on) */
+extern struct list_head nubus_func_rsrcs;
+
+struct nubus_driver {
+ struct device_driver driver;
+ int (*probe)(struct nubus_board *board);
+ int (*remove)(struct nubus_board *board);
};
-/* This is all NuBus devices (used to find devices later on) */
-extern struct nubus_dev* nubus_devices;
-/* This is all NuBus cards */
-extern struct nubus_board* nubus_boards;
+extern struct bus_type nubus_bus_type;
/* Generic NuBus interface functions, modelled after the PCI interface */
-void nubus_scan_bus(void);
#ifdef CONFIG_PROC_FS
-extern void nubus_proc_init(void);
+void nubus_proc_init(void);
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board);
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size);
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent);
#else
static inline void nubus_proc_init(void) {}
+static inline
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
+{ return NULL; }
+static inline
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
+{ return NULL; }
+static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size) {}
+static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent) {}
#endif
-int get_nubus_list(char *buf);
-int nubus_proc_attach_device(struct nubus_dev *dev);
-/* If we need more precision we can add some more of these */
-struct nubus_dev* nubus_find_device(unsigned short category,
- unsigned short type,
- unsigned short dr_hw,
- unsigned short dr_sw,
- const struct nubus_dev* from);
-struct nubus_dev* nubus_find_type(unsigned short category,
- unsigned short type,
- const struct nubus_dev* from);
-/* Might have more than one device in a slot, you know... */
-struct nubus_dev* nubus_find_slot(unsigned int slot,
- const struct nubus_dev* from);
+
+struct nubus_rsrc *nubus_first_rsrc_or_null(void);
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from);
+
+#define for_each_func_rsrc(f) \
+ for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f))
+
+#define for_each_board_func_rsrc(b, f) \
+ for_each_func_rsrc(f) if (f->board != b) {} else
/* These are somewhat more NuBus-specific. They all return 0 for
success and -1 for failure, as you'd expect. */
/* The root directory which contains the board and functional
directories */
-int nubus_get_root_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_root_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The board directory */
-int nubus_get_board_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_board_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The functional directory */
-int nubus_get_func_dir(const struct nubus_dev* dev,
- struct nubus_dir* dir);
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir);
/* These work on any directory gotten via the above */
-int nubus_readdir(struct nubus_dir* dir,
- struct nubus_dirent* ent);
-int nubus_find_rsrc(struct nubus_dir* dir,
+int nubus_readdir(struct nubus_dir *dir,
+ struct nubus_dirent *ent);
+int nubus_find_rsrc(struct nubus_dir *dir,
unsigned char rsrc_type,
- struct nubus_dirent* ent);
-int nubus_rewinddir(struct nubus_dir* dir);
+ struct nubus_dirent *ent);
+int nubus_rewinddir(struct nubus_dir *dir);
/* Things to do with directory entries */
-int nubus_get_subdir(const struct nubus_dirent* ent,
- struct nubus_dir* dir);
-void nubus_get_rsrc_mem(void* dest,
- const struct nubus_dirent *dirent,
- int len);
-void nubus_get_rsrc_str(void* dest,
- const struct nubus_dirent *dirent,
- int maxlen);
+int nubus_get_subdir(const struct nubus_dirent *ent,
+ struct nubus_dir *dir);
+void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
+
+/* Declarations relating to driver model objects */
+int nubus_bus_register(void);
+int nubus_device_register(struct nubus_board *board);
+int nubus_driver_register(struct nubus_driver *ndrv);
+void nubus_driver_unregister(struct nubus_driver *ndrv);
+int nubus_proc_show(struct seq_file *m, void *data);
+
+static inline void nubus_set_drvdata(struct nubus_board *board, void *data)
+{
+ dev_set_drvdata(&board->dev, data);
+}
+
+static inline void *nubus_get_drvdata(struct nubus_board *board)
+{
+ return dev_get_drvdata(&board->dev);
+}
+
+/* Returns a pointer to the "standard" slot space. */
+static inline void *nubus_slot_addr(int slot)
+{
+ return (void *)(0xF0000000 | (slot << 24));
+}
+
#endif /* LINUX_NUBUS_H */
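A hedged sketch (not part of the patch) of a client of the new NuBus driver model: walk the board's functional resources in probe() and register through nubus_driver_register(). The driver name and the counting logic are illustrative only.

#include <linux/device.h>
#include <linux/nubus.h>

static int example_probe(struct nubus_board *board)
{
        struct nubus_rsrc *fres;
        int count = 0;

        for_each_board_func_rsrc(board, fres)
                count++;

        dev_info(&board->dev, "%d functional resource(s)\n", count);
        return 0;
}

static int example_remove(struct nubus_board *board)
{
        return 0;
}

static struct nubus_driver example_nubus_driver = {
        .probe  = example_probe,
        .remove = example_remove,
        .driver = {
                .name = "example_nubus",
        },
};

/* A module init hook would call nubus_driver_register(&example_nubus_driver)
 * and the exit hook nubus_driver_unregister(&example_nubus_driver). */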
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index aea87f0d917b..4112e2bd747f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -124,14 +124,20 @@ enum {
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
-#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
-#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
-
-#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
-#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
-#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
-#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
-#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+
+enum {
+ NVME_CMBSZ_SQS = 1 << 0,
+ NVME_CMBSZ_CQS = 1 << 1,
+ NVME_CMBSZ_LISTS = 1 << 2,
+ NVME_CMBSZ_RDS = 1 << 3,
+ NVME_CMBSZ_WDS = 1 << 4,
+
+ NVME_CMBSZ_SZ_SHIFT = 12,
+ NVME_CMBSZ_SZ_MASK = 0xfffff,
+
+ NVME_CMBSZ_SZU_SHIFT = 8,
+ NVME_CMBSZ_SZU_MASK = 0xf,
+};
/*
* Submission and Completion Queue Entry Sizes for the NVM command set.
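A hedged sketch (not from the patch) of decoding the CMB size with the new constants that replace the removed NVME_CMB_SZ/NVME_CMB_SZU macros; the shift below encodes the NVMe rule that SZU selects a power-of-16 multiple of 4 KiB.

#include <linux/nvme.h>

static u64 example_cmb_size_bytes(u32 cmbsz)
{
        u32 szu = (cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
        u64 unit = 1ULL << (12 + 4 * szu);      /* 4 KiB * 16^SZU */

        return unit * ((cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK);
}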
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 497706f5adca..f89598bc4e1c 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -22,6 +22,31 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
+/**
+ * struct nvmem_config - NVMEM device configuration
+ *
+ * @dev: Parent device.
+ * @name: Optional name.
+ * @id: Optional device ID used in full name. Ignored if name is NULL.
+ * @owner: Pointer to exporter module. Used for refcounting.
+ * @cells: Optional array of pre-defined NVMEM cells.
+ * @ncells: Number of elements in cells.
+ * @read_only: Device is read-only.
+ * @root_only: Device is accessible to root only.
+ * @reg_read: Callback to read data.
+ * @reg_write: Callback to write data.
+ * @size: Device size.
+ * @word_size: Minimum read/write access granularity.
+ * @stride: Minimum read/write access stride.
+ * @priv: User context passed to read/write callbacks.
+ *
+ * Note: A default "nvmem<id>" name will be assigned to the device if
+ * no name is specified in its configuration. In that case "<id>" is
+ * generated with ida_simple_get() and the provided id field is ignored.
+ *
+ * Note: Specifying name and setting id to -1 implies a unique device
+ * whose name is provided as-is (kept unaltered).
+ */
struct nvmem_config {
struct device *dev;
const char *name;
@@ -47,6 +72,11 @@ struct nvmem_config {
struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
int nvmem_unregister(struct nvmem_device *nvmem);
+struct nvmem_device *devm_nvmem_register(struct device *dev,
+ const struct nvmem_config *cfg);
+
+int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
+
#else
static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
@@ -59,5 +89,17 @@ static inline int nvmem_unregister(struct nvmem_device *nvmem)
return -ENOSYS;
}
+static inline struct nvmem_device *
+devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
+{
+ return nvmem_register(c);
+}
+
+static inline int
+devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
+{
+ return nvmem_unregister(nvmem);
+}
+
#endif /* CONFIG_NVMEM */
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
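A hedged sketch (not part of the patch) of the new device-managed registration helper: the nvmem device is torn down automatically when the providing device goes away, so an explicit devm_nvmem_unregister() call is rarely needed. The name, size, and stride values are illustrative only.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>

static int example_register(struct device *dev, void *priv,
                            nvmem_reg_read_t rd, nvmem_reg_write_t wr)
{
        struct nvmem_config cfg = {
                .dev            = dev,
                .name           = "example-eeprom",
                .id             = -1,           /* keep the name as-is */
                .owner          = THIS_MODULE,
                .size           = 256,
                .word_size      = 1,
                .stride         = 1,
                .reg_read       = rd,
                .reg_write      = wr,
                .priv           = priv,
        };
        struct nvmem_device *nvmem = devm_nvmem_register(dev, &cfg);

        return PTR_ERR_OR_ZERO(nvmem);
}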
diff --git a/include/linux/of.h b/include/linux/of.h
index d3dea1d1e3a9..4d25e4f952d9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_OF_H
#define _LINUX_OF_H
/*
@@ -9,11 +10,6 @@
* Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
* Updates for SPARC64 by David S. Miller
* Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/bitops.h>
@@ -367,6 +363,9 @@ extern struct device_node *of_parse_phandle(const struct device_node *np,
extern int of_parse_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name, int index,
struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_args_map(const struct device_node *np,
+ const char *list_name, const char *stem_name, int index,
+ struct of_phandle_args *out_args);
extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
const char *list_name, int cells_count, int index,
struct of_phandle_args *out_args);
@@ -544,6 +543,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
bool of_console_check(struct device_node *dn, char *name, int index);
+extern int of_cpu_node_to_id(struct device_node *np);
+
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -817,6 +818,15 @@ static inline int of_parse_phandle_with_args(const struct device_node *np,
return -ENOSYS;
}
+static inline int of_parse_phandle_with_args_map(const struct device_node *np,
+ const char *list_name,
+ const char *stem_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return -ENOSYS;
+}
+
static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
const char *list_name, int cells_count, int index,
struct of_phandle_args *out_args)
@@ -916,6 +926,11 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
+static inline int of_cpu_node_to_id(struct device_node *np)
+{
+ return -ENODEV;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -1356,8 +1371,8 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
-/* ID based overlays; the API for external users */
-int of_overlay_apply(struct device_node *tree, int *ovcs_id);
+int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id);
int of_overlay_remove(int *ovcs_id);
int of_overlay_remove_all(void);
@@ -1366,7 +1381,7 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_apply(struct device_node *tree, int *ovcs_id)
+static inline int of_overlay_fdt_apply(void *overlay_fdt, int *ovcs_id)
{
return -ENOTSUPP;
}
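A hedged sketch (not from the patch) of the new map-aware phandle parser: it behaves like of_parse_phandle_with_args() but, assuming the usual "<stem>-map" translation convention, can remap the target through intermediate nodes. The "examples"/"example" property names are hypothetical.

#include <linux/of.h>

static int example_parse_first(struct device_node *np)
{
        struct of_phandle_args args;
        int ret;

        ret = of_parse_phandle_with_args_map(np, "examples", "example", 0,
                                             &args);
        if (ret)
                return ret;

        /* args.np / args.args[] now describe the (possibly remapped) target. */
        of_node_put(args.np);
        return 0;
}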
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index b90d8ec57c1f..fd706cdf255c 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OF helpers for DMA request / controller
*
* Based on of_gpio.h
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_OF_DMA_H
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 013c5418aeec..b9cd9ebdf9b9 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for working with the Flattened Device Tree data format
*
* Copyright 2009 Benjamin Herrenschmidt, IBM Corp
* benh@kernel.crashing.org
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _LINUX_OF_FDT_H
@@ -47,6 +44,12 @@ extern void *initial_boot_params;
extern char __dtb_start[];
extern char __dtb_end[];
+/* Other Prototypes */
+extern u64 of_flat_dt_translate_address(unsigned long node);
+extern void of_fdt_limit_memory(int limit);
+#endif /* CONFIG_OF_FLATTREE */
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
/* For scanning the flat device-tree at boot time */
extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
int depth, void *data),
@@ -77,7 +80,6 @@ extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
-extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
@@ -97,16 +99,14 @@ extern void unflatten_device_tree(void);
extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
-extern u64 of_flat_dt_translate_address(unsigned long node);
-extern void of_fdt_limit_memory(int limit);
-#else /* CONFIG_OF_FLATTREE */
+#else /* CONFIG_OF_EARLY_FLATTREE */
static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}
-#endif /* CONFIG_OF_FLATTREE */
+#endif /* CONFIG_OF_EARLY_FLATTREE */
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_OF_FDT_H */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 1fe205582111..163b79ecd01a 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* OF helpers for the GPIO API
*
* Copyright (c) 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_OF_GPIO_H
@@ -31,7 +27,7 @@ enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_SLEEP_MAY_LOSE_VALUE = 0x8,
+ OF_GPIO_TRANSITORY = 0x8,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 3e058f05ab04..01038a6aade0 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OF graph binding parsing helpers
*
@@ -6,10 +7,6 @@
*
* Copyright (C) 2012 Renesas Electronics Corp.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_OF_GRAPH_H
#define __LINUX_OF_GRAPH_H
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index cddfaff4d0b7..4fa654e4b5a9 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -34,9 +34,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
extern struct of_device_id __iommu_of_table;
-typedef int (*of_iommu_init_fn)(struct device_node *);
-
-#define IOMMU_OF_DECLARE(name, compat, fn) \
- _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
+#define IOMMU_OF_DECLARE(name, compat) OF_DECLARE_1(iommu, name, compat, NULL)
#endif /* __OF_IOMMU_H */
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 9cd72aab76fe..90d81ee9e6a0 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -13,6 +13,7 @@
struct net_device;
extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
+extern int of_get_nvmem_mac_address(struct device_node *np, void *addr);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np)
@@ -25,6 +26,11 @@ static inline const void *of_get_mac_address(struct device_node *np)
return NULL;
}
+static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr)
+{
+ return -ENODEV;
+}
+
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
{
return NULL;
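A hedged sketch (not part of the patch) showing how a network driver might try the new NVMEM-backed MAC lookup first and fall back to the traditional DT properties; the function name and fallback policy are illustrative only.

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static int example_get_mac(struct device_node *np, u8 addr[ETH_ALEN])
{
        const void *mac;

        if (!of_get_nvmem_mac_address(np, addr))
                return 0;

        mac = of_get_mac_address(np);
        if (!mac)
                return -ENODEV;

        ether_addr_copy(addr, mac);
        return 0;
}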
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index bf588a05d0d0..091033a6b836 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -9,12 +9,10 @@ struct pci_dev;
struct of_phandle_args;
struct device_node;
-#ifdef CONFIG_OF_PCI
-int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI)
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_pci_get_devfn(struct device_node *np);
-int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
@@ -23,11 +21,6 @@ int of_pci_map_rid(struct device_node *np, u32 rid,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
#else
-static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
-{
- return 0;
-}
-
static inline struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn)
{
@@ -40,12 +33,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
}
static inline int
-of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return 0;
-}
-
-static inline int
of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
return -EINVAL;
@@ -73,6 +60,16 @@ of_pci_get_max_link_speed(struct device_node *node)
static inline void of_pci_check_probe_only(void) { }
#endif
+#if IS_ENABLED(CONFIG_OF_IRQ)
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
+#else
+static inline int
+of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return 0;
+}
+#endif
+
#if defined(CONFIG_OF_ADDRESS)
int of_pci_get_host_bridge_resources(struct device_node *dev,
unsigned char busno, unsigned char bus_max,
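A hedged sketch (not from the patch): of_irq_parse_and_map_pci() is now guarded by CONFIG_OF_IRQ rather than CONFIG_OF_PCI, but its typical use as a host bridge's INTx mapping callback is unchanged. The helper function name is hypothetical.

#include <linux/of_pci.h>
#include <linux/pci.h>

static void example_wire_irqs(struct pci_host_bridge *bridge)
{
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;
}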
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index 7e09244bb679..d0b183ab65c6 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for building a device tree by calling into the
* Open Firmware PROM.
*
* Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_OF_PDT_H
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index fb908e598348..84a966623e78 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -1,14 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_OF_PLATFORM_H
#define _LINUX_OF_PLATFORM_H
/*
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#include <linux/device.h>
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index edfa280c3d56..053feb41510a 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -25,15 +25,43 @@ struct gpmc_nand_ops {
struct gpmc_nand_regs;
+struct gpmc_onenand_info {
+ bool sync_read;
+ bool sync_write;
+ int burst_len;
+};
+
#if IS_ENABLED(CONFIG_OMAP_GPMC)
struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs);
+/**
+ * gpmc_omap_onenand_set_timings - set optimized sync timings.
+ * @cs: Chip Select Region
+ * @freq: Chip frequency in MHz
+ * @latency: Burst latency cycle count
+ * @info: Structure describing parameters used
+ *
+ * Sets optimized timings for the @cs region based on @freq and @latency.
+ * Updates the @info structure based on the GPMC settings.
+ */
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info);
+
#else
static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs)
{
return NULL;
}
+
+static inline
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_OMAP_GPMC */
extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
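A hedged sketch (not part of the patch) of a OneNAND child driver asking GPMC for optimized synchronous timings and reading back the negotiated burst settings; the chip-select number and the 66 MHz / 8-cycle latency values are assumptions for illustration.

#include <linux/device.h>
#include <linux/omap-gpmc.h>

static int example_onenand_sync(struct device *dev, int cs)
{
        struct gpmc_onenand_info info;
        int ret;

        ret = gpmc_omap_onenand_set_timings(dev, cs, 66, 8, &info);
        if (ret)
                return ret;

        dev_info(dev, "sync read %d, sync write %d, burst_len %d\n",
                 info.sync_read, info.sync_write, info.burst_len);
        return 0;
}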
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3ec44e27aa9d..e34a27727b9a 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -46,11 +46,6 @@
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
*
- * PG_highmem pages are not permanently mapped into the kernel virtual address
- * space, they need to be kmapped separately for doing IO on the pages. The
- * struct page (these bits with information) are always mapped into kernel
- * address space...
- *
* PG_hwpoison indicates that a page got corrupted in hardware and contains
* data with incorrect ECC bits that triggered a machine check. Accessing is
* not safe since it may cause another machine check. Don't touch!
@@ -161,9 +156,18 @@ static __always_inline int PageCompound(struct page *page)
return test_bit(PG_head, &page->flags) || PageTail(page);
}
+#define PAGE_POISON_PATTERN -1l
+static inline int PagePoisoned(const struct page *page)
+{
+ return page->flags == PAGE_POISON_PATTERN;
+}
+
/*
* Page flags policies wrt compound pages
*
+ * PF_POISONED_CHECK
+ * check if this struct page is poisoned/uninitialized
+ *
* PF_ANY:
* the page flag is relevant for small, head and tail pages.
*
@@ -181,17 +185,20 @@ static __always_inline int PageCompound(struct page *page)
* PF_NO_COMPOUND:
* the page flag is not relevant for compound pages.
*/
-#define PF_ANY(page, enforce) page
-#define PF_HEAD(page, enforce) compound_head(page)
+#define PF_POISONED_CHECK(page) ({ \
+ VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
+ page; })
+#define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
+#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(PageTail(page), page); \
- page;})
+ PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
- compound_head(page);})
+ PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
- page;})
+ PF_POISONED_CHECK(page); })
/*
* Macros to create function definitions for page flags
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index cdad58bbfd8b..4ae347cbc36d 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -63,7 +63,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages);
-struct page *alloc_migrate_target(struct page *page, unsigned long private,
- int **resultp);
+struct page *alloc_migrate_target(struct page *page, unsigned long private);
#endif
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 760d74a0e9a9..14d14beb1f7f 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -175,8 +175,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
- smp_mb();
- atomic_set(&page->_refcount, count);
+ atomic_set_release(&page->_refcount, count);
if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 34ce3ebf97d5..b1bd2186e6d2 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -144,7 +144,7 @@ void release_pages(struct page **pages, int nr);
* 3. check the page is still in pagecache (if no, goto 1)
*
* Remove-side that cares about stability of _refcount (eg. reclaim) has the
- * following (with tree_lock held for write):
+ * following (with the i_pages lock held):
* A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
* B. remove page from pagecache
* C. free the page
@@ -157,7 +157,7 @@ void release_pages(struct page **pages, int nr);
*
* It is possible that between 1 and 2, the page is removed then the exact same
* page is inserted into the same position in pagecache. That's OK: the
- * old find_get_page using tree_lock could equally have run before or after
+ * old find_get_page using a lock could equally have run before or after
* such a re-insertion, depending on order that locks are granted.
*
* Lookups racing against pagecache insertion isn't a big problem: either 1
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 5fb6580f7f23..6dc456ac6136 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -9,14 +9,14 @@
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H
-/* 14 pointers + two long's align the pagevec structure to a power of two */
-#define PAGEVEC_SIZE 14
+/* 15 pointers + header align the pagevec structure to a power of two */
+#define PAGEVEC_SIZE 15
struct page;
struct address_space;
struct pagevec {
- unsigned long nr;
+ unsigned char nr;
bool percpu_pvec_drained;
struct page *pages[PAGEVEC_SIZE];
};
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
index 3cc06b059017..df28af5cef21 100644
--- a/include/linux/pci-aspm.h
+++ b/include/linux/pci-aspm.h
@@ -24,43 +24,12 @@
#define PCIE_LINK_STATE_CLKPM 4
#ifdef CONFIG_PCIEASPM
-void pcie_aspm_init_link_state(struct pci_dev *pdev);
-void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
-void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
void pci_disable_link_state(struct pci_dev *pdev, int state);
void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
void pcie_no_aspm(void);
#else
-static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
-{
-}
-static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev)
-{
-}
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev)
-{
-}
-static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
-{
-}
-static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
-{
-}
-static inline void pcie_no_aspm(void)
-{
-}
+static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { }
+static inline void pcie_no_aspm(void) { }
#endif
-#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
-#else
-static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
-{
-}
-static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
-{
-}
-#endif
#endif /* LINUX_ASPM_H */
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index d1f9fdade1e0..0dd1a3f7b309 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -17,91 +17,90 @@ static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
- return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
+ return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
static inline void *
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
- return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
- size, dma_handle, GFP_ATOMIC);
+ return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
+ dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
}
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
- return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
+ return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
- dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
+ dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
}
static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
{
- return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
+ return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
size_t size, int direction)
{
- dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
+ dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
}
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
- return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+ return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
- dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+ dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
- dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+ dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
- dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+ dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
- dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+ dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
- dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+ dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
static inline int
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 809c2f1873ac..baadad1aabbc 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation (the "GPL").
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 (GPLv2) for more details.
- *
- * You should have received a copy of the GNU General Public License
- * version 2 (GPLv2) along with this source code.
*/
#ifndef DRIVERS_PCI_ECAM_H
#define DRIVERS_PCI_ECAM_H
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
index 263b89ea5705..f42b0fd4b4bc 100644
--- a/include/linux/pci-ep-cfs.h
+++ b/include/linux/pci-ep-cfs.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/**
* PCI Endpoint ConfigFS header file
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
*/
#ifndef __LINUX_PCI_EP_CFS_H
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index f7a04e1af112..af657ca58b70 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* PCI Endpoint *Controller* (EPC) header file
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
*/
#ifndef __LINUX_PCI_EPC_H
@@ -39,17 +36,19 @@ enum pci_epc_irq_type {
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
- int (*write_header)(struct pci_epc *pci_epc,
+ int (*write_header)(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr);
- int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags);
- void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
- int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
- u64 pci_addr, size_t size);
- void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
- int (*set_msi)(struct pci_epc *epc, u8 interrupts);
- int (*get_msi)(struct pci_epc *epc);
- int (*raise_irq)(struct pci_epc *pci_epc,
+ int (*set_bar)(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar);
+ void (*clear_bar)(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar);
+ int (*map_addr)(struct pci_epc *epc, u8 func_no,
+ phys_addr_t addr, u64 pci_addr, size_t size);
+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
+ phys_addr_t addr);
+ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc, u8 func_no);
+ int (*raise_irq)(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
@@ -124,17 +123,21 @@ void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
void pci_epc_linkup(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
-int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
-int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags);
-void pci_epc_clear_bar(struct pci_epc *epc, int bar);
-int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_header *hdr);
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar);
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar);
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+ phys_addr_t phys_addr,
u64 pci_addr, size_t size);
-void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
-int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
-int pci_epc_get_msi(struct pci_epc *epc);
-int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
- u8 interrupt_num);
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+ phys_addr_t phys_addr);
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+ enum pci_epc_irq_type type, u8 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
struct pci_epc *pci_epc_get(const char *epc_name);
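A hedged sketch (not from the patch) of the per-function EPC API: every call now carries the endpoint function number, and BAR setup is described by a struct pci_epf_bar instead of loose arguments. The BAR number and flags value are illustrative only.

#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_setup_bar0(struct pci_epc *epc, u8 func_no,
                              dma_addr_t phys, size_t size)
{
        struct pci_epf_bar bar = {
                .phys_addr      = phys,
                .size           = size,
                .barno          = BAR_0,
                .flags          = PCI_BASE_ADDRESS_MEM_TYPE_32,
        };

        return pci_epc_set_bar(epc, func_no, &bar);
}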
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 60d551a9a1ba..f7d6f4883f8b 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* PCI Endpoint *Function* (EPF) header file
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
*/
#ifndef __LINUX_PCI_EPF_H
@@ -100,6 +97,8 @@ struct pci_epf_driver {
struct pci_epf_bar {
dma_addr_t phys_addr;
size_t size;
+ enum pci_barno barno;
+ int flags;
};
/**
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c170c9250c8b..73178a2fcee0 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -48,17 +48,17 @@
* In the interest of not exposing interfaces to user-space unnecessarily,
* the following kernel-only defines are being added here.
*/
-#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
/* pci_slot represents a physical slot */
struct pci_slot {
- struct pci_bus *bus; /* The bus this slot is on */
- struct list_head list; /* node in list of slots on this bus */
- struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
- unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
- struct kobject kobj;
+ struct pci_bus *bus; /* Bus this slot is on */
+ struct list_head list; /* Node in list of slots */
+ struct hotplug_slot *hotplug; /* Hotplug info (move here) */
+ unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
+ struct kobject kobj;
};
static inline const char *pci_slot_name(const struct pci_slot *slot)
@@ -72,9 +72,7 @@ enum pci_mmap_state {
pci_mmap_mem
};
-/*
- * For PCI devices, the region numbers are assigned this way:
- */
+/* For PCI devices, the region numbers are assigned this way: */
enum {
/* #0-5: standard PCI resources */
PCI_STD_RESOURCES,
@@ -83,23 +81,23 @@ enum {
/* #6: expansion ROM resource */
PCI_ROM_RESOURCE,
- /* device specific resources */
+ /* Device-specific resources */
#ifdef CONFIG_PCI_IOV
PCI_IOV_RESOURCES,
PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif
- /* resources assigned to buses behind the bridge */
+ /* Resources assigned to buses behind the bridge */
#define PCI_BRIDGE_RESOURCE_NUM 4
PCI_BRIDGE_RESOURCES,
PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
PCI_BRIDGE_RESOURCE_NUM - 1,
- /* total resources associated with a PCI device */
+ /* Total resources associated with a PCI device */
PCI_NUM_RESOURCES,
- /* preserve this for compatibility */
+ /* Preserve this for compatibility */
DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
@@ -152,9 +150,10 @@ static inline const char *pci_power_name(pci_power_t state)
#define PCI_PM_D3COLD_WAIT 100
#define PCI_PM_BUS_WAIT 50
-/** The pci_channel state describes connectivity between the CPU and
- * the pci device. If some PCI bus between here and the pci device
- * has crashed or locked up, this info is reflected here.
+/**
+ * The pci_channel state describes connectivity between the CPU and
+ * the PCI device. If some PCI bus between here and the PCI device
+ * has crashed or locked up, this info is reflected here.
*/
typedef unsigned int __bitwise pci_channel_state_t;
@@ -184,9 +183,7 @@ enum pcie_reset_state {
typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
- /* INTX_DISABLE in PCI_COMMAND register disables MSI
- * generation too.
- */
+ /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
/* Device configuration is irrevocably lost if disabled into D3 */
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
@@ -202,7 +199,7 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
/* Get VPD from function 0 VPD */
PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
- /* a non-root bridge where translation occurs, stop alias search here */
+ /* A non-root bridge where translation occurs, stop alias search here */
PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
/* Do not use FLR even if device advertises PCI_AF_CAP */
PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
@@ -222,17 +219,17 @@ enum pci_bus_flags {
PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
};
-/* These values come from the PCI Express Spec */
+/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
PCIE_LNK_WIDTH_RESRV = 0x00,
PCIE_LNK_X1 = 0x01,
PCIE_LNK_X2 = 0x02,
PCIE_LNK_X4 = 0x04,
PCIE_LNK_X8 = 0x08,
- PCIE_LNK_X12 = 0x0C,
+ PCIE_LNK_X12 = 0x0c,
PCIE_LNK_X16 = 0x10,
PCIE_LNK_X32 = 0x20,
- PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+ PCIE_LNK_WIDTH_UNKNOWN = 0xff,
};
/* Based on the PCI Hotplug Spec, but some values are made up by us */
@@ -259,19 +256,20 @@ enum pci_bus_speed {
PCIE_SPEED_2_5GT = 0x14,
PCIE_SPEED_5_0GT = 0x15,
PCIE_SPEED_8_0GT = 0x16,
+ PCIE_SPEED_16_0GT = 0x17,
PCI_SPEED_UNKNOWN = 0xff,
};
struct pci_cap_saved_data {
- u16 cap_nr;
- bool cap_extended;
- unsigned int size;
- u32 data[0];
+ u16 cap_nr;
+ bool cap_extended;
+ unsigned int size;
+ u32 data[0];
};
struct pci_cap_saved_state {
- struct hlist_node next;
- struct pci_cap_saved_data cap;
+ struct hlist_node next;
+ struct pci_cap_saved_data cap;
};
struct irq_affinity;
@@ -280,19 +278,17 @@ struct pci_vpd;
struct pci_sriov;
struct pci_ats;
-/*
- * The pci_dev structure is used to describe PCI devices.
- */
+/* The pci_dev structure describes PCI devices */
struct pci_dev {
- struct list_head bus_list; /* node in per-bus list */
- struct pci_bus *bus; /* bus this device is on */
- struct pci_bus *subordinate; /* bus this device bridges to */
+ struct list_head bus_list; /* Node in per-bus list */
+ struct pci_bus *bus; /* Bus this device is on */
+ struct pci_bus *subordinate; /* Bus this device bridges to */
- void *sysdata; /* hook for sys-specific extension */
- struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
+ void *sysdata; /* Hook for sys-specific extension */
+ struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
struct pci_slot *slot; /* Physical slot this device is in */
- unsigned int devfn; /* encoded device & function index */
+ unsigned int devfn; /* Encoded device & function index */
unsigned short vendor;
unsigned short device;
unsigned short subsystem_vendor;
@@ -307,12 +303,12 @@ struct pci_dev {
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
- u8 rom_base_reg; /* which config register controls the ROM */
- u8 pin; /* which interrupt pin this device uses */
- u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
- unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */
+ u8 rom_base_reg; /* Config register controlling ROM */
+ u8 pin; /* Interrupt pin this device uses */
+ u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
+ unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
- struct pci_driver *driver; /* which driver has allocated this device */
+ struct pci_driver *driver; /* Driver bound to this device */
u64 dma_mask; /* Mask of the bits of bus address this
device implements. Normally this is
0xffffffff. You only need to change
@@ -321,9 +317,9 @@ struct pci_dev {
struct device_dma_parameters dma_parms;
- pci_power_t current_state; /* Current operating state. In ACPI-speak,
- this is D0-D3, D0 being fully functional,
- and D3 being off. */
+ pci_power_t current_state; /* Current operating state. In ACPI,
+ this is D0-D3, D0 being fully
+ functional, and D3 being off. */
u8 pm_cap; /* PM capability offset */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
@@ -334,10 +330,10 @@ struct pci_dev {
unsigned int no_d3cold:1; /* D3cold is forbidden */
unsigned int bridge_d3:1; /* Allow D3 for bridge */
unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
- unsigned int mmio_always_on:1; /* disallow turning off io/mem
- decoding during bar sizing */
+ unsigned int mmio_always_on:1; /* Disallow turning off io/mem
+ decoding during BAR sizing */
unsigned int wakeup_prepared:1;
- unsigned int runtime_d3cold:1; /* whether go through runtime
+ unsigned int runtime_d3cold:1; /* Whether to go through runtime
D3cold, not set for devices
powered on/off by the
corresponding bridge */
@@ -350,12 +346,14 @@ struct pci_dev {
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
+ unsigned int ltr_path:1; /* Latency Tolerance Reporting
+ supported from root to here */
#endif
- pci_channel_state_t error_state; /* current connectivity state */
- struct device dev; /* Generic device interface */
+ pci_channel_state_t error_state; /* Current connectivity state */
+ struct device dev; /* Generic device interface */
- int cfg_size; /* Size of configuration space */
+ int cfg_size; /* Size of config space */
/*
* Instead of touching interrupt line and base address registers
@@ -364,47 +362,47 @@ struct pci_dev {
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
- bool match_driver; /* Skip attaching driver */
- /* These fields are used by common fixups */
- unsigned int transparent:1; /* Subtractive decode PCI bridge */
- unsigned int multifunction:1;/* Part of multi-function device */
- /* keep track of device state */
+ bool match_driver; /* Skip attaching driver */
+
+ unsigned int transparent:1; /* Subtractive decode bridge */
+ unsigned int multifunction:1; /* Multi-function device */
+
unsigned int is_added:1;
- unsigned int is_busmaster:1; /* device is busmaster */
- unsigned int no_msi:1; /* device may not use msi */
- unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
- unsigned int block_cfg_access:1; /* config space access is blocked */
- unsigned int broken_parity_status:1; /* Device generates false positive parity */
- unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
+ unsigned int is_busmaster:1; /* Is busmaster */
+ unsigned int no_msi:1; /* May not use MSI */
+ unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
+ unsigned int block_cfg_access:1; /* Config space access blocked */
+ unsigned int broken_parity_status:1; /* Generates false positive parity */
+ unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
- unsigned int ari_enabled:1; /* ARI forwarding */
- unsigned int ats_enabled:1; /* Address Translation Service */
+ unsigned int ari_enabled:1; /* ARI forwarding */
+ unsigned int ats_enabled:1; /* Address Translation Svc */
unsigned int pasid_enabled:1; /* Process Address Space ID */
unsigned int pri_enabled:1; /* Page Request Interface */
unsigned int is_managed:1;
- unsigned int needs_freset:1; /* Dev requires fundamental reset */
+ unsigned int needs_freset:1; /* Requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
- unsigned int is_hotplug_bridge:1;
- unsigned int is_thunderbolt:1; /* Thunderbolt controller */
- unsigned int __aer_firmware_first_valid:1;
+ unsigned int is_hotplug_bridge:1;
+ unsigned int is_thunderbolt:1; /* Thunderbolt controller */
+ unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
- unsigned int broken_intx_masking:1; /* INTx masking can't be used */
- unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
+ unsigned int broken_intx_masking:1; /* INTx masking can't be used */
+ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
unsigned int irq_managed:1;
unsigned int has_secondary_link:1;
- unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
- unsigned int is_probed:1; /* device probing in progress */
+ unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
+ unsigned int is_probed:1; /* Device probing in progress */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
- u32 saved_config_space[16]; /* config space saved at suspend time */
+ u32 saved_config_space[16]; /* Config space saved at suspend time */
struct hlist_head saved_cap_space;
- struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
- int rom_attr_enabled; /* has display of the rom attribute been enabled? */
+ struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */
+ int rom_attr_enabled; /* Display of ROM attribute enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
@@ -419,12 +417,12 @@ struct pci_dev {
struct pci_vpd *vpd;
#ifdef CONFIG_PCI_ATS
union {
- struct pci_sriov *sriov; /* SR-IOV capability related */
- struct pci_dev *physfn; /* the PF this VF is associated with */
+ struct pci_sriov *sriov; /* PF: SR-IOV info */
+ struct pci_dev *physfn; /* VF: related PF */
};
u16 ats_cap; /* ATS Capability offset */
u8 ats_stu; /* ATS Smallest Translation Unit */
- atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
+ atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */
#endif
#ifdef CONFIG_PCI_PRI
u32 pri_reqs_alloc; /* Number of PRI requests allocated */
@@ -432,11 +430,11 @@ struct pci_dev {
#ifdef CONFIG_PCI_PASID
u16 pasid_features;
#endif
- phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
- size_t romlen; /* Length of ROM if it's not from the BAR */
- char *driver_override; /* Driver name to force a match */
+ phys_addr_t rom; /* Physical address if not from BAR */
+ size_t romlen; /* Length if not from BAR */
+ char *driver_override; /* Driver name to force a match */
- unsigned long priv_flags; /* Private flags for the pci driver */
+ unsigned long priv_flags; /* Private flags for the PCI driver */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -459,26 +457,29 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
}
struct pci_host_bridge {
- struct device dev;
- struct pci_bus *bus; /* root bus */
- struct pci_ops *ops;
- void *sysdata;
- int busnr;
+ struct device dev;
+ struct pci_bus *bus; /* Root bus */
+ struct pci_ops *ops;
+ void *sysdata;
+ int busnr;
struct list_head windows; /* resource_entry */
- u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */
+ u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
- void *release_data;
+ void *release_data;
struct msi_controller *msi;
- unsigned int ignore_reset_delay:1; /* for entire hierarchy */
- unsigned int no_ext_tags:1; /* no Extended Tags */
+ unsigned int ignore_reset_delay:1; /* For entire hierarchy */
+ unsigned int no_ext_tags:1; /* No Extended Tags */
+ unsigned int native_aer:1; /* OS may use PCIe AER */
+ unsigned int native_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_pme:1; /* OS may use PCIe PME */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
resource_size_t size,
resource_size_t align);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[0] ____cacheline_aligned;
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@ -500,8 +501,8 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
- void (*release_fn)(struct pci_host_bridge *),
- void *release_data);
+ void (*release_fn)(struct pci_host_bridge *),
+ void *release_data);
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
@@ -521,32 +522,32 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
#define PCI_SUBTRACTIVE_DECODE 0x1
struct pci_bus_resource {
- struct list_head list;
- struct resource *res;
- unsigned int flags;
+ struct list_head list;
+ struct resource *res;
+ unsigned int flags;
};
#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
struct pci_bus {
- struct list_head node; /* node in list of buses */
- struct pci_bus *parent; /* parent bus this bridge is on */
- struct list_head children; /* list of child buses */
- struct list_head devices; /* list of devices on this bus */
- struct pci_dev *self; /* bridge device as seen by parent */
- struct list_head slots; /* list of slots on this bus;
+ struct list_head node; /* Node in list of buses */
+ struct pci_bus *parent; /* Parent bus this bridge is on */
+ struct list_head children; /* List of child buses */
+ struct list_head devices; /* List of devices on this bus */
+ struct pci_dev *self; /* Bridge device as seen by parent */
+ struct list_head slots; /* List of slots on this bus;
protected by pci_slot_mutex */
struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
- struct list_head resources; /* address space routed to this bus */
- struct resource busn_res; /* bus numbers routed to this bus */
+ struct list_head resources; /* Address space routed to this bus */
+ struct resource busn_res; /* Bus numbers routed to this bus */
- struct pci_ops *ops; /* configuration access functions */
+ struct pci_ops *ops; /* Configuration access functions */
struct msi_controller *msi; /* MSI controller */
- void *sysdata; /* hook for sys-specific extension */
- struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
+ void *sysdata; /* Hook for sys-specific extension */
+ struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
- unsigned char number; /* bus number */
- unsigned char primary; /* number of primary bridge */
+ unsigned char number; /* Bus number */
+ unsigned char primary; /* Number of primary bridge */
unsigned char max_bus_speed; /* enum pci_bus_speed */
unsigned char cur_bus_speed; /* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
@@ -555,12 +556,12 @@ struct pci_bus {
char name[48];
- unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
- pci_bus_flags_t bus_flags; /* inherited by child buses */
+ unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
+ pci_bus_flags_t bus_flags; /* Inherited by child buses */
struct device *bridge;
struct device dev;
- struct bin_attribute *legacy_io; /* legacy I/O for this bus */
- struct bin_attribute *legacy_mem; /* legacy mem */
+ struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
+ struct bin_attribute *legacy_mem; /* Legacy mem */
unsigned int is_added:1;
};
@@ -617,9 +618,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif
-/*
- * Error values that may be returned by PCI functions.
- */
+/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL 0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
#define PCIBIOS_BAD_VENDOR_ID 0x83
@@ -628,9 +627,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
#define PCIBIOS_SET_FAILED 0x88
#define PCIBIOS_BUFFER_TOO_SMALL 0x89
-/*
- * Translate above to generic errno for passing back through non-PCI code.
- */
+/* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
{
if (err <= PCIBIOS_SUCCESSFUL)
@@ -680,13 +677,13 @@ typedef u32 pci_bus_addr_t;
#endif
struct pci_bus_region {
- pci_bus_addr_t start;
- pci_bus_addr_t end;
+ pci_bus_addr_t start;
+ pci_bus_addr_t end;
};
struct pci_dynids {
- spinlock_t lock; /* protects list, index */
- struct list_head list; /* for IDs added at runtime */
+ spinlock_t lock; /* Protects list, index */
+ struct list_head list; /* For IDs added at runtime */
};
@@ -700,13 +697,13 @@ struct pci_dynids {
typedef unsigned int __bitwise pci_ers_result_t;
enum pci_ers_result {
- /* no result/none/not supported in device driver */
+ /* No result/none/not supported in device driver */
PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
/* Device driver can recover without slot reset */
PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
- /* Device driver wants slot to be reset. */
+ /* Device driver wants slot to be reset */
PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
/* Device has completely failed, is unrecoverable */
@@ -742,27 +739,27 @@ struct pci_error_handlers {
struct module;
struct pci_driver {
- struct list_head node;
- const char *name;
- const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
- int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
- void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
- int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
- int (*suspend_late) (struct pci_dev *dev, pm_message_t state);
- int (*resume_early) (struct pci_dev *dev);
- int (*resume) (struct pci_dev *dev); /* Device woken up */
+ struct list_head node;
+ const char *name;
+ const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
+ int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
+ void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
+ int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
+ int (*suspend_late)(struct pci_dev *dev, pm_message_t state);
+ int (*resume_early)(struct pci_dev *dev);
+ int (*resume) (struct pci_dev *dev); /* Device woken up */
void (*shutdown) (struct pci_dev *dev);
- int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
+ int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */
const struct pci_error_handlers *err_handler;
const struct attribute_group **groups;
struct device_driver driver;
- struct pci_dynids dynids;
+ struct pci_dynids dynids;
};
#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
/**
- * PCI_DEVICE - macro used to describe a specific pci device
+ * PCI_DEVICE - macro used to describe a specific PCI device
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
*
@@ -775,7 +772,7 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
/**
- * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
+ * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
* @subvend: the 16 bit PCI Subvendor ID
@@ -789,7 +786,7 @@ struct pci_driver {
.subvendor = (subvend), .subdevice = (subdev)
/**
- * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
+ * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
* @dev_class: the class, subclass, prog-if triple for this device
* @dev_class_mask: the class mask for this device
*
@@ -803,7 +800,7 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
/**
- * PCI_VDEVICE - macro used to describe a specific pci device in short form
+ * PCI_VDEVICE - macro used to describe a specific PCI device in short form
* @vend: the vendor name
* @dev: the 16 bit PCI Device ID
*
@@ -812,22 +809,21 @@ struct pci_driver {
* to PCI_ANY_ID. The macro allows the next field to follow as the device
* private data.
*/
-
#define PCI_VDEVICE(vend, dev) \
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
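
As a quick illustration of how a driver consumes these matching macros, here is a minimal ID table (a sketch, not part of this patch; MY_VENDOR_ID/MY_DEVICE_ID and the driver_data value are invented):

#include <linux/module.h>
#include <linux/pci.h>

#define MY_VENDOR_ID    0x1234          /* invented example IDs */
#define MY_DEVICE_ID    0x5678

static const struct pci_device_id my_ids[] = {
        { PCI_DEVICE(MY_VENDOR_ID, MY_DEVICE_ID) },
        /* PCI_VDEVICE() lets driver-private data follow the match fields */
        { PCI_VDEVICE(INTEL, 0x0001), 1 },
        { }     /* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, my_ids);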
enum {
- PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
- PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
- PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
- PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
- PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
+ PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
+ PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
+ PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
+ PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
+ PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
- PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
+ PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
};
-/* these external functions are only available when PCI support is enabled */
+/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
extern unsigned int pci_flags;
@@ -840,11 +836,11 @@ static inline int pci_has_flag(int flag) { return pci_flags & flag; }
void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
- PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
- PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
- PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
- PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
- PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
+ PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
+ PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
+ PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */
+ PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
+ PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
};
extern enum pcie_bus_config_types pcie_bus_config;
@@ -853,7 +849,7 @@ extern struct bus_type pci_bus_type;
/* Do NOT directly access these two variables, unless you are arch-specific PCI
* code, or PCI core code. */
-extern struct list_head pci_root_buses; /* list of all known PCI buses */
+extern struct list_head pci_root_buses; /* List of all known PCI buses */
/* Some device drivers need know if PCI is initiated */
int no_pci_devices(void);
@@ -887,12 +883,13 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
+int pci_host_probe(struct pci_host_bridge *bridge);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata,
- struct list_head *resources);
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources);
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
@@ -949,18 +946,13 @@ int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
- struct pci_dev *from);
+ struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
- unsigned int ss_vendor, unsigned int ss_device,
- struct pci_dev *from);
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
unsigned int devfn);
-static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
- unsigned int devfn)
-{
- return pci_get_domain_bus_and_slot(0, bus, devfn);
-}
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
int pci_dev_present(const struct pci_device_id *ids);
@@ -1028,7 +1020,7 @@ static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
-/* user-space driven config access */
+/* User-space driven config access */
int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
@@ -1072,6 +1064,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
int pci_set_cacheline_size(struct pci_dev *dev);
#define HAVE_PCI_SET_MWI
int __must_check pci_set_mwi(struct pci_dev *dev);
+int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
@@ -1088,7 +1081,11 @@ int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
-void pcie_flr(struct pci_dev *dev);
+u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width);
+void pcie_print_link_status(struct pci_dev *dev);
+int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
@@ -1101,7 +1098,7 @@ int pci_reset_bus(struct pci_bus *bus);
int pci_try_reset_bus(struct pci_bus *bus);
void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
-void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
+int pci_reset_bridge_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
@@ -1153,6 +1150,8 @@ void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
+void pci_wakeup_bus(struct pci_bus *bus);
+void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
@@ -1170,7 +1169,7 @@ unsigned int pci_rescan_bus(struct pci_bus *bus);
void pci_lock_rescan_remove(void);
void pci_unlock_rescan_remove(void);
-/* Vital product data routines */
+/* Vital Product Data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
int pci_set_vpd_size(struct pci_dev *dev, size_t len);
@@ -1232,7 +1231,8 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
void *alignf_data);
-int pci_register_io_range(phys_addr_t addr, resource_size_t size);
+int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+ resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
@@ -1255,9 +1255,7 @@ static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
int __must_check __pci_register_driver(struct pci_driver *, struct module *,
const char *mod_name);
-/*
- * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
- */
+/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
#define pci_register_driver(driver) \
__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
@@ -1272,8 +1270,7 @@ void pci_unregister_driver(struct pci_driver *dev);
* use this macro once, and calling it replaces module_init() and module_exit()
*/
#define module_pci_driver(__pci_driver) \
- module_driver(__pci_driver, pci_register_driver, \
- pci_unregister_driver)
+ module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
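
Putting the pieces together, module_pci_driver() stands in for the module_init()/module_exit() pair roughly as follows (a sketch reusing the invented my_ids table above; real probe/remove paths do more work):

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pci_enable_device(pdev);
}

static void my_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver my_pci_driver = {
        .name           = "my_pci_driver",
        .id_table       = my_ids,       /* must be non-NULL for probe to run */
        .probe          = my_probe,
        .remove         = my_remove,
};
module_pci_driver(my_pci_driver);
MODULE_LICENSE("GPL");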
/**
* builtin_pci_driver() - Helper macro for registering a PCI driver
@@ -1304,7 +1301,6 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type);
-resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
@@ -1312,10 +1308,10 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
-#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */
-#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */
-#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */
-#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */
+#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
+#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
+#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
#define PCI_IRQ_ALL_TYPES \
(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
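
A sketch of how a driver typically consumes these flags through the existing pci_alloc_irq_vectors()/pci_irq_vector() interfaces (my_irq_handler and the vector count are invented; error unwinding is omitted):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int my_setup_irqs(struct pci_dev *pdev)
{
        int i, nvec, ret;

        /* Prefer MSI-X/MSI, fall back to legacy INTx; spread affinity. */
        nvec = pci_alloc_irq_vectors(pdev, 1, 4,
                                     PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
        if (nvec < 0)
                return nvec;

        for (i = 0; i < nvec; i++) {
                ret = request_irq(pci_irq_vector(pdev, i), my_irq_handler,
                                  0, "my_pci_driver", pdev);
                if (ret)
                        return ret;     /* real code would unwind here */
        }
        return 0;
}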
@@ -1334,8 +1330,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
struct msix_entry {
- u32 vector; /* kernel uses to write allocated vector */
- u16 entry; /* driver uses to specify entry, OS writes */
+ u32 vector; /* Kernel uses to write allocated vector */
+ u16 entry; /* Driver uses to specify entry, OS writes */
};
#ifdef CONFIG_PCI_MSI
@@ -1375,10 +1371,10 @@ static inline int pci_msi_enabled(void) { return 0; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
- struct msix_entry *entries, int minvec, int maxvec)
+ struct msix_entry *entries, int minvec, int maxvec)
{ return -ENOSYS; }
static inline int pci_enable_msix_exact(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
+ struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
static inline int
@@ -1455,10 +1451,8 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
-extern bool pcie_ports_auto;
#else
#define pcie_ports_disabled true
-#define pcie_ports_auto false
#endif
#ifdef CONFIG_PCIEASPM
@@ -1543,9 +1537,9 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
#endif
-/* some architectures require additional setup to direct VGA traffic */
+/* Some architectures require additional setup to direct VGA traffic */
typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
- unsigned int command_bits, u32 flags);
+ unsigned int command_bits, u32 flags);
void pci_register_set_vga_state(arch_set_vga_state_t func);
static inline int
@@ -1584,10 +1578,9 @@ static inline void pci_clear_flags(int flags) { }
static inline int pci_has_flag(int flag) { return 0; }
/*
- * If the system does not have PCI, clearly these return errors. Define
- * these as simple inline functions to avoid hair in drivers.
+ * If the system does not have PCI, clearly these return errors. Define
+ * these as simple inline functions to avoid hair in drivers.
*/
-
#define _PCI_NOP(o, s, t) \
static inline int pci_##o##_config_##s(struct pci_dev *dev, \
int where, t val) \
@@ -1671,9 +1664,6 @@ static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
unsigned int devfn)
{ return NULL; }
-static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
- unsigned int devfn)
-{ return NULL; }
static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
unsigned int bus, unsigned int devfn)
{ return NULL; }
@@ -1686,6 +1676,13 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pf(d) (false)
static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{ return false; }
+static inline int pci_irqd_intx_xlate(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{ return -EINVAL; }
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
@@ -1726,8 +1723,10 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
#define pci_root_bus_fwnode(bus) NULL
#endif
-/* these helpers provide future and backwards compatibility
- * for accessing popular PCI BAR info */
+/*
+ * These helpers provide future and backwards compatibility
+ * for accessing popular PCI BAR info
+ */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
@@ -1739,7 +1738,8 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
(pci_resource_end((dev), (bar)) - \
pci_resource_start((dev), (bar)) + 1))
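
A sketch of the usual way these accessors are combined when mapping a BAR (BAR 0 is arbitrary; not from this patch):

static void __iomem *my_map_bar0(struct pci_dev *pdev)
{
        resource_size_t len = pci_resource_len(pdev, 0);

        /* Only memory BARs can be remapped into the kernel address space. */
        if (!len || !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
                return NULL;

        return ioremap(pci_resource_start(pdev, 0), len);
}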
-/* Similar to the helpers above, these manipulate per-pci_dev
+/*
+ * Similar to the helpers above, these manipulate per-pci_dev
* driver-specific data. They are really just a wrapper around
* the generic device structure functions of these calls.
*/
@@ -1753,16 +1753,14 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
dev_set_drvdata(&pdev->dev, data);
}
-/* If you want to know what to call your pci_dev, ask this function.
- * Again, it's a wrapper around the generic device.
- */
static inline const char *pci_name(const struct pci_dev *pdev)
{
return dev_name(&pdev->dev);
}
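
The drvdata wrappers follow the same pattern; a sketch (struct my_priv and the function are invented):

struct my_priv {
        void __iomem *regs;
};

static int my_init_priv(struct pci_dev *pdev)
{
        struct my_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv),
                                            GFP_KERNEL);

        if (!priv)
                return -ENOMEM;

        pci_set_drvdata(pdev, priv);            /* wraps dev_set_drvdata() */
        dev_info(&pdev->dev, "bound as %s\n", pci_name(pdev));
        return 0;
}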
-/* Some archs don't want to expose struct resource to userland as-is
+/*
+ * Some archs don't want to expose struct resource to userland as-is
* in sysfs and /proc
*/
#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
@@ -1781,16 +1779,16 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
/*
- * The world is not perfect and supplies us with broken PCI devices.
- * For at least a part of these bugs we need a work-around, so both
- * generic (drivers/pci/quirks.c) and per-architecture code can define
- * fixup hooks to be called for particular buggy devices.
+ * The world is not perfect and supplies us with broken PCI devices.
+ * For at least a part of these bugs we need a work-around, so both
+ * generic (drivers/pci/quirks.c) and per-architecture code can define
+ * fixup hooks to be called for particular buggy devices.
*/
struct pci_fixup {
- u16 vendor; /* You can use PCI_ANY_ID here of course */
- u16 device; /* You can use PCI_ANY_ID here of course */
- u32 class; /* You can use PCI_ANY_ID here too */
+ u16 vendor; /* Or PCI_ANY_ID */
+ u16 device; /* Or PCI_ANY_ID */
+ u32 class; /* Or PCI_ANY_ID */
unsigned int class_shift; /* should be 0, 8, 16 */
void (*hook)(struct pci_dev *dev);
};
@@ -1832,23 +1830,19 @@ enum pci_fixup_pass {
#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##hook, vendor, device, class, \
- class_shift, hook)
+ resume##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##hook, vendor, device, \
- class, class_shift, hook)
+ resume_early##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##hook, vendor, device, class, \
- class_shift, hook)
+ suspend##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
- suspend_late##hook, vendor, device, \
- class, class_shift, hook)
+ suspend_late##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
@@ -1864,20 +1858,16 @@ enum pci_fixup_pass {
hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##hook, vendor, device, \
- PCI_ANY_ID, 0, hook)
+ resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##hook, vendor, device, \
- PCI_ANY_ID, 0, hook)
+ resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##hook, vendor, device, \
- PCI_ANY_ID, 0, hook)
+ suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
- suspend_late##hook, vendor, device, \
- PCI_ANY_ID, 0, hook)
+ suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
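
A sketch of a quirk wired up through these helpers (the vendor/device IDs and the workaround are invented; as the comment above notes, real quirks live in drivers/pci/quirks.c or per-architecture code):

static void my_intx_quirk(struct pci_dev *dev)
{
        /* Example: mark a device whose INTx disable bit is non-functional */
        dev->broken_intx_masking = 1;
        dev_info(&dev->dev, "applied INTx masking quirk\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, my_intx_quirk);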
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
@@ -1933,6 +1923,7 @@ void pcibios_release_device(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
+resource_size_t pcibios_default_alignment(void);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
@@ -1964,6 +1955,12 @@ int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
+void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
+
+/* Arch may override these (weak) */
+int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
+int pcibios_sriov_disable(struct pci_dev *pdev);
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#else
static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
{
@@ -1991,6 +1988,7 @@ static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
+static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
#endif
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
@@ -2061,6 +2059,7 @@ void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
struct pci_dev *end, u16 acs_flags);
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
@@ -2112,7 +2111,7 @@ static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
*/
static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
{
- return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
+ return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
}
/**
@@ -2182,10 +2181,28 @@ void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+int pci_parse_request_of_pci_ranges(struct device *dev,
+ struct list_head *resources,
+ struct resource **bus_range);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
+#else /* CONFIG_OF */
+static inline void pci_set_of_node(struct pci_dev *dev) { }
+static inline void pci_release_of_node(struct pci_dev *dev) { }
+static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
+static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
+static inline struct irq_domain *
+pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline int pci_parse_request_of_pci_ranges(struct device *dev,
+ struct list_head *resources,
+ struct resource **bus_range)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev)
{
@@ -2197,17 +2214,6 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
return bus ? bus->dev.of_node : NULL;
}
-#else /* CONFIG_OF */
-static inline void pci_set_of_node(struct pci_dev *dev) { }
-static inline void pci_release_of_node(struct pci_dev *dev) { }
-static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
-static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
-static inline struct device_node *
-pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
-static inline struct irq_domain *
-pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
-#endif /* CONFIG_OF */
-
#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
@@ -2231,7 +2237,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
u16 alias, void *data), void *data);
-/* helper functions for operation of device flag */
+/* Helper functions for operation of device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
@@ -2278,7 +2284,23 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-/* provide the legacy pci_dma_* API */
+#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
+#endif
+
+/* Provide the legacy pci_dma_* API */
#include <linux/pci-dma-compat.h>
+#define pci_printk(level, pdev, fmt, arg...) \
+ dev_printk(level, &(pdev)->dev, fmt, ##arg)
+
+#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
+
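
A sketch of the new wrappers in use next to the dev_* calls they replace (the message text is arbitrary):

static void my_report_status(struct pci_dev *pdev, int err)
{
        if (err)
                pci_err(pdev, "init failed: %d\n", err); /* was dev_err(&pdev->dev, ...) */
        else
                pci_info(pdev, "ready, irq %u\n", pdev->irq);
}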
#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 2e855afa0212..26213024e81b 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* PCI HotPlug Core Functions
*
@@ -7,21 +8,6 @@
*
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Send feedback to <kristen.c.accardi@intel.com>
*
*/
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index ab20dc5db423..cc608fc55334 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -45,6 +45,7 @@
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
+#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
#define PCI_BASE_CLASS_MEMORY 0x05
@@ -149,6 +150,8 @@
#define PCI_VENDOR_ID_DYNALINK 0x0675
#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702
+#define PCI_VENDOR_ID_UBIQUITI 0x0777
+
#define PCI_VENDOR_ID_BERKOM 0x0871
#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1
#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2
@@ -1331,6 +1334,7 @@
#define PCI_DEVICE_ID_IMS_TT3D 0x9135
#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_VENDOR_ID_AMPERE 0x1def
#define PCI_VENDOR_ID_INTERG 0x10ea
#define PCI_DEVICE_ID_INTERG_1682 0x1682
@@ -1559,6 +1563,8 @@
#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408
+#define PCI_VENDOR_ID_ALTERA 0x1172
+
#define PCI_VENDOR_ID_SBE 0x1176
#define PCI_DEVICE_ID_SBE_WANXL100 0x0301
#define PCI_DEVICE_ID_SBE_WANXL200 0x0302
@@ -2381,6 +2387,8 @@
#define PCI_VENDOR_ID_LENOVO 0x17aa
+#define PCI_VENDOR_ID_CDNS 0x17cd
+
#define PCI_VENDOR_ID_ARECA 0x17d3
#define PCI_DEVICE_ID_ARECA_1110 0x1110
#define PCI_DEVICE_ID_ARECA_1120 0x1120
@@ -2552,6 +2560,9 @@
#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
#define PCI_DEVICE_ID_TEHUTI_3014 0x3014
+#define PCI_VENDOR_ID_SUNIX 0x1fd4
+#define PCI_DEVICE_ID_SUNIX_1999 0x1999
+
#define PCI_VENDOR_ID_HINT 0x3388
#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
deleted file mode 100644
index b69769dbf659..000000000000
--- a/include/linux/pcieport_if.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * File: pcieport_if.h
- * Purpose: PCI Express Port Bus Driver's IF Data Structure
- *
- * Copyright (C) 2004 Intel
- * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
- */
-
-#ifndef _PCIEPORT_IF_H_
-#define _PCIEPORT_IF_H_
-
-/* Port Type */
-#define PCIE_ANY_PORT (~0)
-
-/* Service Type */
-#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
-#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
-#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
-#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
-#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
-#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
-#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
-#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
-#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */
-#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
-
-struct pcie_device {
- int irq; /* Service IRQ/MSI/MSI-X Vector */
- struct pci_dev *port; /* Root/Upstream/Downstream Port */
- u32 service; /* Port service this device represents */
- void *priv_data; /* Service Private Data */
- struct device device; /* Generic Device Interface */
-};
-#define to_pcie_device(d) container_of(d, struct pcie_device, device)
-
-static inline void set_service_data(struct pcie_device *dev, void *data)
-{
- dev->priv_data = data;
-}
-
-static inline void *get_service_data(struct pcie_device *dev)
-{
- return dev->priv_data;
-}
-
-struct pcie_port_service_driver {
- const char *name;
- int (*probe) (struct pcie_device *dev);
- void (*remove) (struct pcie_device *dev);
- int (*suspend) (struct pcie_device *dev);
- int (*resume) (struct pcie_device *dev);
-
- /* Device driver may resume normal operations */
- void (*error_resume)(struct pci_dev *dev);
-
- /* Link Reset Capability - AER service driver specific */
- pci_ers_result_t (*reset_link) (struct pci_dev *dev);
-
- int port_type; /* Type of the port this driver can handle */
- u32 service; /* Port service this device represents */
-
- struct device_driver driver;
-};
-#define to_service_driver(d) \
- container_of(d, struct pcie_port_service_driver, driver)
-
-int pcie_port_service_register(struct pcie_port_service_driver *new);
-void pcie_port_service_unregister(struct pcie_port_service_driver *new);
-
-#endif /* _PCIEPORT_IF_H_ */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 6658d9ee5257..009cdf3d65b6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -30,10 +30,14 @@
* calls io_destroy() or the process exits.
*
* In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
- * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
- * the kioctx from the proccess's list of kioctxs - after that, there can't be
- * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
- * the initial ref with percpu_ref_put().
+ * removes the kioctx from the process's table of kioctxs and kills percpu_ref.
+ * After that, there can't be any new users of the kioctx (from lookup_ioctx())
+ * and it's then safe to drop the initial ref with percpu_ref_put().
+ *
+ * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
+ * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't
+ * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
+ * with RCU protection, it must be done explicitly.
*
* Code that does a two stage shutdown like this often needs some kind of
* explicit synchronization to ensure the initial refcount can only be dropped
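
A sketch of the life cycle described above, without any RCU-protected lookup path (struct my_ctx and the call sites are invented):

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_ctx {
        struct percpu_ref users;
};

static void my_ctx_release(struct percpu_ref *ref)      /* last ref dropped */
{
        struct my_ctx *ctx = container_of(ref, struct my_ctx, users);

        percpu_ref_exit(&ctx->users);
        kfree(ctx);
}

static struct my_ctx *my_ctx_create(void)
{
        struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (ctx && percpu_ref_init(&ctx->users, my_ctx_release, 0, GFP_KERNEL)) {
                kfree(ctx);
                return NULL;
        }
        return ctx;
}

static void my_ctx_destroy(struct my_ctx *ctx)
{
        /*
         * Unpublish ctx first so no new users can find it, then drop the
         * initial ref; my_ctx_release() runs once all users are gone.
         * No RCU grace period is implied, so an RCU-protected lookup path
         * would still need call_rcu()/synchronize_rcu() on the free side.
         */
        percpu_ref_kill(&ctx->users);
}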
@@ -113,8 +117,10 @@ void percpu_ref_reinit(struct percpu_ref *ref);
* Must be used to drop the initial ref on a percpu refcount; must be called
* precisely once before shutdown.
*
- * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
- * percpu counters and dropping the initial ref.
+ * Switches @ref into atomic mode before gathering up the percpu counters
+ * and dropping the initial ref.
+ *
+ * There are no implied RCU grace periods between kill and release.
*/
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
@@ -139,12 +145,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
+ *
+ * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+ * with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
- /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
- smp_read_barrier_depends();
-
/*
* Theoretically, the following could test just ATOMIC; however,
* then we'd have to mask off DEAD separately as DEAD may be
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 73a7bf30fe9a..4f052496cdfd 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -86,7 +86,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
return 0;
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
return (fbc->counters != NULL);
}
@@ -167,9 +167,9 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
return percpu_counter_read(fbc);
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SMP */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index af0f44effd44..40036a57d072 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -14,26 +14,10 @@
#include <linux/interrupt.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- * interrupt and passed the address of the low level handler,
- * and can be used to implement any platform specific handling
- * before or after calling it.
- *
- * @irq_flags: if non-zero, these flags will be passed to request_irq
- * when requesting interrupts for this PMU device.
- */
-struct arm_pmu_platdata {
- irqreturn_t (*handle_irq)(int irq, void *dev,
- irq_handler_t pmu_handler);
- unsigned long irq_flags;
-};
-
#ifdef CONFIG_ARM_PMU
/*
@@ -92,7 +76,6 @@ enum armpmu_attr_groups {
struct arm_pmu {
struct pmu pmu;
- cpumask_t active_irqs;
cpumask_t supported_cpus;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
@@ -174,12 +157,11 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
+struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irqs(struct arm_pmu *armpmu);
-void armpmu_free_irqs(struct arm_pmu *armpmu);
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+int armpmu_request_irq(int irq, int cpu);
+void armpmu_free_irq(int irq, int cpu);
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 7546822a1d74..e71e99eb9a4e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -449,14 +449,19 @@ struct pmu {
int (*filter_match) (struct perf_event *event); /* optional */
};
+enum perf_addr_filter_action_t {
+ PERF_ADDR_FILTER_ACTION_STOP = 0,
+ PERF_ADDR_FILTER_ACTION_START,
+ PERF_ADDR_FILTER_ACTION_FILTER,
+};
+
/**
* struct perf_addr_filter - address range filter definition
* @entry: event's filter list linkage
* @inode: object file's inode for file-based filters
* @offset: filter range offset
- * @size: filter range size
- * @range: 1: range, 0: address
- * @filter: 1: filter/start, 0: stop
+ * @size: filter range size (size==0 means single address trigger)
+ * @action: filter/start/stop
*
* This is a hardware-agnostic filter configuration as specified by the user.
*/
@@ -465,8 +470,7 @@ struct perf_addr_filter {
struct inode *inode;
unsigned long offset;
unsigned long size;
- unsigned int range : 1,
- filter : 1;
+ enum perf_addr_filter_action_t action;
};
/**
@@ -536,6 +540,10 @@ struct pmu_event_list {
struct list_head list;
};
+#define for_each_sibling_event(sibling, event) \
+ if ((event)->group_leader == (event)) \
+ list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
+
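
A sketch of the new iterator in use; "event" would be a group leader obtained elsewhere in core or PMU code, and the function name is invented:

static u64 my_count_group(struct perf_event *event)
{
        struct perf_event *sibling;
        u64 n = 1;                              /* the leader itself */

        for_each_sibling_event(sibling, event)  /* no-op unless event leads a group */
                n++;
        return n;
}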
/**
* struct perf_event - performance event kernel representation:
*/
@@ -549,16 +557,16 @@ struct perf_event {
struct list_head event_entry;
/*
- * XXX: group_entry and sibling_list should be mutually exclusive;
- * either you're a sibling on a group, or you're the group leader.
- * Rework the code to always use the same list element.
- *
* Locked for modification by both ctx->mutex and ctx->lock; holding
* either sufficies for read.
*/
- struct list_head group_entry;
struct list_head sibling_list;
-
+ struct list_head active_list;
+ /*
+ * Node on the pinned or flexible tree located at the event context;
+ */
+ struct rb_node group_node;
+ u64 group_index;
/*
* We need storage to track the entries in perf_pmu_migrate_context; we
* cannot use the event_entry because of RCU and we want to keep the
@@ -690,6 +698,12 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */
};
+
+struct perf_event_groups {
+ struct rb_root tree;
+ u64 index;
+};
+
/**
* struct perf_event_context - event context structure
*
@@ -710,9 +724,13 @@ struct perf_event_context {
struct mutex mutex;
struct list_head active_ctx_list;
- struct list_head pinned_groups;
- struct list_head flexible_groups;
+ struct perf_event_groups pinned_groups;
+ struct perf_event_groups flexible_groups;
struct list_head event_list;
+
+ struct list_head pinned_active;
+ struct list_head flexible_active;
+
int nr_events;
int nr_active;
int is_active;
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 43b1d7648e82..a03c2642a87c 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -15,8 +15,10 @@
#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
+#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5))
#define PFN_FLAGS_TRACE \
+ { PFN_SPECIAL, "SPECIAL" }, \
{ PFN_SG_CHAIN, "SG_CHAIN" }, \
{ PFN_SG_LAST, "SG_LAST" }, \
{ PFN_DEV, "DEV" }, \
@@ -120,4 +122,15 @@ pud_t pud_mkdevmap(pud_t pud);
#endif
#endif /* __HAVE_ARCH_PTE_DEVMAP */
+#ifdef __HAVE_ARCH_PTE_SPECIAL
+static inline bool pfn_t_special(pfn_t pfn)
+{
+ return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
+}
+#else
+static inline bool pfn_t_special(pfn_t pfn)
+{
+ return false;
+}
+#endif /* __HAVE_ARCH_PTE_SPECIAL */
#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index dc82a07cb4fd..f0b5870a6d40 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -59,6 +59,7 @@
#define PHY_HAS_INTERRUPT 0x00000001
#define PHY_IS_INTERNAL 0x00000002
+#define PHY_RST_AFTER_CLK_EN 0x00000004
#define MDIO_DEVICE_IS_PHY 0x80000000
/* Interface Mode definitions */
@@ -468,7 +469,6 @@ struct phy_device {
/* Interrupt and Polling infrastructure */
struct work_struct phy_queue;
struct delayed_work state_queue;
- atomic_t irq_disable;
struct mutex lock;
@@ -497,19 +497,19 @@ struct phy_device {
* flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
*
- * The drivers must implement config_aneg and read_status. All
- * other functions are optional. Note that none of these
- * functions should be called from interrupt time. The goal is
- * for the bus read/write functions to be able to block when the
- * bus transaction is happening, and be freed up by an interrupt
- * (The MPC85xx has this ability, though it is not currently
- * supported in the driver).
+ * All functions are optional. If config_aneg or read_status
+ * are not implemented, the phy core uses the genphy versions.
+ * Note that none of these functions should be called from
+ * interrupt time. The goal is for the bus read/write functions
+ * to be able to block when the bus transaction is happening,
+ * and be freed up by an interrupt (The MPC85xx has this ability,
+ * though it is not currently supported in the driver).
*/
struct phy_driver {
struct mdio_driver_common mdiodrv;
u32 phy_id;
char *name;
- unsigned int phy_id_mask;
+ u32 phy_id_mask;
u32 features;
u32 flags;
const void *driver_data;
@@ -634,6 +634,9 @@ struct phy_driver {
int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
u16 val);
+ int (*read_page)(struct phy_device *dev);
+ int (*write_page)(struct phy_device *dev, int page);
+
/* Get the size and type of the eeprom contained within a plug-in
* module */
int (*module_info)(struct phy_device *dev,
@@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
size_t phy_speeds(unsigned int *speeds, size_t size,
unsigned long *mask, size_t maxbit);
+void phy_resolve_aneg_linkmode(struct phy_device *phydev);
+
/**
* phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
@@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
}
/**
+ * __phy_read - convenience function for reading a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to read
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_read(struct phy_device *phydev, u32 regnum)
+{
+ return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
+}
+
+/**
* phy_write - Convenience function for writing a given PHY register
* @phydev: the phy_device struct
* @regnum: register number to write
@@ -731,6 +748,72 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
}
/**
+ * __phy_write - Convenience function for writing a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum,
+ val);
+}
+
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+
+/**
+ * __phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return __phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * __phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum,
+ u16 val)
+{
+ return __phy_modify(phydev, regnum, val, 0);
+}
+
+/**
+ * phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ */
+static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ */
+static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return phy_modify(phydev, regnum, val, 0);
+}
+
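
A sketch of a read-modify-write with the helpers above (the register and bit definitions are invented vendor specifics):

#define MY_PHY_CTRL_REG         0x17
#define MY_PHY_CTRL_LED_MASK    0x0007
#define MY_PHY_CTRL_LED_BLINK   0x0002

static int my_phy_config_leds(struct phy_device *phydev)
{
        /* One locked read-modify-write instead of phy_read() + phy_write() */
        return phy_modify(phydev, MY_PHY_CTRL_REG,
                          MY_PHY_CTRL_LED_MASK, MY_PHY_CTRL_LED_BLINK);
}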
+/**
* phy_interrupt_is_valid - Convenience function for testing a given PHY irq
* @phydev: the phy_device struct
*
@@ -763,6 +846,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
};
/**
+ * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
+ * negotiation
+ * @mode: one of &enum phy_interface_t
+ *
+ * Returns true if the phy interface mode uses the 16-bit negotiation
+ * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
+ */
+static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
+{
+ return mode == PHY_INTERFACE_MODE_1000BASEX ||
+ mode == PHY_INTERFACE_MODE_2500BASEX;
+}
+
+/**
* phy_interface_is_rgmii - Convenience function for testing if a PHY interface
* is RGMII (all variants)
* @phydev: the phy_device struct
@@ -794,6 +891,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
*/
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
+int phy_save_page(struct phy_device *phydev);
+int phy_select_page(struct phy_device *phydev, int page);
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum);
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val);
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set);
+
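The paged helpers follow a select/restore pattern; a hedged sketch, with the page and register numbers made up purely for illustration:

	static int example_read_vendor_reg(struct phy_device *phydev)
	{
		int ret = 0, oldpage;

		oldpage = phy_select_page(phydev, 2);	/* hypothetical vendor page */
		if (oldpage < 0)
			goto out;

		ret = __phy_read(phydev, 0x10);		/* hypothetical paged register */
	out:
		return phy_restore_page(phydev, oldpage, ret);
	}

phy_restore_page() drops the bus lock taken by phy_select_page() and returns either @ret or the page-restore error, which is why the error path still goes through it.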
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
@@ -819,6 +924,7 @@ void phy_device_remove(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
+int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
@@ -840,13 +946,11 @@ int phy_aneg_done(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
+int phy_reset_after_clk_enable(struct phy_device *phydev);
-static inline int phy_read_status(struct phy_device *phydev)
+static inline void phy_device_reset(struct phy_device *phydev, int value)
{
- if (!phydev->drv)
- return -EIO;
-
- return phydev->drv->read_status(phydev);
+ mdio_device_reset(&phydev->mdio, value);
}
#define phydev_err(_phydev, format, args...) \
@@ -880,6 +984,10 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
{
return 0;
}
+int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
+ u16 regnum);
+int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
+ u16 regnum, u16 val);
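These helpers are meant to be plugged into a phy_driver for PHYs that have no MMD access; a minimal sketch with hypothetical IDs and most fields omitted:

	static struct phy_driver foo_phy_driver = {
		.phy_id		= 0x12345678,	/* hypothetical */
		.phy_id_mask	= 0xfffffff0,
		.name		= "Foo PHY",
		.read_mmd	= genphy_read_mmd_unsupported,
		.write_mmd	= genphy_write_mmd_unsupported,
	};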
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
@@ -889,6 +997,26 @@ int genphy_c45_read_lpa(struct phy_device *phydev);
int genphy_c45_read_pma(struct phy_device *phydev);
int genphy_c45_pma_setup_forced(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+int genphy_c45_read_mdix(struct phy_device *phydev);
+
+/* The gen10g_* functions are the old Clause 45 stubs */
+int gen10g_config_aneg(struct phy_device *phydev);
+int gen10g_read_status(struct phy_device *phydev);
+int gen10g_no_soft_reset(struct phy_device *phydev);
+int gen10g_config_init(struct phy_device *phydev);
+int gen10g_suspend(struct phy_device *phydev);
+int gen10g_resume(struct phy_device *phydev);
+
+static inline int phy_read_status(struct phy_device *phydev)
+{
+ if (!phydev->drv)
+ return -EIO;
+
+ if (phydev->drv->read_status)
+ return phydev->drv->read_status(phydev);
+ else
+ return genphy_read_status(phydev);
+}
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
@@ -896,9 +1024,8 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change(struct phy_device *phydev);
void phy_change_work(struct work_struct *work);
-void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
void phy_trigger_machine(struct phy_device *phydev, bool sync);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 4f8423a948d5..c9d14eeee7f5 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -25,7 +25,15 @@ struct phy;
enum phy_mode {
PHY_MODE_INVALID,
PHY_MODE_USB_HOST,
+ PHY_MODE_USB_HOST_LS,
+ PHY_MODE_USB_HOST_FS,
+ PHY_MODE_USB_HOST_HS,
+ PHY_MODE_USB_HOST_SS,
PHY_MODE_USB_DEVICE,
+ PHY_MODE_USB_DEVICE_LS,
+ PHY_MODE_USB_DEVICE_FS,
+ PHY_MODE_USB_DEVICE_HS,
+ PHY_MODE_USB_DEVICE_SS,
PHY_MODE_USB_OTG,
PHY_MODE_SGMII,
PHY_MODE_10GKR,
@@ -61,6 +69,7 @@ struct phy_ops {
*/
struct phy_attrs {
u32 bus_width;
+ enum phy_mode mode;
};
/**
@@ -72,7 +81,8 @@ struct phy_attrs {
* @mutex: mutex to protect phy_ops
* @init_count: used to protect when the PHY is used by multiple consumers
* @power_count: used to protect when the PHY is used by multiple consumers
- * @phy_attrs: used to specify PHY specific attributes
+ * @attrs: used to specify PHY specific attributes
+ * @pwr: power regulator associated with the phy
*/
struct phy {
struct device dev;
@@ -88,9 +98,10 @@ struct phy {
/**
* struct phy_provider - represents the phy provider
* @dev: phy provider device
+ * @children: can be used to override the default (dev->of_node) child node
* @owner: the module owner having of_xlate
- * @of_xlate: function pointer to obtain phy instance from phy pointer
* @list: to maintain a linked list of PHY providers
+ * @of_xlate: function pointer to obtain phy instance from phy pointer
*/
struct phy_provider {
struct device *dev;
@@ -101,6 +112,13 @@ struct phy_provider {
struct of_phandle_args *args);
};
+/**
+ * struct phy_lookup - PHY association in list of phys managed by the phy driver
+ * @node: list node
+ * @dev_id: the device of the association
+ * @con_id: connection ID string on device
+ * @phy: the phy of the association
+ */
struct phy_lookup {
struct list_head node;
const char *dev_id;
@@ -144,6 +162,10 @@ int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
int phy_power_off(struct phy *phy);
int phy_set_mode(struct phy *phy, enum phy_mode mode);
+static inline enum phy_mode phy_get_mode(struct phy *phy)
+{
+ return phy->attrs.mode;
+}
int phy_reset(struct phy *phy);
int phy_calibrate(struct phy *phy);
static inline int phy_get_bus_width(struct phy *phy)
@@ -260,6 +282,11 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
return -ENOSYS;
}
+static inline enum phy_mode phy_get_mode(struct phy *phy)
+{
+ return PHY_MODE_INVALID;
+}
+
static inline int phy_reset(struct phy *phy)
{
if (!phy)
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index cf6392de6eb0..ee54453a40a0 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
-extern int fixed_phy_update_state(struct phy_device *phydev,
- const struct fixed_phy_status *status,
- const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status,
@@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
-static inline int fixed_phy_update_state(struct phy_device *phydev,
- const struct fixed_phy_status *status,
- const struct fixed_phy_status *changed)
-{
- return -ENODEV;
-}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index af67edd4ae38..50eeae025f1e 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -7,6 +7,7 @@
struct device_node;
struct ethtool_cmd;
+struct fwnode_handle;
struct net_device;
enum {
@@ -20,19 +21,31 @@ enum {
MLO_AN_PHY = 0, /* Conventional PHY */
MLO_AN_FIXED, /* Fixed-link mode */
- MLO_AN_SGMII, /* Cisco SGMII protocol */
- MLO_AN_8023Z, /* 1000base-X protocol */
+ MLO_AN_INBAND, /* In-band protocol */
};
static inline bool phylink_autoneg_inband(unsigned int mode)
{
- return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z;
+ return mode == MLO_AN_INBAND;
}
+/**
+ * struct phylink_link_state - link state structure
+ * @advertising: ethtool bitmask containing advertised link modes
+ * @lp_advertising: ethtool bitmask containing link partner advertised link
+ * modes
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed, one of the SPEED_* constants.
+ * @duplex: link duplex mode, one of DUPLEX_* constants.
+ * @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @link: true if the link is up.
+ * @an_enabled: true if autonegotiation is enabled/desired.
+ * @an_complete: true if autonegotiation has completed.
+ */
struct phylink_link_state {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
- phy_interface_t interface; /* PHY_INTERFACE_xxx */
+ phy_interface_t interface;
int speed;
int duplex;
int pause;
@@ -41,72 +54,153 @@ struct phylink_link_state {
unsigned int an_complete:1;
};
+/**
+ * struct phylink_mac_ops - MAC operations structure.
+ * @validate: Validate and update the link configuration.
+ * @mac_link_state: Read the current link state from the hardware.
+ * @mac_config: configure the MAC for the selected mode and state.
+ * @mac_an_restart: restart 802.3z BaseX autonegotiation.
+ * @mac_link_down: take the link down.
+ * @mac_link_up: allow the link to come up.
+ *
+ * The individual methods are described more fully below.
+ */
struct phylink_mac_ops {
- /**
- * validate: validate and update the link configuration
- * @ndev: net_device structure associated with MAC
- * @config: configuration to validate
- *
- * Update the %config->supported and %config->advertised masks
- * clearing bits that can not be supported.
- *
- * Note: the PHY may be able to transform from one connection
- * technology to another, so, eg, don't clear 1000BaseX just
- * because the MAC is unable to support it. This is more about
- * clearing unsupported speeds and duplex settings.
- *
- * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX
- * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
- * based on %config->advertised and/or %config->speed.
- */
void (*validate)(struct net_device *ndev, unsigned long *supported,
struct phylink_link_state *state);
-
- /* Read the current link state from the hardware */
- int (*mac_link_state)(struct net_device *, struct phylink_link_state *);
-
- /* Configure the MAC */
- /**
- * mac_config: configure the MAC for the selected mode and state
- * @ndev: net_device structure for the MAC
- * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII
- * @state: state structure
- *
- * The action performed depends on the currently selected mode:
- *
- * %MLO_AN_FIXED, %MLO_AN_PHY:
- * set the specified speed, duplex, pause mode, and phy interface
- * mode in the provided @state.
- * %MLO_AN_8023Z:
- * place the link in 1000base-X mode, advertising the parameters
- * given in advertising in @state.
- * %MLO_AN_SGMII:
- * place the link in Cisco SGMII mode - there is no advertisment
- * to make as the PHY communicates the speed and duplex to the
- * MAC over the in-band control word. Configuration of the pause
- * mode is as per MLO_AN_PHY since this is not included.
- */
+ int (*mac_link_state)(struct net_device *ndev,
+ struct phylink_link_state *state);
void (*mac_config)(struct net_device *ndev, unsigned int mode,
const struct phylink_link_state *state);
-
- /**
- * mac_an_restart: restart 802.3z BaseX autonegotiation
- * @ndev: net_device structure for the MAC
- */
void (*mac_an_restart)(struct net_device *ndev);
-
- void (*mac_link_down)(struct net_device *, unsigned int mode);
- void (*mac_link_up)(struct net_device *, unsigned int mode,
- struct phy_device *);
+ void (*mac_link_down)(struct net_device *ndev, unsigned int mode,
+ phy_interface_t interface);
+ void (*mac_link_up)(struct net_device *ndev, unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phy);
};
-struct phylink *phylink_create(struct net_device *, struct device_node *,
+#if 0 /* For kernel-doc purposes only. */
+/**
+ * validate - Validate and update the link configuration
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Clear bits in the @supported and @state->advertising masks that
+ * are not supportable by the MAC.
+ *
+ * Note that the PHY may be able to transform from one connection
+ * technology to another, so, eg, don't clear 1000BaseX just
+ * because the MAC is unable to support BaseX mode. This is more about
+ * clearing unsupported speeds and duplex settings.
+ *
+ * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
+ * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
+ * based on @state->advertising and/or @state->speed and update
+ * @state->interface accordingly.
+ */
+void validate(struct net_device *ndev, unsigned long *supported,
+ struct phylink_link_state *state);
+
+/**
+ * mac_link_state() - Read the current link state from the hardware
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Read the current link state from the MAC, reporting the current
+ * speed in @state->speed, duplex mode in @state->duplex, pause mode
+ * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link
+ * up state in @state->link.
+ */
+int mac_link_state(struct net_device *ndev,
+ struct phylink_link_state *state);
+
+/**
+ * mac_config() - configure the MAC for the selected mode and state
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * The action performed depends on the currently selected mode:
+ *
+ * %MLO_AN_FIXED, %MLO_AN_PHY:
+ * Configure the specified @state->speed, @state->duplex and
+ * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode.
+ *
+ * %MLO_AN_INBAND:
+ * place the link in an inband negotiation mode (such as 802.3z
+ * 1000base-X or Cisco SGMII mode depending on the @state->interface
+ * mode). In both cases, link state management (whether the link
+ * is up or not) is performed by the MAC, and reported via the
+ * mac_link_state() callback. Changes in link state must be made
+ * by calling phylink_mac_change().
+ *
+ * If in 802.3z mode, the link speed is fixed, dependent on the
+ * @state->interface. Duplex is negotiated, and pause is advertised
+ * according to @state->an_enabled, @state->pause and
+ * @state->advertising flags. Beware of MACs which only support full
+ * duplex at gigabit and higher speeds.
+ *
+ * If in Cisco SGMII mode, the link speed and duplex mode are passed
+ * in the serial bitstream 16-bit configuration word, and the MAC
+ * should be configured to read these bits and acknowledge the
+ * configuration word. Nothing is advertised by the MAC. The MAC is
+ * responsible for reading the configuration word and configuring
+ * itself accordingly.
+ */
+void mac_config(struct net_device *ndev, unsigned int mode,
+ const struct phylink_link_state *state);
+
+/**
+ * mac_an_restart() - restart 802.3z BaseX autonegotiation
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ */
+void mac_an_restart(struct net_device *ndev);
+
+/**
+ * mac_link_down() - take the link down
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), force the link down and disable any
+ * Energy Efficient Ethernet MAC configuration. Interface type
+ * selection must be done in mac_config().
+ */
+void mac_link_down(struct net_device *ndev, unsigned int mode,
+ phy_interface_t interface);
+
+/**
+ * mac_link_up() - allow the link to come up
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
+ * @phy: any attached phy
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), allow the link to come up. If @phy
+ * is non-%NULL, configure Energy Efficient Ethernet by calling
+ * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ * Interface type selection must be done in mac_config().
+ */
+void mac_link_up(struct net_device *ndev, unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phy);
+#endif
+
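For orientation only (hypothetical driver names, not part of this patch), a MAC driver wires these methods up through a single ops structure handed to phylink_create():

	static const struct phylink_mac_ops foo_phylink_ops = {
		.validate	= foo_validate,
		.mac_link_state	= foo_mac_link_state,
		.mac_config	= foo_mac_config,
		.mac_an_restart	= foo_mac_an_restart,
		.mac_link_down	= foo_mac_link_down,
		.mac_link_up	= foo_mac_link_up,
	};

Each foo_* function would implement the corresponding contract documented above.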
+struct phylink *phylink_create(struct net_device *, struct fwnode_handle *,
phy_interface_t iface, const struct phylink_mac_ops *ops);
void phylink_destroy(struct phylink *);
int phylink_connect_phy(struct phylink *, struct phy_device *);
-int phylink_of_phy_connect(struct phylink *, struct device_node *);
+int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
void phylink_disconnect_phy(struct phylink *);
+int phylink_fixed_state_cb(struct phylink *,
+ void (*cb)(struct net_device *dev,
+ struct phylink_link_state *));
void phylink_mac_change(struct phylink *, bool up);
@@ -125,10 +219,6 @@ void phylink_ethtool_get_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
int phylink_ethtool_set_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
-int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *);
-int phylink_ethtool_get_module_eeprom(struct phylink *,
- struct ethtool_eeprom *, u8 *);
-int phylink_init_eee(struct phylink *, bool);
int phylink_get_eee_err(struct phylink *);
int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
index 05082e407c4a..d01a8638bb45 100644
--- a/include/linux/pinctrl/devinfo.h
+++ b/include/linux/pinctrl/devinfo.h
@@ -43,6 +43,8 @@ extern int pinctrl_init_done(struct device *dev);
#else
+struct device;
+
/* Stubs if we're not using pinctrl */
static inline int pinctrl_bind_pins(struct device *dev)
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index ec6dadcc1fde..6c0680641108 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -94,6 +94,7 @@
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
+ * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
@@ -122,6 +123,7 @@ enum pin_config_param {
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_SKEW_DELAY,
+ PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
};
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 5e45385c5bdc..8f5dbb84547a 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/pinctrl/pinctrl-state.h>
+#include <linux/pinctrl/devinfo.h>
struct device;
struct pinctrl_dev;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 2dc5e9870fcd..5a3bb3b7c9ad 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -167,10 +167,9 @@ void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
-extern unsigned int pipe_max_size, pipe_min_size;
+extern unsigned int pipe_max_size;
extern unsigned long pipe_user_pages_hard;
extern unsigned long pipe_user_pages_soft;
-int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe);
@@ -191,6 +190,6 @@ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file);
int create_pipe_files(struct file **, int);
-unsigned int round_pipe_size(unsigned int size);
+unsigned int round_pipe_size(unsigned long size);
#endif
diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h
index e684543254f3..e319d0a2ec82 100644
--- a/include/linux/platform_data/asoc-ti-mcbsp.h
+++ b/include/linux/platform_data/asoc-ti-mcbsp.h
@@ -25,10 +25,6 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
-#define MCBSP_CONFIG_TYPE2 0x2
-#define MCBSP_CONFIG_TYPE3 0x3
-#define MCBSP_CONFIG_TYPE4 0x4
-
/* Platform specific configuration */
struct omap_mcbsp_ops {
void (*request)(unsigned int);
@@ -47,14 +43,6 @@ struct omap_mcbsp_platform_data {
int (*force_ick_on)(struct clk *clk, bool force_on);
};
-/**
- * omap_mcbsp_dev_attr - OMAP McBSP device attributes for omap_hwmod
- * @sidetone: name of the sidetone device
- */
-struct omap_mcbsp_dev_attr {
- const char *sidetone;
-};
-
void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata);
#endif
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index 271a4e25af67..63507ff464ee 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -50,6 +50,8 @@ struct at24_platform_data {
#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */
#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */
#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */
+#define AT24_FLAG_NO_RDROL BIT(1) /* does not auto-rollover reads to */
+ /* the next slave address */
void (*setup)(struct nvmem_device *nvmem, void *context);
void *context;
diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
deleted file mode 100644
index 98829370fee2..000000000000
--- a/include/linux/platform_data/bfin_rotary.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * board initialization should put one of these structures into platform_data
- * and place the bfin-rotary onto platform_bus named "bfin-rotary".
- *
- * Copyright 2008-2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _BFIN_ROTARY_H
-#define _BFIN_ROTARY_H
-
-/* mode bitmasks */
-#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */
-#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */
-#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */
-#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */
-
-#define ROT_DEBE DEBE /* Debounce Enable */
-
-#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */
-#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */
-#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */
-
-struct bfin_rotary_platform_data {
- /* set rotary UP KEY_### or BTN_### in case you prefer
- * bfin-rotary to send EV_KEY otherwise set 0
- */
- unsigned int rotary_up_key;
- /* set rotary DOWN KEY_### or BTN_### in case you prefer
- * bfin-rotary to send EV_KEY otherwise set 0
- */
- unsigned int rotary_down_key;
- /* set rotary BUTTON KEY_### or BTN_### */
- unsigned int rotary_button_key;
- /* set rotary Relative Axis REL_### in case you prefer
- * bfin-rotary to send EV_REL otherwise set 0
- */
- unsigned int rotary_rel_code;
- unsigned short debounce; /* 0..17 */
- unsigned short mode;
- unsigned short pm_wakeup;
- unsigned short *pin_list;
-};
-
-/* CNT_CONFIG bitmasks */
-#define CNTE (1 << 0) /* Counter Enable */
-#define DEBE (1 << 1) /* Debounce Enable */
-#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */
-#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */
-#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */
-#define CNTMODE_SHIFT 8
-#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */
-#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */
-#define BNDMODE_SHIFT 12
-#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */
-#define INPDIS (1 << 15) /* CUG and CDG Input Disable */
-
-#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */
-#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */
-#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */
-#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */
-#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */
-
-#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */
-#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */
-#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */
-#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */
-
-/* CNT_IMASK bitmasks */
-#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */
-#define UCIE (1 << 1) /* Up count Interrupt Enable */
-#define DCIE (1 << 2) /* Down count Interrupt Enable */
-#define MINCIE (1 << 3) /* Min Count Interrupt Enable */
-#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */
-#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */
-#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */
-#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */
-#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */
-#define CZMEIE (1 << 9) /* CZM Error Interrupt Enable */
-#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */
-
-/* CNT_STATUS bitmasks */
-#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */
-#define UCII (1 << 1) /* Up count Interrupt Identifier */
-#define DCII (1 << 2) /* Down count Interrupt Identifier */
-#define MINCII (1 << 3) /* Min Count Interrupt Identifier */
-#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */
-#define COV31II (1 << 5) /* Bit 31 Overflow Interrupt Identifier */
-#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */
-#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */
-#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */
-#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */
-#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */
-
-/* CNT_COMMAND bitmasks */
-#define W1LCNT 0xf /* Load Counter Register */
-#define W1LMIN 0xf0 /* Load Min Register */
-#define W1LMAX 0xf00 /* Load Max Register */
-#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */
-
-#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */
-#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */
-#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */
-
-#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */
-#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */
-#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */
-
-#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */
-#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */
-#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */
-
-/* CNT_DEBOUNCE bitmasks */
-#define DPRESCALE 0xf /* Load Counter Register */
-
-#endif
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index a19b78d826e9..757a0f9e26f9 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -20,12 +20,50 @@
#ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__
#define __PLATFORM_DATA_DMTIMER_OMAP_H__
+struct omap_dm_timer_ops {
+ struct omap_dm_timer *(*request_by_node)(struct device_node *np);
+ struct omap_dm_timer *(*request_specific)(int timer_id);
+ struct omap_dm_timer *(*request)(void);
+
+ int (*free)(struct omap_dm_timer *timer);
+
+ void (*enable)(struct omap_dm_timer *timer);
+ void (*disable)(struct omap_dm_timer *timer);
+
+ int (*get_irq)(struct omap_dm_timer *timer);
+ int (*set_int_enable)(struct omap_dm_timer *timer,
+ unsigned int value);
+ int (*set_int_disable)(struct omap_dm_timer *timer, u32 mask);
+
+ struct clk *(*get_fclk)(struct omap_dm_timer *timer);
+
+ int (*start)(struct omap_dm_timer *timer);
+ int (*stop)(struct omap_dm_timer *timer);
+ int (*set_source)(struct omap_dm_timer *timer, int source);
+
+ int (*set_load)(struct omap_dm_timer *timer, int autoreload,
+ unsigned int value);
+ int (*set_match)(struct omap_dm_timer *timer, int enable,
+ unsigned int match);
+ int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
+ int toggle, int trigger);
+ int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
+
+ unsigned int (*read_counter)(struct omap_dm_timer *timer);
+ int (*write_counter)(struct omap_dm_timer *timer,
+ unsigned int value);
+ unsigned int (*read_status)(struct omap_dm_timer *timer);
+ int (*write_status)(struct omap_dm_timer *timer,
+ unsigned int value);
+};
+
struct dmtimer_platform_data {
/* set_timer_src - Only used for OMAP1 devices */
int (*set_timer_src)(struct platform_device *pdev, int source);
u32 timer_capability;
u32 timer_errata;
int (*get_context_loss_count)(struct device *);
+ const struct omap_dm_timer_ops *timer_ops;
};
#endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */
diff --git a/include/linux/platform_data/gpio-htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h
index b7baf1e42c55..9a3e78082883 100644
--- a/include/linux/platform_data/gpio-htc-egpio.h
+++ b/include/linux/platform_data/gpio-htc-egpio.h
@@ -6,8 +6,6 @@
#ifndef __HTC_EGPIO_H__
#define __HTC_EGPIO_H__
-#include <linux/gpio.h>
-
/* Descriptive values for all-in or all-out htc_egpio_chip descriptors. */
#define HTC_EGPIO_OUTPUT (~0)
#define HTC_EGPIO_INPUT 0
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index cb2618147c34..8612855691b2 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -157,11 +157,6 @@
#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
-struct omap_gpio_dev_attr {
- int bank_width; /* GPIO bank width */
- bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
-};
-
struct omap_gpio_reg_offs {
u16 revision;
u16 direction;
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
index 89fd34727a24..98967df07468 100644
--- a/include/linux/platform_data/i2c-davinci.h
+++ b/include/linux/platform_data/i2c-davinci.h
@@ -16,9 +16,8 @@
struct davinci_i2c_platform_data {
unsigned int bus_freq; /* standard bus frequency (kHz) */
unsigned int bus_delay; /* post-transaction delay (usec) */
- unsigned int sda_pin; /* GPIO pin ID to use for SDA */
- unsigned int scl_pin; /* GPIO pin ID to use for SCL */
- bool has_pfunc; /*chip has a ICPFUNC register */
+ bool gpio_recovery; /* Use GPIO recovery method */
+	bool gpio_recovery;	/* Use GPIO recovery method */
+	bool has_pfunc;		/* Chip has an ICPFUNC register */
};
/* for board setup code */
diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/platform_data/i2c-pxa.h
index 53aab243cbd8..5236f216dfae 100644
--- a/include/linux/i2c/pxa-i2c.h
+++ b/include/linux/platform_data/i2c-pxa.h
@@ -71,15 +71,4 @@ struct i2c_pxa_platform_data {
unsigned char master_code;
unsigned long rate;
};
-
-extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
-
-#ifdef CONFIG_PXA27x
-extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info);
-#endif
-
-#ifdef CONFIG_PXA3xx
-extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info);
-#endif
-
#endif
diff --git a/include/linux/platform_data/mlxcpld-hotplug.h b/include/linux/platform_data/mlxcpld-hotplug.h
deleted file mode 100644
index e4cfcffaa6f4..000000000000
--- a/include/linux/platform_data/mlxcpld-hotplug.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * include/linux/platform_data/mlxcpld-hotplug.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
-#define __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
-
-/**
- * struct mlxcpld_hotplug_device - I2C device data:
- * @adapter: I2C device adapter;
- * @client: I2C device client;
- * @brdinfo: device board information;
- * @bus: I2C bus, where device is attached;
- *
- * Structure represents I2C hotplug device static data (board topology) and
- * dynamic data (related kernel objects handles).
- */
-struct mlxcpld_hotplug_device {
- struct i2c_adapter *adapter;
- struct i2c_client *client;
- struct i2c_board_info brdinfo;
- u16 bus;
-};
-
-/**
- * struct mlxcpld_hotplug_platform_data - device platform data:
- * @top_aggr_offset: offset of top aggregation interrupt register;
- * @top_aggr_mask: top aggregation interrupt common mask;
- * @top_aggr_psu_mask: top aggregation interrupt PSU mask;
- * @psu_reg_offset: offset of PSU interrupt register;
- * @psu_mask: PSU interrupt mask;
- * @psu_count: number of equipped replaceable PSUs;
- * @psu: pointer to PSU devices data array;
- * @top_aggr_pwr_mask: top aggregation interrupt power mask;
- * @pwr_reg_offset: offset of power interrupt register
- * @pwr_mask: power interrupt mask;
- * @pwr_count: number of power sources;
- * @pwr: pointer to power devices data array;
- * @top_aggr_fan_mask: top aggregation interrupt FAN mask;
- * @fan_reg_offset: offset of FAN interrupt register;
- * @fan_mask: FAN interrupt mask;
- * @fan_count: number of equipped replaceable FANs;
- * @fan: pointer to FAN devices data array;
- *
- * Structure represents board platform data, related to system hotplug events,
- * like FAN, PSU, power cable insertion and removing. This data provides the
- * number of hot-pluggable devices and hardware description for event handling.
- */
-struct mlxcpld_hotplug_platform_data {
- u16 top_aggr_offset;
- u8 top_aggr_mask;
- u8 top_aggr_psu_mask;
- u16 psu_reg_offset;
- u8 psu_mask;
- u8 psu_count;
- struct mlxcpld_hotplug_device *psu;
- u8 top_aggr_pwr_mask;
- u16 pwr_reg_offset;
- u8 pwr_mask;
- u8 pwr_count;
- struct mlxcpld_hotplug_device *pwr;
- u8 top_aggr_fan_mask;
- u16 fan_reg_offset;
- u8 fan_mask;
- u8 fan_count;
- struct mlxcpld_hotplug_device *fan;
-};
-
-#endif /* __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H */
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
new file mode 100644
index 000000000000..2744cff1b297
--- /dev/null
+++ b/include/linux/platform_data/mlxreg.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_MLXREG_H
+#define __LINUX_PLATFORM_DATA_MLXREG_H
+
+#define MLXREG_CORE_LABEL_MAX_SIZE 32
+
+/**
+ * struct mlxreg_hotplug_device - I2C device data:
+ *
+ * @adapter: I2C device adapter;
+ * @client: I2C device client;
+ * @brdinfo: device board information;
+ * @nr: I2C adapter number to which the device is to be attached;
+ *
+ * Structure represents I2C hotplug device static data (board topology) and
+ * dynamic data (related kernel object handles).
+ */
+struct mlxreg_hotplug_device {
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct i2c_board_info *brdinfo;
+ int nr;
+};
+
+/**
+ * struct mlxreg_core_data - attributes control data:
+ *
+ * @label: attribute label;
+ * @reg: attribute register offset;
+ * @mask: attribute access mask;
+ * @mode: access mode;
+ * @bit: attribute effective bit;
+ * @np: pointer to the device node associated with the attribute;
+ * @hpdev: hotplug device data;
+ * @health_cntr: dynamic device health indication counter;
+ * @attached: true if device has been attached after good health indication;
+ */
+struct mlxreg_core_data {
+ char label[MLXREG_CORE_LABEL_MAX_SIZE];
+ u32 reg;
+ u32 mask;
+ u32 bit;
+ umode_t mode;
+ struct device_node *np;
+ struct mlxreg_hotplug_device hpdev;
+ u8 health_cntr;
+ bool attached;
+};
+
+/**
+ * struct mlxreg_core_item - components of the same type controlled by the driver:
+ *
+ * @data: component data;
+ * @aggr_mask: group aggregation mask;
+ * @reg: group interrupt status register;
+ * @mask: group interrupt mask;
+ * @cache: last status value for elements from the same group;
+ * @count: number of available elements in the group;
+ * @ind: element's index inside the group;
+ * @inversed: status polarity: 0 means a status of 0 is OK, 1 means a status of 1 is OK;
+ * @health: true if the device has a health indication, false otherwise;
+ */
+struct mlxreg_core_item {
+ struct mlxreg_core_data *data;
+ u32 aggr_mask;
+ u32 reg;
+ u32 mask;
+ u32 cache;
+ u8 count;
+ u8 ind;
+ u8 inversed;
+ u8 health;
+};
+
+/**
+ * struct mlxreg_core_platform_data - platform data:
+ *
+ * @data: led private data;
+ * @regmap: register map of parent device;
+ * @counter: number of led instances;
+ */
+struct mlxreg_core_platform_data {
+ struct mlxreg_core_data *data;
+ void *regmap;
+ int counter;
+};
+
+/**
+ * struct mlxreg_core_hotplug_platform_data - hotplug platform data:
+ *
+ * @items: components of the same type with hotplug capability;
+ * @irq: platform interrupt number;
+ * @regmap: register map of parent device;
+ * @counter: number of the components with the hotplug capability;
+ * @cell: location of top aggregation interrupt register;
+ * @mask: top aggregation interrupt common mask;
+ * @cell_low: location of low aggregation interrupt register;
+ * @mask_low: low aggregation interrupt common mask;
+ * @deferred_nr: I2C adapter number that must exist before probing can run;
+ * @shift_nr: amount by which I2C adapter numbers must be incremented;
+ */
+struct mlxreg_core_hotplug_platform_data {
+ struct mlxreg_core_item *items;
+ int irq;
+ void *regmap;
+ int counter;
+ u32 cell;
+ u32 mask;
+ u32 cell_low;
+ u32 mask_low;
+ int deferred_nr;
+ int shift_nr;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MLXREG_H */
diff --git a/include/linux/platform_data/mms114.h b/include/linux/platform_data/mms114.h
deleted file mode 100644
index 5722ebfb2738..000000000000
--- a/include/linux/platform_data/mms114.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundationr
- */
-
-#ifndef __LINUX_MMS114_H
-#define __LINUX_MMS114_H
-
-struct mms114_platform_data {
- unsigned int x_size;
- unsigned int y_size;
- unsigned int contact_threshold;
- unsigned int moving_threshold;
- bool x_invert;
- bool y_invert;
-
- void (*cfg_pin)(bool);
-};
-
-#endif /* __LINUX_MMS114_H */
diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h
index b42ad83cbc20..4fd0f592a2d2 100644
--- a/include/linux/platform_data/mtd-nand-pxa3xx.h
+++ b/include/linux/platform_data/mtd-nand-pxa3xx.h
@@ -6,41 +6,22 @@
#include <linux/mtd/partitions.h>
/*
- * Current pxa3xx_nand controller has two chip select which
- * both be workable.
- *
- * Notice should be taken that:
- * When you want to use this feature, you should not enable the
- * keep configuration feature, for two chip select could be
- * attached with different nand chip. The different page size
- * and timing requirement make the keep configuration impossible.
+ * The current pxa3xx_nand controller has two chip selects, both of which are
+ * usable, but historically all platforms still using platform data used only
+ * one. Switch to device tree if you need more.
*/
-
-/* The max num of chip select current support */
-#define NUM_CHIP_SELECT (2)
struct pxa3xx_nand_platform_data {
-
- /* the data flash bus is shared between the Static Memory
- * Controller and the Data Flash Controller, the arbiter
- * controls the ownership of the bus
- */
- int enable_arbiter;
-
- /* allow platform code to keep OBM/bootloader defined NFC config */
- int keep_config;
-
- /* indicate how many chip selects will be used */
- int num_cs;
-
- /* use an flash-based bad block table */
- bool flash_bbt;
-
- /* requested ECC strength and ECC step size */
+ /* Keep OBM/bootloader NFC timing configuration */
+ bool keep_config;
+ /* Use a flash-based bad block table */
+ bool flash_bbt;
+ /* Requested ECC strength and ECC step size */
int ecc_strength, ecc_step_size;
-
- const struct mtd_partition *parts[NUM_CHIP_SELECT];
- unsigned int nr_parts[NUM_CHIP_SELECT];
+ /* Partitions */
+ const struct mtd_partition *parts;
+ unsigned int nr_parts;
};
extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info);
+
#endif /* __ASM_ARCH_PXA3XX_NAND_H */
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h
deleted file mode 100644
index 56ff0e6f5ad1..000000000000
--- a/include/linux/platform_data/mtd-onenand-omap2.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2006 Nokia Corporation
- * Author: Juha Yrjola
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MTD_ONENAND_OMAP2_H
-#define __MTD_ONENAND_OMAP2_H
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-
-#define ONENAND_SYNC_READ (1 << 0)
-#define ONENAND_SYNC_READWRITE (1 << 1)
-#define ONENAND_IN_OMAP34XX (1 << 2)
-
-struct omap_onenand_platform_data {
- int cs;
- int gpio_irq;
- struct mtd_partition *parts;
- int nr_parts;
- int (*onenand_setup)(void __iomem *, int *freq_ptr);
- int dma_channel;
- u8 flags;
- u8 regulator_can_sleep;
- u8 skip_initial_unlocking;
-
- /* for passing the partitions */
- struct device_node *of_node;
-};
-#endif
diff --git a/include/linux/platform_data/phy-da8xx-usb.h b/include/linux/platform_data/phy-da8xx-usb.h
new file mode 100644
index 000000000000..85c2b99381b2
--- /dev/null
+++ b/include/linux/platform_data/phy-da8xx-usb.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__
+#define __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__
+
+#include <linux/regmap.h>
+
+/**
+ * da8xx_usb_phy_platform_data
+ * @cfgchip: CFGCHIP syscon regmap
+ */
+struct da8xx_usb_phy_platform_data {
+ struct regmap *cfgchip;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ */
diff --git a/include/linux/platform_data/pinctrl-adi2.h b/include/linux/platform_data/pinctrl-adi2.h
deleted file mode 100644
index 8f91300617ec..000000000000
--- a/include/linux/platform_data/pinctrl-adi2.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Pinctrl Driver for ADI GPIO2 controller
- *
- * Copyright 2007-2013 Analog Devices Inc.
- *
- * Licensed under the GPLv2 or later
- */
-
-
-#ifndef PINCTRL_ADI2_H
-#define PINCTRL_ADI2_H
-
-#include <linux/io.h>
-#include <linux/platform_device.h>
-
-/**
- * struct adi_pinctrl_gpio_platform_data - Pinctrl gpio platform data
- * for ADI GPIO2 device.
- *
- * @port_gpio_base: Optional global GPIO index of the GPIO bank.
- * 0 means driver decides.
- * @port_pin_base: Pin index of the pin controller device.
- * @port_width: PIN number of the GPIO bank device
- * @pint_id: GPIO PINT device id that this GPIO bank should map to.
- * @pint_assign: The 32-bit GPIO PINT registers can be divided into 2 parts. A
- * GPIO bank can be mapped into either low 16 bits[0] or high 16
- * bits[1] of each PINT register.
- * @pint_map: GIOP bank mapping code in PINT device
- */
-struct adi_pinctrl_gpio_platform_data {
- unsigned int port_gpio_base;
- unsigned int port_pin_base;
- unsigned int port_width;
- u8 pinctrl_id;
- u8 pint_id;
- bool pint_assign;
- u8 pint_map;
-};
-
-#endif
diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h
new file mode 100644
index 000000000000..f9bed2a0af9d
--- /dev/null
+++ b/include/linux/platform_data/pm33xx.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI pm33xx platform data
+ *
+ * Copyright (C) 2016-2018 Texas Instruments, Inc.
+ * Dave Gerlach <d-gerlach@ti.com>
+ */
+
+#ifndef _LINUX_PLATFORM_DATA_PM33XX_H
+#define _LINUX_PLATFORM_DATA_PM33XX_H
+
+#include <linux/kbuild.h>
+#include <linux/types.h>
+
+#ifndef __ASSEMBLER__
+struct am33xx_pm_sram_addr {
+ void (*do_wfi)(void);
+ unsigned long *do_wfi_sz;
+ unsigned long *resume_offset;
+ unsigned long *emif_sram_table;
+ unsigned long *ro_sram_data;
+};
+
+struct am33xx_pm_platform_data {
+ int (*init)(void);
+ int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long));
+ struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
+};
+
+struct am33xx_pm_sram_data {
+ u32 wfi_flags;
+ u32 l2_aux_ctrl_val;
+ u32 l2_prefetch_ctrl_val;
+} __packed __aligned(8);
+
+struct am33xx_pm_ro_sram_data {
+ u32 amx3_pm_sram_data_virt;
+ u32 amx3_pm_sram_data_phys;
+} __packed __aligned(8);
+
+#endif /* __ASSEMBLER__ */
+#endif /* _LINUX_PLATFORM_DATA_PM33XX_H */
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index 818c5c6e203f..c71a2dd66143 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -86,6 +86,7 @@ enum si5351_disable_state {
* @multisynth_src: multisynth source clock
* @clkout_src: clkout source clock
* @pll_master: if true, clkout can also change pll rate
+ * @pll_reset: if true, clkout can reset its pll
* @drive: output drive strength
* @rate: initial clkout rate, or default if 0
*/
@@ -95,6 +96,7 @@ struct si5351_clkout_config {
enum si5351_drive_strength drive;
enum si5351_disable_state disable_state;
bool pll_master;
+ bool pll_reset;
unsigned long rate;
};
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index 13c83a25958a..0bf9fddb8306 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -2,10 +2,6 @@
#ifndef _OMAP2_MCSPI_H
#define _OMAP2_MCSPI_H
-#define OMAP2_MCSPI_REV 0
-#define OMAP3_MCSPI_REV 1
-#define OMAP4_MCSPI_REV 2
-
#define OMAP4_MCSPI_REG_OFFSET 0x100
#define MCSPI_PINDIR_D0_IN_D1_OUT 0
@@ -17,10 +13,6 @@ struct omap2_mcspi_platform_config {
unsigned int pin_dir:1;
};
-struct omap2_mcspi_dev_attr {
- unsigned short num_chipselect;
-};
-
struct omap2_mcspi_device_config {
unsigned turbo_mode:1;
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index da79774078a7..773daf7915a3 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SPI_S3C64XX_H
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
new file mode 100644
index 000000000000..80ce28d40832
--- /dev/null
+++ b/include/linux/platform_data/ti-sysc.h
@@ -0,0 +1,136 @@
+#ifndef __TI_SYSC_DATA_H__
+#define __TI_SYSC_DATA_H__
+
+enum ti_sysc_module_type {
+ TI_SYSC_OMAP2,
+ TI_SYSC_OMAP2_TIMER,
+ TI_SYSC_OMAP3_SHAM,
+ TI_SYSC_OMAP3_AES,
+ TI_SYSC_OMAP4,
+ TI_SYSC_OMAP4_TIMER,
+ TI_SYSC_OMAP4_SIMPLE,
+ TI_SYSC_OMAP34XX_SR,
+ TI_SYSC_OMAP36XX_SR,
+ TI_SYSC_OMAP4_SR,
+ TI_SYSC_OMAP4_MCASP,
+ TI_SYSC_OMAP4_USB_HOST_FS,
+};
+
+struct ti_sysc_cookie {
+ void *data;
+};
+
+/**
+ * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets
+ * @midle_shift: Offset of the midle bit
+ * @clkact_shift: Offset of the clockactivity bit
+ * @sidle_shift: Offset of the sidle bit
+ * @enwkup_shift: Offset of the enawakeup bit
+ * @srst_shift: Offset of the softreset bit
+ * @autoidle_shift: Offset of the autoidle bit
+ * @dmadisable_shift: Offset of the dmadisable bit
+ * @emufree_shift: Offset of the emufree bit
+ *
+ * Note that 0 is a valid shift, and for ti-sysc.c -ENODEV can be used if a
+ * feature is not available.
+ */
+struct sysc_regbits {
+ s8 midle_shift;
+ s8 clkact_shift;
+ s8 sidle_shift;
+ s8 enwkup_shift;
+ s8 srst_shift;
+ s8 autoidle_shift;
+ s8 dmadisable_shift;
+ s8 emufree_shift;
+};
+
+#define SYSC_QUIRK_LEGACY_IDLE BIT(8)
+#define SYSC_QUIRK_RESET_STATUS BIT(7)
+#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
+#define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5)
+#define SYSC_QUIRK_OPT_CLKS_NEEDED BIT(4)
+#define SYSC_QUIRK_OPT_CLKS_IN_RESET BIT(3)
+#define SYSC_QUIRK_16BIT BIT(2)
+#define SYSC_QUIRK_UNCACHED BIT(1)
+#define SYSC_QUIRK_USE_CLOCKACT BIT(0)
+
+#define SYSC_NR_IDLEMODES 4
+
+/**
+ * struct sysc_capabilities - capabilities for an interconnect target module
+ *
+ * @type: interconnect target module type
+ * @sysc_mask: bitmask of supported SYSCONFIG register bits
+ * @regbits: SYSCONFIG register bit offsets
+ * @mod_quirks: bitmask of module specific quirks
+ */
+struct sysc_capabilities {
+ const enum ti_sysc_module_type type;
+ const u32 sysc_mask;
+ const struct sysc_regbits *regbits;
+ const u32 mod_quirks;
+};
+
+/**
+ * struct sysc_config - configuration for an interconnect target module
+ * @sysc_val: configured value for sysc register
+ * @syss_mask: configured mask value for the SYSSTATUS register
+ * @midlemodes: bitmask of supported master idle modes
+ * @sidlemodes: bitmask of supported slave idle modes
+ * @srst_udelay: optional delay needed after OCP soft reset
+ * @quirks: bitmask of enabled quirks
+ */
+struct sysc_config {
+ u32 sysc_val;
+ u32 syss_mask;
+ u8 midlemodes;
+ u8 sidlemodes;
+ u8 srst_udelay;
+ u32 quirks;
+};
+
+enum sysc_registers {
+ SYSC_REVISION,
+ SYSC_SYSCONFIG,
+ SYSC_SYSSTATUS,
+ SYSC_MAX_REGS,
+};
+
+/**
+ * struct ti_sysc_module_data - ti-sysc to hwmod translation data for a module
+ * @name: legacy "ti,hwmods" module name
+ * @module_pa: physical address of the interconnect target module
+ * @module_size: size of the interconnect target module
+ * @offsets: array of register offsets as listed in enum sysc_registers
+ * @nr_offsets: number of registers
+ * @cap: interconnect target module capabilities
+ * @cfg: interconnect target module configuration
+ *
+ * This data is enough to allocate a new struct omap_hwmod_class_sysconfig
+ * based on device tree data parsed by the ti-sysc driver.
+ */
+struct ti_sysc_module_data {
+ const char *name;
+ u64 module_pa;
+ u32 module_size;
+ int *offsets;
+ int nr_offsets;
+ const struct sysc_capabilities *cap;
+ struct sysc_config *cfg;
+};
+
+struct device;
+
+struct ti_sysc_platform_data {
+ struct of_dev_auxdata *auxdata;
+ int (*init_module)(struct device *dev,
+ const struct ti_sysc_module_data *data,
+ struct ti_sysc_cookie *cookie);
+ int (*enable_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ int (*idle_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ int (*shutdown_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+};
+
+#endif /* __TI_SYSC_DATA_H__ */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 492ed473ba7e..e723b78d8357 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -556,9 +556,10 @@ struct pm_subsys_data {
* These flags can be set by device drivers at the probe time. They need not be
* cleared by the drivers as the driver core will take care of that.
*
- * NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
+ * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device.
* SMART_PREPARE: Check the return value of the driver's ->prepare callback.
* SMART_SUSPEND: No need to resume the device from runtime suspend.
+ * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible.
*
* Setting SMART_PREPARE instructs bus types and PM domains which may want
* system suspend/resume callbacks to be skipped for the device to return 0 from
@@ -572,10 +573,14 @@ struct pm_subsys_data {
* necessary from the driver's perspective. It also may cause them to skip
* invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
* the driver if they decide to leave the device in runtime suspend.
+ *
+ * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the
+ * driver prefers the device to be left in suspend after system resume.
*/
-#define DPM_FLAG_NEVER_SKIP BIT(0)
-#define DPM_FLAG_SMART_PREPARE BIT(1)
-#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_NEVER_SKIP BIT(0)
+#define DPM_FLAG_SMART_PREPARE BIT(1)
+#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_LEAVE_SUSPENDED BIT(3)
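A hedged example of how a driver might opt in (not taken from this patch; dev_pm_set_driver_flags() is the existing helper for setting these bits at probe time):

	/* in a driver's probe(), with "dev" being its struct device */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_LEAVE_SUSPENDED);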
struct dev_pm_info {
pm_message_t power_state;
@@ -597,6 +602,8 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
+ unsigned int must_resume:1; /* Owned by the PM core */
+ unsigned int may_skip_resume:1; /* Set by subsystems */
#else
unsigned int should_wakeup:1;
#endif
@@ -766,6 +773,7 @@ extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
extern void dev_pm_skip_next_resume_phases(struct device *dev);
+extern bool dev_pm_may_skip_resume(struct device *dev);
extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 4c2cba7ec1d4..4238dde0aaf0 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -88,6 +88,11 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && !!dev->power.wakeup;
}
+static inline void device_set_wakeup_path(struct device *dev)
+{
+ dev->power.wakeup_path = true;
+}
+
/* drivers/base/power/wakeup.c */
extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
extern struct wakeup_source *wakeup_source_create(const char *name);
@@ -174,6 +179,8 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && dev->power.should_wakeup;
}
+static inline void device_set_wakeup_path(struct device *dev) {}
+
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
static inline void pm_stay_awake(struct device *dev) {}
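A minimal hedged sketch of the new device_set_wakeup_path() helper in a system-suspend callback (the driver and callback names are hypothetical):

#include <linux/device.h>
#include <linux/pm_wakeup.h>

/* Hypothetical ->suspend callback: if the device may wake the system,
 * mark it as part of the wakeup path so its ancestors are kept ready.
 */
static int foo_suspend(struct device *dev)
{
	if (device_may_wakeup(dev))
		device_set_wakeup_path(dev);
	return 0;
}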
diff --git a/include/linux/poll.h b/include/linux/poll.h
index d384f12abdd5..f45ebd017eaa 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -11,6 +11,7 @@
#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
+#include <uapi/linux/eventpoll.h>
extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
@@ -22,7 +23,7 @@ extern struct ctl_table epoll_table[]; /* for sysctl */
#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
-#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
+#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
struct poll_table_struct;
@@ -37,7 +38,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
*/
typedef struct poll_table_struct {
poll_queue_proc _qproc;
- unsigned long _key;
+ __poll_t _key;
} poll_table;
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -62,20 +63,20 @@ static inline bool poll_does_not_wait(const poll_table *p)
* to be started implicitly on poll(). You typically only want to do that
* if the application is actually polling for POLLIN and/or POLLOUT.
*/
-static inline unsigned long poll_requested_events(const poll_table *p)
+static inline __poll_t poll_requested_events(const poll_table *p)
{
- return p ? p->_key : ~0UL;
+ return p ? p->_key : ~(__poll_t)0;
}
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
pt->_qproc = qproc;
- pt->_key = ~0UL; /* all events enabled */
+ pt->_key = ~(__poll_t)0; /* all events enabled */
}
struct poll_table_entry {
struct file *filp;
- unsigned long key;
+ __poll_t key;
wait_queue_entry_t wait;
wait_queue_head_t *wait_address;
};
@@ -107,4 +108,28 @@ extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
long nsec);
+#define __MAP(v, from, to) \
+ (from < to ? (v & from) * (to/from) : (v & from) / (from/to))
+
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
+ return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
+ M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
+ M(HUP) | M(RDHUP) | M(MSG);
+#undef M
+}
+
+static inline __poll_t demangle_poll(u16 val)
+{
+#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
+ return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
+ M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
+ M(HUP) | M(RDHUP) | M(MSG);
+#undef M
+}
+#undef __MAP
+
+
#endif /* _LINUX_POLL_H */
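For illustration (not from this patch), under the new typing a driver's ->poll method returns a __poll_t built from the EPOLL* constants; the device structure and function names below are hypothetical:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

/* Hypothetical per-device state: a wait queue plus a data-ready flag. */
struct foo_dev {
	wait_queue_head_t wq;
	bool data_ready;
};

/* Hypothetical ->poll implementation returning __poll_t. */
static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	struct foo_dev *foo = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &foo->wq, wait);
	if (foo->data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}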
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 38d8225510f1..3a3bc71017d5 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -68,7 +68,7 @@ struct posix_clock_operations {
int (*open) (struct posix_clock *pc, fmode_t f_mode);
- uint (*poll) (struct posix_clock *pc,
+ __poll_t (*poll) (struct posix_clock *pc,
struct file *file, poll_table *wait);
int (*release) (struct posix_clock *pc);
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 672c4f32311e..c85704fcdbd2 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -42,13 +42,26 @@ struct cpu_timer_list {
#define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
-#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
- ((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
-#define MAKE_THREAD_CPUCLOCK(tid, clock) \
- MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
+static inline clockid_t make_process_cpuclock(const unsigned int pid,
+ const clockid_t clock)
+{
+ return ((~pid) << 3) | clock;
+}
+static inline clockid_t make_thread_cpuclock(const unsigned int tid,
+ const clockid_t clock)
+{
+ return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
+}
-#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
-#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
+static inline clockid_t fd_to_clockid(const int fd)
+{
+ return make_process_cpuclock((unsigned int) fd, CLOCKFD);
+}
+
+static inline int clockid_to_fd(const clockid_t clk)
+{
+ return ~(clk >> 3);
+}
#define REQUEUE_PENDING 1
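As a quick hedged illustration of the encoding the new inline helpers preserve (the fd value 3 is just an example):

#include <linux/posix-timers.h>

/* Hypothetical round trip: fd_to_clockid() encodes an fd as
 * ((~fd) << 3) | CLOCKFD, and clockid_to_fd() inverts it.
 */
static void foo_clockid_example(void)
{
	clockid_t id = fd_to_clockid(3);
	int fd = clockid_to_fd(id);	/* fd == 3 again */

	(void)fd;
}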
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index b2b7255ec7f5..540595a321a7 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <uapi/linux/posix_acl.h>
struct posix_acl_entry {
@@ -24,7 +25,7 @@ struct posix_acl_entry {
};
struct posix_acl {
- atomic_t a_refcount;
+ refcount_t a_refcount;
struct rcu_head a_rcu;
unsigned int a_count;
struct posix_acl_entry a_entries[0];
@@ -41,7 +42,7 @@ static inline struct posix_acl *
posix_acl_dup(struct posix_acl *acl)
{
if (acl)
- atomic_inc(&acl->a_refcount);
+ refcount_inc(&acl->a_refcount);
return acl;
}
@@ -51,7 +52,7 @@ posix_acl_dup(struct posix_acl *acl)
static inline void
posix_acl_release(struct posix_acl *acl)
{
- if (acl && atomic_dec_and_test(&acl->a_refcount))
+ if (acl && refcount_dec_and_test(&acl->a_refcount))
kfree_rcu(acl, a_rcu);
}
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index e6187f524f2c..01fbf1b16258 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -16,6 +16,7 @@ enum bq27xxx_chip {
BQ27520G2, /* bq27520G2 */
BQ27520G3, /* bq27520G3 */
BQ27520G4, /* bq27520G4 */
+ BQ27521, /* bq27521 */
BQ27530, /* bq27530, bq27531 */
BQ27531,
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index d8b187c3925d..7b81dad712de 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -143,6 +143,13 @@
#define OMAP3430_SR_ERRWEIGHT 0x04
#define OMAP3430_SR_ERRMAXLIMIT 0x02
+enum sr_instance {
+ OMAP_SR_MPU, /* shared with iva on omap3 */
+ OMAP_SR_CORE,
+ OMAP_SR_IVA,
+ OMAP_SR_NR,
+};
+
struct omap_sr {
char *name;
struct list_head node;
@@ -207,7 +214,6 @@ struct omap_smartreflex_dev_attr {
const char *sensor_voltdm_name;
};
-#ifdef CONFIG_POWER_AVS_OMAP
/*
* The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR.
* The smartreflex class driver should pass the class type.
@@ -290,6 +296,8 @@ struct omap_sr_data {
struct voltagedomain *voltdm;
};
+#ifdef CONFIG_POWER_AVS_OMAP
+
/* Smartreflex module enable/disable interface */
void omap_sr_enable(struct voltagedomain *voltdm);
void omap_sr_disable(struct voltagedomain *voltdm);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 79e90b3d3288..f0139b460a72 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -371,6 +371,8 @@ devm_power_supply_register_no_ws(struct device *parent,
extern void power_supply_unregister(struct power_supply *psy);
extern int power_supply_powers(struct power_supply *psy, struct device *dev);
+#define to_power_supply(device) container_of(device, struct power_supply, dev)
+
extern void *power_supply_get_drvdata(struct power_supply *psy);
/* For APM emulation, think legacy userspace. */
extern struct class *power_supply_class;
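A hedged sketch of the new to_power_supply() helper, mapping a struct device back to the power_supply that embeds it (the callback name is hypothetical):

#include <linux/power_supply.h>

/* Hypothetical class callback: recover the power_supply that embeds
 * this struct device and return its driver data.
 */
static void *foo_psy_drvdata(struct device *dev)
{
	struct power_supply *psy = to_power_supply(dev);

	return power_supply_get_drvdata(psy);
}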
diff --git a/include/linux/printk.h b/include/linux/printk.h
index e9b603ee9953..6d7e800affd8 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -201,6 +201,7 @@ void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
+extern asmlinkage void dump_stack(void) __cold;
extern void printk_safe_init(void);
extern void printk_safe_flush(void);
extern void printk_safe_flush_on_panic(void);
@@ -264,6 +265,10 @@ static inline void show_regs_print_info(const char *log_lvl)
{
}
+static inline asmlinkage void dump_stack(void)
+{
+}
+
static inline void printk_safe_init(void)
{
}
@@ -279,8 +284,6 @@ static inline void printk_safe_flush_on_panic(void)
extern int kptr_restrict;
-extern asmlinkage void dump_stack(void) __cold;
-
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 2ff18c9840a7..d31cb6215905 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd);
#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
extern void *ns_get_path(struct path *path, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
+typedef struct ns_common *ns_get_path_helper_t(void *);
+extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+ void *private_data);
extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
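For illustration, a hedged sketch of the callback-based variant; the callback and caller are hypothetical, and an ERR_PTR() return is treated as failure:

#include <linux/err.h>
#include <linux/path.h>
#include <linux/proc_ns.h>

/* Hypothetical callback: the private data already is the ns_common
 * we want to obtain a path for.
 */
static struct ns_common *foo_get_ns(void *private_data)
{
	return private_data;
}

static int foo_ns_path(struct ns_common *ns, struct path *path)
{
	void *ret = ns_get_path_cb(path, foo_get_ns, ns);

	return IS_ERR(ret) ? PTR_ERR(ret) : 0;
}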
diff --git a/include/linux/property.h b/include/linux/property.h
index f6189a3ac63c..2eea4b310fc2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -83,11 +83,17 @@ struct fwnode_handle *fwnode_get_next_parent(
struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_child_node(
const struct fwnode_handle *fwnode, struct fwnode_handle *child);
+struct fwnode_handle *fwnode_get_next_available_child_node(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *child);
#define fwnode_for_each_child_node(fwnode, child) \
for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
child = fwnode_get_next_child_node(fwnode, child))
+#define fwnode_for_each_available_child_node(fwnode, child) \
+ for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
+ child = fwnode_get_next_available_child_node(fwnode, child))
+
struct fwnode_handle *device_get_next_child_node(
struct device *dev, struct fwnode_handle *child);
@@ -103,6 +109,8 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev,
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
+int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index);
+
unsigned int device_get_child_node_count(struct device *dev);
static inline bool device_property_read_bool(struct device *dev,
@@ -206,7 +214,7 @@ struct property_entry {
*/
#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
.length = ARRAY_SIZE(_val_) * sizeof(_type_), \
.is_array = true, \
@@ -224,7 +232,7 @@ struct property_entry {
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
.length = ARRAY_SIZE(_val_) * sizeof(const char *), \
.is_array = true, \
@@ -233,7 +241,7 @@ struct property_entry {
}
#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
.length = sizeof(_type_), \
.is_string = false, \
@@ -250,7 +258,7 @@ struct property_entry {
PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
#define PROPERTY_ENTRY_STRING(_name_, _val_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
.length = sizeof(_val_), \
.is_string = true, \
@@ -258,7 +266,7 @@ struct property_entry {
}
#define PROPERTY_ENTRY_BOOL(_name_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
}
@@ -275,10 +283,15 @@ bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
+const void *device_get_match_data(struct device *dev);
+
int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
+void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
+ char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
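A minimal hedged sketch of the new available-child iterator (the helper name is hypothetical):

#include <linux/property.h>

/* Hypothetical helper: count only children whose status is "okay",
 * using the new fwnode_for_each_available_child_node() iterator.
 */
static unsigned int foo_count_available_children(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;
	unsigned int n = 0;

	fwnode_for_each_available_child_node(fwnode, child)
		n++;

	return n;
}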
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb5e1db..8b1b3b5935ab 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -25,7 +25,19 @@ bool psci_tos_resident_on(int cpu);
int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);
+enum psci_conduit {
+ PSCI_CONDUIT_NONE,
+ PSCI_CONDUIT_SMC,
+ PSCI_CONDUIT_HVC,
+};
+
+enum smccc_version {
+ SMCCC_VERSION_1_0,
+ SMCCC_VERSION_1_1,
+};
+
struct psci_operations {
+ u32 (*get_version)(void);
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
@@ -33,6 +45,8 @@ struct psci_operations {
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
+ enum psci_conduit conduit;
+ enum smccc_version smccc_version;
};
extern struct psci_operations psci_ops;
@@ -46,10 +60,11 @@ static inline int psci_dt_init(void) { return 0; }
#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
int __init psci_acpi_init(void);
bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
+bool acpi_psci_use_hvc(void);
#else
static inline int psci_acpi_init(void) { return 0; }
static inline bool acpi_psci_present(void) { return false; }
+static inline bool acpi_psci_use_hvc(void) { return false; }
#endif
#endif /* __LINUX_PSCI_H */
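For illustration (not in the patch), callers can now inspect the conduit and SMCCC version recorded in psci_ops; the reporting function below is hypothetical:

#include <linux/printk.h>
#include <linux/psci.h>

/* Hypothetical diagnostic: report how PSCI calls are issued and
 * whether SMCCC v1.1 is in use.
 */
static void foo_report_psci(void)
{
	if (psci_ops.conduit == PSCI_CONDUIT_HVC)
		pr_info("PSCI conduit: HVC\n");
	else if (psci_ops.conduit == PSCI_CONDUIT_SMC)
		pr_info("PSCI conduit: SMC\n");

	if (psci_ops.smccc_version == SMCCC_VERSION_1_1)
		pr_info("SMCCC v1.1 available\n");
}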
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
new file mode 100644
index 000000000000..93addfa34061
--- /dev/null
+++ b/include/linux/psp-sev.h
@@ -0,0 +1,606 @@
+/*
+ * AMD Secure Encrypted Virtualization (SEV) driver interface
+ *
+ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * SEV spec 0.14 is available at:
+ * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PSP_SEV_H__
+#define __PSP_SEV_H__
+
+#include <uapi/linux/psp-sev.h>
+
+#ifdef CONFIG_X86
+#include <linux/mem_encrypt.h>
+
+#define __psp_pa(x) __sme_pa(x)
+#else
+#define __psp_pa(x) __pa(x)
+#endif
+
+#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
+
+/**
+ * SEV platform state
+ */
+enum sev_state {
+ SEV_STATE_UNINIT = 0x0,
+ SEV_STATE_INIT = 0x1,
+ SEV_STATE_WORKING = 0x2,
+
+ SEV_STATE_MAX
+};
+
+/**
+ * SEV platform and guest management commands
+ */
+enum sev_cmd {
+ /* platform commands */
+ SEV_CMD_INIT = 0x001,
+ SEV_CMD_SHUTDOWN = 0x002,
+ SEV_CMD_FACTORY_RESET = 0x003,
+ SEV_CMD_PLATFORM_STATUS = 0x004,
+ SEV_CMD_PEK_GEN = 0x005,
+ SEV_CMD_PEK_CSR = 0x006,
+ SEV_CMD_PEK_CERT_IMPORT = 0x007,
+ SEV_CMD_PDH_CERT_EXPORT = 0x008,
+ SEV_CMD_PDH_GEN = 0x009,
+ SEV_CMD_DF_FLUSH = 0x00A,
+
+ /* Guest commands */
+ SEV_CMD_DECOMMISSION = 0x020,
+ SEV_CMD_ACTIVATE = 0x021,
+ SEV_CMD_DEACTIVATE = 0x022,
+ SEV_CMD_GUEST_STATUS = 0x023,
+
+ /* Guest launch commands */
+ SEV_CMD_LAUNCH_START = 0x030,
+ SEV_CMD_LAUNCH_UPDATE_DATA = 0x031,
+ SEV_CMD_LAUNCH_UPDATE_VMSA = 0x032,
+ SEV_CMD_LAUNCH_MEASURE = 0x033,
+ SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034,
+ SEV_CMD_LAUNCH_FINISH = 0x035,
+
+ /* Guest migration commands (outgoing) */
+ SEV_CMD_SEND_START = 0x040,
+ SEV_CMD_SEND_UPDATE_DATA = 0x041,
+ SEV_CMD_SEND_UPDATE_VMSA = 0x042,
+ SEV_CMD_SEND_FINISH = 0x043,
+
+ /* Guest migration commands (incoming) */
+ SEV_CMD_RECEIVE_START = 0x050,
+ SEV_CMD_RECEIVE_UPDATE_DATA = 0x051,
+ SEV_CMD_RECEIVE_UPDATE_VMSA = 0x052,
+ SEV_CMD_RECEIVE_FINISH = 0x053,
+
+ /* Guest debug commands */
+ SEV_CMD_DBG_DECRYPT = 0x060,
+ SEV_CMD_DBG_ENCRYPT = 0x061,
+
+ SEV_CMD_MAX,
+};
+
+/**
+ * struct sev_data_init - INIT command parameters
+ *
+ * @flags: processing flags
+ * @tmr_address: system physical address used for SEV-ES
+ * @tmr_len: len of tmr_address
+ */
+struct sev_data_init {
+ u32 flags; /* In */
+ u32 reserved; /* In */
+ u64 tmr_address; /* In */
+ u32 tmr_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_pek_csr - PEK_CSR command parameters
+ *
+ * @address: PEK certificate chain
+ * @len: len of certificate
+ */
+struct sev_data_pek_csr {
+ u64 address; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_pek_cert_import - PEK_CERT_IMPORT command parameters
+ *
+ * @pek_cert_address: PEK certificate chain
+ * @pek_cert_len: len of PEK certificate
+ * @oca_cert_address: OCA certificate chain
+ * @oca_cert_len: len of OCA certificate
+ */
+struct sev_data_pek_cert_import {
+ u64 pek_cert_address; /* In */
+ u32 pek_cert_len; /* In */
+ u32 reserved; /* In */
+ u64 oca_cert_address; /* In */
+ u32 oca_cert_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
+ *
+ * @pdh_address: PDH certificate address
+ * @pdh_len: len of PDH certificate
+ * @cert_chain_address: PDH certificate chain
+ * @cert_chain_len: len of PDH certificate chain
+ */
+struct sev_data_pdh_cert_export {
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In/Out */
+ u32 reserved; /* In */
+ u64 cert_chain_address; /* In */
+ u32 cert_chain_len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_decommission - DECOMMISSION command parameters
+ *
+ * @handle: handle of the VM to decommission
+ */
+struct sev_data_decommission {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_activate - ACTIVATE command parameters
+ *
+ * @handle: handle of the VM to activate
+ * @asid: asid assigned to the VM
+ */
+struct sev_data_activate {
+ u32 handle; /* In */
+ u32 asid; /* In */
+} __packed;
+
+/**
+ * struct sev_data_deactivate - DEACTIVATE command parameters
+ *
+ * @handle: handle of the VM to deactivate
+ */
+struct sev_data_deactivate {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_guest_status - SEV GUEST_STATUS command parameters
+ *
+ * @handle: handle of the VM to retrieve status
+ * @policy: policy information for the VM
+ * @asid: current ASID of the VM
+ * @state: current state of the VM
+ */
+struct sev_data_guest_status {
+ u32 handle; /* In */
+ u32 policy; /* Out */
+ u32 asid; /* Out */
+ u8 state; /* Out */
+} __packed;
+
+/**
+ * struct sev_data_launch_start - LAUNCH_START command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @policy: guest launch policy
+ * @dh_cert_address: physical address of DH certificate blob
+ * @dh_cert_len: len of DH certificate blob
+ * @session_address: physical address of session parameters
+ * @session_len: len of session parameters
+ */
+struct sev_data_launch_start {
+ u32 handle; /* In/Out */
+ u32 policy; /* In */
+ u64 dh_cert_address; /* In */
+ u32 dh_cert_len; /* In */
+ u32 reserved; /* In */
+ u64 session_address; /* In */
+ u32 session_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_update_data - LAUNCH_UPDATE_DATA command parameter
+ *
+ * @handle: handle of the VM to update
+ * @len: len of memory to be encrypted
+ * @address: physical address of memory region to encrypt
+ */
+struct sev_data_launch_update_data {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_update_vmsa - LAUNCH_UPDATE_VMSA command
+ *
+ * @handle: handle of the VM
+ * @address: physical address of memory region to encrypt
+ * @len: len of memory region to encrypt
+ */
+struct sev_data_launch_update_vmsa {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_measure - LAUNCH_MEASURE command parameters
+ *
+ * @handle: handle of the VM to process
+ * @address: physical address containing the measurement blob
+ * @len: len of measurement blob
+ */
+struct sev_data_launch_measure {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_launch_secret - LAUNCH_SECRET command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing the packet header
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest memory region
+ * @trans_address: physical address of transport memory buffer
+ * @trans_len: len of transport memory buffer
+ */
+struct sev_data_launch_secret {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_finish - LAUNCH_FINISH command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_launch_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_start - SEND_START command parameters
+ *
+ * @handle: handle of the VM to process
+ * @policy: policy information for the VM
+ * @pdh_cert_address: physical address containing PDH certificate
+ * @pdh_cert_len: len of PDH certificate
+ * @plat_cert_address: physical address containing platform certificate
+ * @plat_cert_len: len of platform certificate
+ * @amd_cert_address: physical address containing AMD certificate
+ * @amd_cert_len: len of AMD certificate
+ * @session_address: physical address containing Session data
+ * @session_len: len of session data
+ */
+struct sev_data_send_start {
+ u32 handle; /* In */
+ u32 policy; /* Out */
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In */
+ u32 reserved1;
+ u64 plat_cert_address; /* In */
+ u32 plat_cert_len; /* In */
+ u32 reserved2;
+ u64 amd_cert_address; /* In */
+ u32 amd_cert_len; /* In */
+ u32 reserved3;
+ u64 session_address; /* In */
+ u32 session_len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_send_update_data - SEND_UPDATE_DATA command
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing packet header
+ * @hdr_len: len of packet header
+ * @guest_address: physical address of guest memory region to send
+ * @guest_len: len of guest memory region to send
+ * @trans_address: physical address of host memory region
+ * @trans_len: len of host memory region
+ */
+struct sev_data_send_update_data {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In/Out */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_update_vmsa - SEND_UPDATE_VMSA command
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing packet header
+ * @hdr_len: len of packet header
+ * @guest_address: physical address of guest memory region to send
+ * @guest_len: len of guest memory region to send
+ * @trans_address: physical address of host memory region
+ * @trans_len: len of host memory region
+ */
+struct sev_data_send_update_vmsa {
+ u32 handle; /* In */
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In/Out */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_finish - SEND_FINISH command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_send_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_start - RECEIVE_START command parameters
+ *
+ * @handle: handle of the VM to perform receive operation
+ * @policy: policy information for the VM
+ * @pdh_cert_address: system physical address containing PDH certificate blob
+ * @pdh_cert_len: len of PDH certificate blob
+ * @session_address: system physical address containing session blob
+ * @session_len: len of session blob
+ */
+struct sev_data_receive_start {
+ u32 handle; /* In/Out */
+ u32 policy; /* In */
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In */
+ u32 reserved1;
+ u64 session_address; /* In */
+ u32 session_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_update_data - RECEIVE_UPDATE_DATA command parameters
+ *
+ * @handle: handle of the VM to update
+ * @hdr_address: physical address containing packet header blob
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest memory region
+ * @trans_address: system physical address of transport buffer
+ * @trans_len: len of transport buffer
+ */
+struct sev_data_receive_update_data {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_update_vmsa - RECEIVE_UPDATE_VMSA command parameters
+ *
+ * @handle: handle of the VM to update
+ * @hdr_address: physical address containing packet header blob
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest memory region
+ * @trans_address: system physical address of transport buffer
+ * @trans_len: len of transport buffer
+ */
+struct sev_data_receive_update_vmsa {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_finish - RECEIVE_FINISH command parameters
+ *
+ * @handle: handle of the VM to finish
+ */
+struct sev_data_receive_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_dbg - DBG_ENCRYPT/DBG_DECRYPT command parameters
+ *
+ * @handle: handle of the VM to perform debug operation
+ * @src_addr: source address of data to operate on
+ * @dst_addr: destination address of data to operate on
+ * @len: len of data to operate on
+ */
+struct sev_data_dbg {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 src_addr; /* In */
+ u64 dst_addr; /* In */
+ u32 len; /* In */
+} __packed;
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+
+/**
+ * sev_platform_init - perform SEV INIT command
+ *
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ */
+int sev_platform_init(int *error);
+
+/**
+ * sev_platform_status - perform SEV PLATFORM_STATUS command
+ *
+ * @status: sev_user_data_status structure to be processed
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ */
+int sev_platform_status(struct sev_user_data_status *status, int *error);
+
+/**
+ * sev_issue_cmd_external_user - issue a SEV command on behalf of another
+ * driver, using a file handle.
+ *
+ * This function can be used by other drivers to issue a SEV command on
+ * behalf of userspace. The caller must pass a valid SEV file descriptor
+ * so that we know that it has access to the SEV device.
+ *
+ * @filep: SEV device file pointer
+ * @id: command to issue
+ * @data: command buffer
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ * -%EINVAL if the SEV file descriptor is not valid
+ */
+int sev_issue_cmd_external_user(struct file *filep, unsigned int id,
+ void *data, int *error);
+
+/**
+ * sev_guest_deactivate - perform SEV DEACTIVATE command
+ *
+ * @data: sev_data_deactivate structure to be processed
+ * @error: sev command return code
+ *
+ * Returns:
+ * 0 if the sev successfully processed the command
+ * -%ENODEV if the sev device is not available
+ * -%ENOTSUPP if the sev device does not support SEV
+ * -%ETIMEDOUT if the sev command timed out
+ * -%EIO if the sev returned a non-zero return code
+ */
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error);
+
+/**
+ * sev_guest_activate - perform SEV ACTIVATE command
+ *
+ * @data: sev_data_activate structure to be processed
+ * @error: sev command return code
+ *
+ * Returns:
+ * 0 if the sev successfully processed the command
+ * -%ENODEV if the sev device is not available
+ * -%ENOTSUPP if the sev device does not support SEV
+ * -%ETIMEDOUT if the sev command timed out
+ * -%EIO if the sev returned a non-zero return code
+ */
+int sev_guest_activate(struct sev_data_activate *data, int *error);
+
+/**
+ * sev_guest_df_flush - perform SEV DF_FLUSH command
+ *
+ * @error: sev command return code
+ *
+ * Returns:
+ * 0 if the sev successfully processed the command
+ * -%ENODEV if the sev device is not available
+ * -%ENOTSUPP if the sev device does not support SEV
+ * -%ETIMEDOUT if the sev command timed out
+ * -%EIO if the sev returned a non-zero return code
+ */
+int sev_guest_df_flush(int *error);
+
+/**
+ * sev_guest_decommission - perform SEV DECOMMISSION command
+ *
+ * @data: sev_data_decommission structure to be processed
+ * @error: sev command return code
+ *
+ * Returns:
+ * 0 if the sev successfully processed the command
+ * -%ENODEV if the sev device is not available
+ * -%ENOTSUPP if the sev device does not support SEV
+ * -%ETIMEDOUT if the sev command timed out
+ * -%EIO if the sev returned a non-zero return code
+ */
+int sev_guest_decommission(struct sev_data_decommission *data, int *error);
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int
+sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; }
+
+static inline int sev_platform_init(int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_decommission(struct sev_data_decommission *data, int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_activate(struct sev_data_activate *data, int *error) { return -ENODEV; }
+
+static inline int sev_guest_df_flush(int *error) { return -ENODEV; }
+
+static inline int
+sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int *error) { return -ENODEV; }
+
+static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
+
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
+#endif /* __PSP_SEV_H__ */
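To illustrate how another in-kernel user might consume this interface, a minimal hedged sketch (the caller is hypothetical; the fields used follow struct sev_user_data_status from the UAPI header):

#include <linux/printk.h>
#include <linux/psp-sev.h>

/* Hypothetical caller: bring the SEV platform to the INIT state and
 * report its API version and state from PLATFORM_STATUS.
 */
static int foo_sev_report(void)
{
	struct sev_user_data_status status;
	int rc, psp_err;

	rc = sev_platform_init(&psp_err);
	if (rc)
		return rc;

	rc = sev_platform_status(&status, &psp_err);
	if (rc)
		return rc;

	pr_info("SEV API %d.%d, state %d\n",
		status.api_major, status.api_minor, status.state);
	return 0;
}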
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9395f06e8372..e6d226464838 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -39,6 +39,7 @@ struct persistent_ram_ecc_info {
int ecc_size;
int symsize;
int poly;
+ uint16_t *par;
};
struct persistent_ram_zone {
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index a079656b614c..059242030631 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -75,5 +75,9 @@ void __init ptp_classifier_init(void);
static inline void ptp_classifier_init(void)
{
}
+static inline unsigned int ptp_classify_raw(struct sk_buff *skb)
+{
+ return PTP_CLASS_NONE;
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index d72b2e7dd500..6894976b54e3 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -45,9 +45,10 @@ struct ptr_ring {
};
/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). If ring is ever resized, callers must hold
- * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
- * producer_lock, the next call to __ptr_ring_produce may fail.
+ * for example cpu_relax().
+ *
+ * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
+ * see e.g. ptr_ring_full.
*/
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
@@ -113,7 +114,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
smp_wmb();
- r->queue[r->producer++] = ptr;
+ WRITE_ONCE(r->queue[r->producer++], ptr);
if (unlikely(r->producer >= r->size))
r->producer = 0;
return 0;
@@ -169,32 +170,36 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
return ret;
}
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
- * If ring is never resized, and if the pointer is merely
- * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
- * However, if called outside the lock, and if some other CPU
- * consumes ring entries at the same time, the value returned
- * is not guaranteed to be correct.
- * In this case - to avoid incorrectly detecting the ring
- * as empty - the CPU consuming the ring entries is responsible
- * for either consuming all ring entries until the ring is empty,
- * or synchronizing with some other CPU and causing it to
- * execute __ptr_ring_peek and/or consume the ring enteries
- * after the synchronization point.
- */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
if (likely(r->size))
- return r->queue[r->consumer_head];
+ return READ_ONCE(r->queue[r->consumer_head]);
return NULL;
}
-/* See __ptr_ring_peek above for locking rules. */
+/*
+ * Test ring empty status without taking any locks.
+ *
+ * NB: This is only safe to call if ring is never resized.
+ *
+ * However, if some other CPU consumes ring entries at the same time, the value
+ * returned is not guaranteed to be correct.
+ *
+ * In this case - to avoid incorrectly detecting the ring
+ * as empty - the CPU consuming the ring entries is responsible
+ * for either consuming all ring entries until the ring is empty,
+ * or synchronizing with some other CPU and causing it to
+ * re-test __ptr_ring_empty and/or consume the ring entries
+ * after the synchronization point.
+ *
+ * Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax().
+ */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
- return !__ptr_ring_peek(r);
+ if (likely(r->size))
+ return !r->queue[READ_ONCE(r->consumer_head)];
+ return true;
}
static inline bool ptr_ring_empty(struct ptr_ring *r)
@@ -248,22 +253,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
/* Fundamentally, what we want to do is update consumer
* index and zero out the entry so producer can reuse it.
* Doing it naively at each consume would be as simple as:
- * r->queue[r->consumer++] = NULL;
- * if (unlikely(r->consumer >= r->size))
- * r->consumer = 0;
+ * consumer = r->consumer;
+ * r->queue[consumer++] = NULL;
+ * if (unlikely(consumer >= r->size))
+ * consumer = 0;
+ * r->consumer = consumer;
* but that is suboptimal when the ring is full as producer is writing
* out new entries in the same cache line. Defer these updates until a
* batch of entries has been consumed.
*/
- int head = r->consumer_head++;
+ /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
+ * to work correctly.
+ */
+ int consumer_head = r->consumer_head;
+ int head = consumer_head++;
/* Once we have processed enough entries invalidate them in
* the ring all at once so producer can reuse their space in the ring.
* We also do this when we reach end of the ring - not mandatory
* but helps keep the implementation simple.
*/
- if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
- r->consumer_head >= r->size)) {
+ if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
+ consumer_head >= r->size)) {
/* Zero out entries in the reverse order: this way we touch the
* cache line that producer might currently be reading the last;
* producer won't make progress and touch other cache lines
@@ -271,25 +282,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
*/
while (likely(head >= r->consumer_tail))
r->queue[head--] = NULL;
- r->consumer_tail = r->consumer_head;
+ r->consumer_tail = consumer_head;
}
- if (unlikely(r->consumer_head >= r->size)) {
- r->consumer_head = 0;
+ if (unlikely(consumer_head >= r->size)) {
+ consumer_head = 0;
r->consumer_tail = 0;
}
+ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+ WRITE_ONCE(r->consumer_head, consumer_head);
}
static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;
+ /* The READ_ONCE in __ptr_ring_peek guarantees that anyone
+ * accessing data through the pointer is up to date. Pairs
+ * with smp_wmb in __ptr_ring_produce.
+ */
ptr = __ptr_ring_peek(r);
if (ptr)
__ptr_ring_discard_one(r);
- /* Make sure anyone accessing data through the pointer is up to date. */
- /* Pairs with smp_wmb in __ptr_ring_produce. */
- smp_read_barrier_depends();
return ptr;
}
@@ -451,9 +465,14 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
__PTR_RING_PEEK_CALL_v; \
})
+/* Not all gfp_t flags are allowed here (GFP_KERNEL always is). See the
+ * vmalloc documentation for which flags are legal.
+ */
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
- return kcalloc(size, sizeof(void *), gfp);
+ if (size > KMALLOC_MAX_SIZE / sizeof(void *))
+ return NULL;
+ return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
}
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -527,7 +546,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
goto done;
}
r->queue[head] = batch[--n];
- r->consumer_tail = r->consumer_head = head;
+ r->consumer_tail = head;
+ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+ WRITE_ONCE(r->consumer_head, head);
}
done:
@@ -586,7 +607,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
spin_unlock(&(r)->producer_lock);
spin_unlock_irqrestore(&(r)->consumer_lock, flags);
- kfree(old);
+ kvfree(old);
return 0;
}
@@ -626,7 +647,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
}
for (i = 0; i < nrings; ++i)
- kfree(queues[i]);
+ kvfree(queues[i]);
kfree(queues);
@@ -634,7 +655,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
nomem:
while (--i >= 0)
- kfree(queues[i]);
+ kvfree(queues[i]);
kfree(queues);
@@ -649,7 +670,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
if (destroy)
while ((ptr = ptr_ring_consume(r)))
destroy(ptr);
- kfree(r->queue);
+ kvfree(r->queue);
}
#endif /* _LINUX_PTR_RING_H */
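A hedged usage sketch of the ring API these changes harden; the locked entry points take the producer/consumer spinlocks internally (the demo function is hypothetical):

#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/ptr_ring.h>

/* Hypothetical demo: produce one (non-NULL) pointer and consume it
 * again through the locked helpers.
 */
static int foo_ring_demo(void)
{
	struct ptr_ring ring;
	void *item = (void *)0x1, *out;
	int err;

	err = ptr_ring_init(&ring, 16, GFP_KERNEL);
	if (err)
		return err;

	if (!ptr_ring_produce(&ring, item)) {	/* returns non-zero if full */
		out = ptr_ring_consume(&ring);	/* NULL if empty */
		WARN_ON(out != item);
	}

	ptr_ring_cleanup(&ring, NULL);
	return 0;
}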
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 1fd27d68926b..b401b962afff 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -13,6 +13,9 @@
#ifndef __QCOM_SCM_H
#define __QCOM_SCM_H
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 39e2a2ac2471..13c8ab171437 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -32,14 +32,15 @@
#ifndef _COMMON_HSI_H
#define _COMMON_HSI_H
+
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/slab.h>
/* dma_addr_t manip */
-#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
-#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
+#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
#define DMA_REGPAIR_LE(x, val) do { \
@@ -47,39 +48,45 @@
(x).lo = DMA_LO_LE((val)); \
} while (0)
-#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) \
+ HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
+#define HILO_64_REGPAIR(regpair) ({ \
+ typeof(regpair) __regpair = (regpair); \
+ HILO_64(__regpair.hi, __regpair.lo); })
#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
-#define X_FINAL_CLEANUP_AGG_INT 1
+#define X_FINAL_CLEANUP_AGG_INT 1
-#define EVENT_RING_PAGE_SIZE_BYTES 4096
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
-#define NUM_OF_GLOBAL_QUEUES 128
-#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
-#define ISCSI_CDU_TASK_SEG_TYPE 0
-#define FCOE_CDU_TASK_SEG_TYPE 0
-#define RDMA_CDU_TASK_SEG_TYPE 1
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define FCOE_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
-#define FW_ASSERT_GENERAL_ATTN_IDX 32
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
-#define MAX_PINNED_CCFC 32
+#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
-#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 16
-#define USTORM_QZONE_SIZE 8
-#define XSTORM_QZONE_SIZE 8
-#define YSTORM_QZONE_SIZE 0
-#define PSTORM_QZONE_SIZE 0
-
-#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 16
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 8
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
@@ -102,8 +109,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 20
-#define FW_REVISION_VERSION 0
+#define FW_MINOR_VERSION 33
+#define FW_REVISION_VERSION 11
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -115,10 +122,10 @@
#define MAX_NUM_PORTS_BB (2)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
-#define MAX_NUM_PFS_K2 (16)
-#define MAX_NUM_PFS_BB (8)
-#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_BB (120)
@@ -141,29 +148,14 @@
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_OF_PHYS_TCS (8)
-
+#define PURE_LB_TC NUM_OF_PHYS_TCS
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
-#define LB_TC (NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO (8)
-
-#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
-
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_LCIDS (320)
-#define NUM_OF_LTIDS (320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4 (375e6)
-#define STORM_CLK_FREQ_E4 (1000e6)
-#define CLK25M_CLK_FREQ_E4 (25e6)
+#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
/* Global PXP windows (GTT) */
#define NUM_OF_GTT 19
@@ -172,17 +164,17 @@
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
/* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 10
/*****************/
/* CDU CONSTANTS */
/*****************/
-#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
-#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
@@ -201,45 +193,45 @@
#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
#define DQ_DEMS_ROCE_CQ_CONS 7
-/* XCM agg val selection */
-#define DQ_XCM_AGG_VAL_SEL_WORD2 0
-#define DQ_XCM_AGG_VAL_SEL_WORD3 1
-#define DQ_XCM_AGG_VAL_SEL_WORD4 2
-#define DQ_XCM_AGG_VAL_SEL_WORD5 3
-#define DQ_XCM_AGG_VAL_SEL_REG3 4
-#define DQ_XCM_AGG_VAL_SEL_REG4 5
-#define DQ_XCM_AGG_VAL_SEL_REG5 6
-#define DQ_XCM_AGG_VAL_SEL_REG6 7
-
-/* XCM agg val selection */
-#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
-#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
-#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+/* XCM agg val selection (HW) */
+#define DQ_XCM_AGG_VAL_SEL_WORD2 0
+#define DQ_XCM_AGG_VAL_SEL_WORD3 1
+#define DQ_XCM_AGG_VAL_SEL_WORD4 2
+#define DQ_XCM_AGG_VAL_SEL_WORD5 3
+#define DQ_XCM_AGG_VAL_SEL_REG3 4
+#define DQ_XCM_AGG_VAL_SEL_REG4 5
+#define DQ_XCM_AGG_VAL_SEL_REG5 6
+#define DQ_XCM_AGG_VAL_SEL_REG6 7
+
+/* XCM agg val selection (FW) */
+#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
#define DQ_UCM_AGG_VAL_SEL_WORD1 1
#define DQ_UCM_AGG_VAL_SEL_WORD2 2
#define DQ_UCM_AGG_VAL_SEL_WORD3 3
-#define DQ_UCM_AGG_VAL_SEL_REG0 4
-#define DQ_UCM_AGG_VAL_SEL_REG1 5
-#define DQ_UCM_AGG_VAL_SEL_REG2 6
-#define DQ_UCM_AGG_VAL_SEL_REG3 7
+#define DQ_UCM_AGG_VAL_SEL_REG0 4
+#define DQ_UCM_AGG_VAL_SEL_REG1 5
+#define DQ_UCM_AGG_VAL_SEL_REG2 6
+#define DQ_UCM_AGG_VAL_SEL_REG3 7
/* UCM agg val selection (FW) */
#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
@@ -263,7 +255,7 @@
#define DQ_TCM_ROCE_RQ_PROD_CMD \
DQ_TCM_AGG_VAL_SEL_WORD0
-/* XCM agg counter flag selection */
+/* XCM agg counter flag selection (HW) */
#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
@@ -273,20 +265,20 @@
#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
-/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+/* XCM agg counter flag selection (FW) */
+#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -317,9 +309,9 @@
#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
/* TCM agg counter flag selection (FW) */
-#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
-#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
-#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
@@ -327,18 +319,18 @@
#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
/* PWM address mapping */
-#define DQ_PWM_OFFSET_DPM_BASE 0x0
-#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
#define DQ_PWM_OFFSET_XCM16_BASE 0x40
#define DQ_PWM_OFFSET_XCM32_BASE 0x44
#define DQ_PWM_OFFSET_UCM16_BASE 0x48
#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
-#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_UCM16_4 0x50
#define DQ_PWM_OFFSET_TCM16_BASE 0x58
#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
-#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
-#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
-#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
@@ -347,10 +339,11 @@
#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
-#define DQ_REGION_SHIFT (12)
+
+#define DQ_REGION_SHIFT (12)
/* DPM */
-#define DQ_DPM_WQE_BUFF_SIZE (320)
+#define DQ_DPM_WQE_BUFF_SIZE (320)
/* Conn type ranges */
#define DQ_CONN_TYPE_RANGE_SHIFT (4)
@@ -359,29 +352,30 @@
/* QM CONSTANTS */
/*****************/
-/* number of TX queues in the QM */
+/* Number of TX queues in the QM */
#define MAX_QM_TX_QUEUES_K2 512
#define MAX_QM_TX_QUEUES_BB 448
#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
-/* number of Other queues in the QM */
+/* Number of Other queues in the QM */
#define MAX_QM_OTHER_QUEUES_BB 64
#define MAX_QM_OTHER_QUEUES_K2 128
#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
-/* number of queues in a PF queue group */
+/* Number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8
-/* the size of a single queue element in bytes */
-#define QM_PQ_ELEMENT_SIZE 4
+/* The size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
-/* base number of Tx PQs in the CM PQ representation.
- * should be used when storing PQ IDs in CM PQ registers and context
+/* Base number of Tx PQs in the CM PQ representation.
+ * Should be used when storing PQ IDs in CM PQ registers and context.
*/
-#define CM_TX_PQ_BASE 0x200
+#define CM_TX_PQ_BASE 0x200
-/* number of global Vport/QCN rate limiters */
+/* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256
+
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
@@ -400,7 +394,7 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB 12
+#define PIS_PER_SB_E4 12
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
@@ -432,8 +426,7 @@
#define IGU_CMD_INT_ACK_BASE 0x0400
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
- MAX_TOT_SB_PER_PATH - \
- 1)
+ MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
@@ -447,8 +440,7 @@
#define IGU_CMD_PROD_UPD_BASE 0x0600
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
- MAX_TOT_SB_PER_PATH - \
- 1)
+ MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
@@ -514,129 +506,126 @@
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
/* PF BAR */
-#define PXP_BAR0_START_GRC 0x0000
-#define PXP_BAR0_GRC_LENGTH 0x1C00000
-#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
- PXP_BAR0_GRC_LENGTH - 1)
-
-#define PXP_BAR0_START_IGU 0x1C00000
-#define PXP_BAR0_IGU_LENGTH 0x10000
-#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
- PXP_BAR0_IGU_LENGTH - 1)
-
-#define PXP_BAR0_START_TSDM 0x1C80000
-#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
+ PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
+ PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
-#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_MSDM 0x1D00000
-#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_USDM 0x1D80000
-#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_XSDM 0x1E00000
-#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_YSDM 0x1E80000
-#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_PSDM 0x1F00000
-#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
/* VF BAR */
-#define PXP_VF_BAR0 0
-
-#define PXP_VF_BAR0_START_GRC 0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH 0x200
-#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
- PXP_VF_BAR0_GRC_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_IGU 0
-#define PXP_VF_BAR0_IGU_LENGTH 0x3000
-#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
- PXP_VF_BAR0_IGU_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_DQ 0x3000
-#define PXP_VF_BAR0_DQ_LENGTH 0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
- PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
- + 4)
-#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
- PXP_VF_BAR0_DQ_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
-
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
-
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
+ PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ + 4)
+#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
+ PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_START_IGU2 0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
+#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \
+ PXP_VF_BAR0_IGU2_LENGTH - 1)
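Quick arithmetic check (editorial aside, not part of the header): the new IGU2 window spans 0x10000 through 0x1CFFF, since 0x10000 + 0xD000 - 1 = 0x1CFFF. Expressed as a standalone C11 assertion with the constants duplicated:

#include <assert.h>

#define PXP_VF_BAR0_START_IGU2 0x10000
#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + PXP_VF_BAR0_IGU2_LENGTH - 1)

static_assert(PXP_VF_BAR0_END_IGU2 == 0x1CFFF, "VF BAR0 IGU2 window ends at 0x1CFFF");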
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-#define PXP_QUEUES_ZONE_MAX_NUM 320
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+
/*****************/
/* PRM CONSTANTS */
/*****************/
-#define PRM_DMA_PAD_BYTES_NUM 2
+#define PRM_DMA_PAD_BYTES_NUM 2
+
/*****************/
/* SDMs CONSTANTS */
/*****************/
-#define SDM_OP_GEN_TRIG_NONE 0
-#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
-#define SDM_OP_GEN_TRIG_AGG_INT 2
-#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
@@ -644,26 +633,26 @@
/* Completion types */
/********************/
-#define SDM_COMP_TYPE_NONE 0
-#define SDM_COMP_TYPE_WAKE_THREAD 1
-#define SDM_COMP_TYPE_AGG_INT 2
-#define SDM_COMP_TYPE_CM 3
-#define SDM_COMP_TYPE_LOADER 4
-#define SDM_COMP_TYPE_PXP 5
-#define SDM_COMP_TYPE_INDICATE_ERROR 6
-#define SDM_COMP_TYPE_RELEASE_THREAD 7
-#define SDM_COMP_TYPE_RAM 8
-#define SDM_COMP_TYPE_INC_ORDER_CNT 9
+#define SDM_COMP_TYPE_NONE 0
+#define SDM_COMP_TYPE_WAKE_THREAD 1
+#define SDM_COMP_TYPE_AGG_INT 2
+#define SDM_COMP_TYPE_CM 3
+#define SDM_COMP_TYPE_LOADER 4
+#define SDM_COMP_TYPE_PXP 5
+#define SDM_COMP_TYPE_INDICATE_ERROR 6
+#define SDM_COMP_TYPE_RELEASE_THREAD 7
+#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9
/*****************/
-/* PBF Constants */
+/* PBF CONSTANTS */
/*****************/
/* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
+#define BTB_MAX_BLOCKS 1440
/*****************/
/* PRS CONSTANTS */
@@ -671,14 +660,7 @@
#define PRS_GFT_CAM_LINES_NO_MATCH 31
-/* Async data KCQ CQE */
-struct async_data {
- __le32 cid;
- __le16 itid;
- u8 error_code;
- u8 fw_debug_param;
-};
-
+/* Interrupt coalescing TimeSet */
struct coalescing_timeset {
u8 value;
#define COALESCING_TIMESET_TIMESET_MASK 0x7F
@@ -692,23 +674,32 @@ struct common_queue_zone {
__le16 reserved;
};
+/* ETH Rx producers data */
struct eth_rx_prod_data {
__le16 bd_prod;
__le16 cqe_prod;
};
-struct regpair {
- __le32 lo;
- __le32 hi;
+struct tcp_ulp_connect_done_params {
+ __le16 mss;
+ u8 snd_wnd_scale;
+ u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
};
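The TS_EN bit presumably records whether TCP timestamps are enabled on the offloaded connection. A minimal sketch of decoding it from the flags byte (standalone, with the relevant mask/shift duplicated; the helper name is illustrative):

#include <stdint.h>

#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0

/* Illustrative decode of the TS_EN flag from tcp_ulp_connect_done_params. */
static inline int ulp_connect_ts_enabled(uint8_t flags)
{
	return (flags >> TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT) &
	       TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK;
}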
-struct vf_pf_channel_eqe_data {
- struct regpair msg_addr;
+struct iscsi_connect_done_results {
+ __le16 icid;
+ __le16 conn_id;
+ struct tcp_ulp_connect_done_params params;
};
struct iscsi_eqe_data {
- __le32 cid;
+ __le16 icid;
__le16 conn_id;
+ __le16 reserved;
u8 error_code;
u8 error_pdu_opcode_reserved;
#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
@@ -719,52 +710,6 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
-struct rdma_eqe_destroy_qp {
- __le32 cid;
- u8 reserved[4];
-};
-
-union rdma_eqe_data {
- struct regpair async_handle;
- struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
-};
-
-struct malicious_vf_eqe_data {
- u8 vf_id;
- u8 err_id;
- __le16 reserved[3];
-};
-
-struct initial_cleanup_eqe_data {
- u8 vf_id;
- u8 reserved[7];
-};
-
-/* Event Data Union */
-union event_ring_data {
- u8 bytes[8];
- struct vf_pf_channel_eqe_data vf_pf_channel;
- struct iscsi_eqe_data iscsi_info;
- union rdma_eqe_data rdma_data;
- struct malicious_vf_eqe_data malicious_vf;
- struct initial_cleanup_eqe_data vf_init_cleanup;
-};
-
-/* Event Ring Entry */
-struct event_ring_entry {
- u8 protocol_id;
- u8 opcode;
- __le16 reserved0;
- __le16 echo;
- u8 fw_return_code;
- u8 flags;
-#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
-#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
- union event_ring_data data;
-};
-
/* Multi function mode */
enum mf_mode {
ERROR_MODE /* Unsupported mode */,
@@ -781,13 +726,31 @@ enum protocol_type {
PROTOCOLID_CORE,
PROTOCOLID_ETH,
PROTOCOLID_IWARP,
- PROTOCOLID_RESERVED5,
+ PROTOCOLID_RESERVED0,
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
- PROTOCOLID_RESERVED6,
+ PROTOCOLID_RESERVED1,
MAX_PROTOCOL_TYPE
};
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* RoCE Destroy Event Data */
+struct rdma_eqe_destroy_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
+/* RDMA Event Data Union */
+union rdma_eqe_data {
+ struct regpair async_handle;
+ struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
+/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
u8 reserved[3];
@@ -798,62 +761,71 @@ struct ustorm_queue_zone {
struct common_queue_zone common;
};
-/* status block structure */
+/* Status block structure */
struct cau_pi_entry {
- u32 prod;
-#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
-#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
-#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
-#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
-#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
-#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
-#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
-#define CAU_PI_ENTRY_RESERVED_SHIFT 24
+ __le32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT 24
};
-/* status block structure */
+/* Status block structure */
struct cau_sb_entry {
- u32 data;
-#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
-#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
-#define CAU_SB_ENTRY_STATE0_MASK 0xF
-#define CAU_SB_ENTRY_STATE0_SHIFT 24
-#define CAU_SB_ENTRY_STATE1_MASK 0xF
-#define CAU_SB_ENTRY_STATE1_SHIFT 28
- u32 params;
-#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
-#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
-#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
-#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
-#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
-#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
-#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
-#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
-#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
-#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
-#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
-#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
-#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
-#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
-#define CAU_SB_ENTRY_TPH_MASK 0x1
-#define CAU_SB_ENTRY_TPH_SHIFT 31
+ __le32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
+#define CAU_SB_ENTRY_STATE0_MASK 0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT 24
+#define CAU_SB_ENTRY_STATE1_MASK 0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT 28
+ __le32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
+#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+#define CAU_SB_ENTRY_TPH_MASK 0x1
+#define CAU_SB_ENTRY_TPH_SHIFT 31
};
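All of these MASK/SHIFT pairs follow the usual HSI bitfield convention: a field occupies MASK << SHIFT within its containing word. A minimal host-endian sketch of packing the PF/VF routing fields of cau_sb_entry.params (the SET-field macro below is modeled on that convention and is illustrative, not the driver's own helper; in-kernel code would additionally convert the result to __le32):

#include <stdint.h>

#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27

/* Illustrative SET-field helper: clears a field, then writes 'val' into it. */
#define HSI_SET_FIELD(word, name, val) \
	((word) = ((word) & ~((uint32_t)(name##_MASK) << (name##_SHIFT))) | \
		  (((uint32_t)(val) & (name##_MASK)) << (name##_SHIFT)))

static uint32_t cau_sb_routing_params(uint8_t pf, uint8_t vf, int vf_valid)
{
	uint32_t params = 0;

	HSI_SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf);
	HSI_SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf);
	HSI_SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	return params;
}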
-/* core doorbell data */
+/* IGU cleanup bit values, used to distinguish between a cleanup command and a
+ * producer/consumer update.
+ */

+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+/* Core doorbell data */
struct core_db_data {
u8 params;
-#define CORE_DB_DATA_DEST_MASK 0x3
-#define CORE_DB_DATA_DEST_SHIFT 0
-#define CORE_DB_DATA_AGG_CMD_MASK 0x3
-#define CORE_DB_DATA_AGG_CMD_SHIFT 2
-#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
-#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
-#define CORE_DB_DATA_RESERVED_MASK 0x1
-#define CORE_DB_DATA_RESERVED_SHIFT 5
-#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
- u8 agg_flags;
- __le16 spq_prod;
+#define CORE_DB_DATA_DEST_MASK 0x3
+#define CORE_DB_DATA_DEST_SHIFT 0
+#define CORE_DB_DATA_AGG_CMD_MASK 0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT 2
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
+#define CORE_DB_DATA_RESERVED_MASK 0x1
+#define CORE_DB_DATA_RESERVED_SHIFT 5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 spq_prod;
};
/* Enum of doorbell aggregative command selection */
@@ -909,67 +881,69 @@ struct db_l2_dpm_sge {
struct regpair addr;
__le16 nbytes;
__le16 bitfields;
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
-#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
-#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
-#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
-#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
-#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
-#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
__le32 reserved2;
};
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
-#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
-#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
-#define DB_LEGACY_ADDR_DEMS_MASK 0x7
-#define DB_LEGACY_ADDR_DEMS_SHIFT 2
-#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
-#define DB_LEGACY_ADDR_ICID_SHIFT 5
+#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK 0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT 2
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
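Per the field layout, a legacy-mode doorbell address is the connection ICID placed above a 3-bit DEMS selector, with the two low bits reserved. A host-endian sketch of composing it (the function name is illustrative; in-kernel code would store the result as __le32):

#include <stdint.h>

#define DB_LEGACY_ADDR_DEMS_MASK 0x7
#define DB_LEGACY_ADDR_DEMS_SHIFT 2
#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
#define DB_LEGACY_ADDR_ICID_SHIFT 5

/* Illustrative composition of db_legacy_addr.addr from its DEMS/ICID fields. */
static uint32_t make_db_legacy_addr(uint32_t icid, uint8_t dems)
{
	return ((icid & DB_LEGACY_ADDR_ICID_MASK) << DB_LEGACY_ADDR_ICID_SHIFT) |
	       (((uint32_t)dems & DB_LEGACY_ADDR_DEMS_MASK) <<
		DB_LEGACY_ADDR_DEMS_SHIFT);
}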
/* Structure for doorbell address, in PWM mode */
struct db_pwm_addr {
__le32 addr;
#define DB_PWM_ADDR_RESERVED0_MASK 0x7
-#define DB_PWM_ADDR_RESERVED0_SHIFT 0
-#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
#define DB_PWM_ADDR_OFFSET_SHIFT 3
-#define DB_PWM_ADDR_WID_MASK 0x3
-#define DB_PWM_ADDR_WID_SHIFT 10
-#define DB_PWM_ADDR_DPI_MASK 0xFFFF
-#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_WID_MASK 0x3
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT 12
#define DB_PWM_ADDR_RESERVED1_MASK 0xF
-#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
};
-/* Parameters to RoCE firmware, passed in EDPM doorbell */
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params {
__le32 params;
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
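The EDPM doorbell packs its parameters into one 32-bit word using the masks above. A host-endian sketch of composing the size/opcode/WQE-size fields (names are illustrative; field semantics beyond the bit layout are not claimed here):

#include <stdint.h>

#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16

/* Illustrative packing of db_rdma_dpm_params.params (host-endian only). */
static uint32_t make_rdma_dpm_params(uint8_t size, uint8_t opcode,
				     uint16_t wqe_size)
{
	return (((uint32_t)size & DB_RDMA_DPM_PARAMS_SIZE_MASK) <<
		DB_RDMA_DPM_PARAMS_SIZE_SHIFT) |
	       (((uint32_t)opcode & DB_RDMA_DPM_PARAMS_OPCODE_MASK) <<
		DB_RDMA_DPM_PARAMS_OPCODE_SHIFT) |
	       (((uint32_t)wqe_size & DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK) <<
		DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT);
}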
-/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
+ * DPM burst.
+ */
struct db_rdma_dpm_data {
__le16 icid;
__le16 prod_val;
@@ -987,22 +961,22 @@ enum igu_int_cmd {
/* IGU producer or consumer update command */
struct igu_prod_cons_update {
- u32 sb_id_and_flags;
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
-#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
-#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
- u32 reserved1;
+ __le32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
};
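An IGU producer/consumer update writes the status-block index, the update flag, the 2-bit interrupt-enable field and the command-type bit (see enum command_type_bit above) into a single word. A host-endian sketch of that packing (helper name illustrative; the real field is __le32):

#include <stdint.h>

#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31

/* Illustrative packing of igu_prod_cons_update.sb_id_and_flags. */
static uint32_t make_igu_update(uint32_t sb_index, uint8_t enable_int,
				int update_flag, int command_type_set)
{
	return ((sb_index & IGU_PROD_CONS_UPDATE_SB_INDEX_MASK) <<
		IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
	       ((uint32_t)(update_flag ? 1 : 0) <<
		IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
	       (((uint32_t)enable_int & IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK) <<
		IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
	       ((uint32_t)(command_type_set ? 1 : 0) <<
		IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT);
}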
/* Igu segments access for default status block only */
@@ -1012,38 +986,63 @@ enum igu_seg_access {
MAX_IGU_SEG_ACCESS
};
+/* Enumeration for L3 type field of parsing_and_err_flags.
+ * L3Type: 0 - unknown (not IP), 1 - IPv4, 2 - IPv6.
+ * (This field can be filled according to the last EtherType.)
+ */
+enum l3_type {
+ e_l3_type_unknown,
+ e_l3_type_ipv4,
+ e_l3_type_ipv6,
+ MAX_L3_TYPE
+};
+
+/* Enumeration for l4Protocol field of parsing_and_err_flags.
+ * L4-protocol: 0 - none, 1 - TCP, 2 - UDP.
+ * If the packet is an IPv4 fragment and is not the first fragment, the
+ * protocol type should be set to none.
+ */
+enum l4_protocol {
+ e_l4_protocol_none,
+ e_l4_protocol_tcp,
+ e_l4_protocol_udp,
+ MAX_L4_PROTOCOL
+};
+
+/* Parsing and error flags field */
struct parsing_and_err_flags {
__le16 flags;
-#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
-#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
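The L3TYPE and L4PROTOCOL subfields of the flags word are what the l3_type and l4_protocol enums above describe. A host-endian sketch of extracting them (assumes the __le16 has already been converted; helper name illustrative):

#include <stdint.h>

#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2

/* Illustrative decode of the L3 type and L4 protocol subfields. */
static void decode_parsing_flags(uint16_t flags, unsigned int *l3_type,
				 unsigned int *l4_protocol)
{
	*l3_type = (flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
		   PARSING_AND_ERR_FLAGS_L3TYPE_MASK;
	*l4_protocol = (flags >> PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) &
		       PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK;
}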
+/* Parsing error flags bitmap */
struct parsing_err_flags {
__le16 flags;
#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
@@ -1080,266 +1079,260 @@ struct parsing_err_flags {
#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
};
+/* Pb context */
struct pb_context {
__le32 crc[4];
};
+/* Concrete Function ID */
struct pxp_concrete_fid {
__le16 fid;
-#define PXP_CONCRETE_FID_PFID_MASK 0xF
-#define PXP_CONCRETE_FID_PFID_SHIFT 0
-#define PXP_CONCRETE_FID_PORT_MASK 0x3
-#define PXP_CONCRETE_FID_PORT_SHIFT 4
-#define PXP_CONCRETE_FID_PATH_MASK 0x1
-#define PXP_CONCRETE_FID_PATH_SHIFT 6
-#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
-#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_CONCRETE_FID_VFID_MASK 0xFF
-#define PXP_CONCRETE_FID_VFID_SHIFT 8
+#define PXP_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_CONCRETE_FID_PORT_MASK 0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT 4
+#define PXP_CONCRETE_FID_PATH_MASK 0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT 6
+#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT 8
};
+/* Concrete Function ID */
struct pxp_pretend_concrete_fid {
__le16 fid;
-#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
-#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
-#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
};
+/* Function ID */
union pxp_pretend_fid {
struct pxp_pretend_concrete_fid concrete_fid;
- __le16 opaque_fid;
+ __le16 opaque_fid;
};
-/* Pxp Pretend Command Register. */
+/* Pxp Pretend Command Register */
struct pxp_pretend_cmd {
- union pxp_pretend_fid fid;
- __le16 control;
-#define PXP_PRETEND_CMD_PATH_MASK 0x1
-#define PXP_PRETEND_CMD_PATH_SHIFT 0
-#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
-#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
-#define PXP_PRETEND_CMD_PORT_MASK 0x3
-#define PXP_PRETEND_CMD_PORT_SHIFT 2
-#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
-#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
-#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
-#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
-#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
-#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
-#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
-#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
+ union pxp_pretend_fid fid;
+ __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT 0
+#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
+#define PXP_PRETEND_CMD_PORT_MASK 0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT 2
+#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
+#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
};
-/* PTT Record in PXP Admin Window. */
+/* PTT Record in PXP Admin Window */
struct pxp_ptt_entry {
- __le32 offset;
-#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
-#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
-#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
-#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
- struct pxp_pretend_cmd pretend;
+ __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
+#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+ struct pxp_pretend_cmd pretend;
};
-/* VF Zone A Permission Register. */
+/* VF Zone A Permission Register */
struct pxp_vf_zone_a_permission {
__le32 control;
-#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
-#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
-#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
-#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
};
-/* RSS hash type */
+/* RDIF context */
struct rdif_task_context {
__le32 initial_ref_tag;
__le16 app_tag_value;
__le16 app_tag_mask;
u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
u8 partial_dif_data[7];
__le16 partial_crc_value;
__le16 partial_checksum_value;
__le32 offset_in_io;
__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
__le32 reserved2;
};
-/* RSS hash type */
-enum rss_hash_type {
- RSS_HASH_TYPE_DEFAULT = 0,
- RSS_HASH_TYPE_IPV4 = 1,
- RSS_HASH_TYPE_TCP_IPV4 = 2,
- RSS_HASH_TYPE_IPV6 = 3,
- RSS_HASH_TYPE_TCP_IPV6 = 4,
- RSS_HASH_TYPE_UDP_IPV4 = 5,
- RSS_HASH_TYPE_UDP_IPV6 = 6,
- MAX_RSS_HASH_TYPE
-};
-
-/* status block structure */
-struct status_block {
- __le16 pi_array[PIS_PER_SB];
+/* Status block structure */
+struct status_block_e4 {
+ __le16 pi_array[PIS_PER_SB_E4];
__le32 sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
};
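The renamed E4 status block keeps the producer index in the low 24 bits of prod_index, with the upper byte zero-padded. A host-endian sketch of reading it (assumes the __le32 has already been converted; name illustrative):

#include <stdint.h>

#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF

/* Illustrative extraction of the 24-bit producer index. */
static inline uint32_t sb_e4_prod_index(uint32_t prod_index_word)
{
	return prod_index_word & STATUS_BLOCK_E4_PROD_INDEX_MASK;
}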
+/* TDIF context */
struct tdif_task_context {
__le32 initial_ref_tag;
__le16 app_tag_value;
__le16 app_tag_mask;
- __le16 partial_crc_valueB;
- __le16 partial_checksum_valueB;
+ __le16 partial_crc_value_b;
+ __le16 partial_checksum_value_b;
__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
u8 reserved1;
u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
- __le32 offset_in_iob;
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offset_in_io_b;
__le16 partial_crc_value_a;
- __le16 partial_checksum_valuea_;
- __le32 offset_in_ioa;
+ __le16 partial_checksum_value_a;
+ __le32 offset_in_io_a;
u8 partial_dif_data_a[8];
u8 partial_dif_data_b[8];
};
+/* Timers context */
struct timers_context {
__le32 logical_client_0;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
@@ -1385,6 +1378,7 @@ struct timers_context {
#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
+/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
enum tunnel_next_protocol {
e_unknown = 0,
e_l2 = 1,
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cb06e6e368e1..d9416ad5ef59 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -36,150 +36,168 @@
/********************/
/* ETH FW CONSTANTS */
/********************/
-#define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 10
+
+#define ETH_HSI_VER_MAJOR 3
+#define ETH_HSI_VER_MINOR 10
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
-#define ETH_CACHE_LINE_SIZE 64
-#define ETH_RX_CQE_GAP 32
-#define ETH_MAX_RAMROD_PER_CON 8
-#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
-#define ETH_RX_NUM_NEXT_PAGE_BDS 2
-
-#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
-#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
-
-#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
-#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
-#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
-#define ETH_TX_MAX_LSO_HDR_NBD 4
-#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
-#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
-#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
-#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
-#define ETH_TX_MAX_LSO_HDR_BYTES 510
-#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
-#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
-#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
-#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
-#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
-
-#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES 510
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
+
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
-#define ETH_RX_MAX_BUFF_PER_PKT 5
-#define ETH_RX_BD_THRESHOLD 12
+#define ETH_RX_MAX_BUFF_PER_PKT 5
+#define ETH_RX_BD_THRESHOLD 12
-/* num of MAC/VLAN filters */
-#define ETH_NUM_MAC_FILTERS 512
-#define ETH_NUM_VLAN_FILTERS 512
+/* Num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
-/* approx. multicast constants */
-#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
-#define ETH_MULTICAST_MAC_BINS 256
-#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
+/* Approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
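The approximate multicast filter uses 256 hash bins packed into ETH_MULTICAST_MAC_BINS_IN_REGS (= 8) 32-bit registers. One plausible reading of that packing, as a host-side sketch (the bin-from-MAC hash itself is device-defined and not shown; names illustrative):

#include <stdint.h>

#define ETH_MULTICAST_MAC_BINS 256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)

/* Illustrative mapping of a multicast bin (0..255) to a (register, bit) pair. */
static void mcast_bin_to_reg_bit(unsigned int bin, unsigned int *reg,
				 unsigned int *bit)
{
	*reg = bin / 32;	/* 0 .. ETH_MULTICAST_MAC_BINS_IN_REGS - 1 */
	*bit = bin % 32;
}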
-/* ethernet vport update constants */
-#define ETH_FILTER_RULES_COUNT 10
-#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
-#define ETH_RSS_KEY_SIZE_REGS 10
-#define ETH_RSS_ENGINE_NUM_K2 207
-#define ETH_RSS_ENGINE_NUM_BB 127
+/* Ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT 10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_KEY_SIZE_REGS 10
+#define ETH_RSS_ENGINE_NUM_K2 207
+#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
-#define ETH_TPA_MAX_AGGS_NUM 64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+#define ETH_TPA_MAX_AGGS_NUM 64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+/* GFS constants */
+#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
+
+/* Destination port mode */
+enum dest_port_mode {
+ DEST_PORT_PHY,
+ DEST_PORT_LOOPBACK,
+ DEST_PORT_PHY_LOOPBACK,
+ DEST_PORT_DROP,
+ MAX_DEST_PORT_MODE
+};
+
+/* Ethernet address type */
+enum eth_addr_type {
+ BROADCAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ UNICAST_ADDRESS,
+ UNKNOWN_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
struct eth_tx_1st_bd_flags {
u8 bitfields;
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
-#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};
-/* The parsing information data fo rthe first tx bd of a given packet. */
+/* The parsing information data for the first tx bd of a given packet */
struct eth_tx_data_1st_bd {
__le16 vlan;
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
-#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
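For a transmitted packet, the PKT_LEN subfield of bitfields presumably holds the total packet length, while per-packet offload flags (e.g. LSO) sit in bd_flags. A host-endian sketch of composing those values (helper names illustrative; the real bitfields member is __le16):

#include <stdint.h>

#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2

/* Illustrative packing of the first-BD packet length subfield. */
static inline uint16_t first_bd_pkt_len_bits(uint16_t pkt_len)
{
	return (uint16_t)((pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
			  ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}

/* Illustrative LSO flag value for eth_tx_1st_bd_flags.bitfields. */
static inline uint8_t first_bd_lso_flag(void)
{
	return (uint8_t)(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
}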
-/* The parsing information data for the second tx bd of a given packet. */
+/* The parsing information data for the second tx bd of a given packet */
struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size;
__le16 bitfields1;
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
-#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
-#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
__le16 bitfields2;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
-/* Firmware data for L2-EDPM packet. */
+/* Firmware data for L2-EDPM packet */
struct eth_edpm_fw_data {
struct eth_tx_data_1st_bd data_1st_bd;
struct eth_tx_data_2nd_bd data_2nd_bd;
__le32 reserved;
};
-struct eth_fast_path_cqe_fw_debug {
- __le16 reserved2;
-};
-
-/* tunneling parsing flags */
+/* Tunneling parsing flags */
struct eth_tunnel_parsing_flags {
u8 flags;
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
@@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags {
/* PMD flow control bits */
struct eth_pmd_flow_flags {
u8 flags;
-#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
-#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
-#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
-#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
};
-/* Regular ETH Rx FP CQE. */
+/* Regular ETH Rx FP CQE */
struct eth_fast_path_rx_reg_cqe {
u8 type;
u8 bitfields;
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
-#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
__le16 pkt_len;
struct parsing_and_err_flags pars_flags;
__le16 vlan_tag;
@@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe {
u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
- u8 reserved[9];
- struct eth_fast_path_cqe_fw_debug fw_debug;
- u8 reserved1[3];
+ u8 reserved;
+ __le16 flow_id;
+ u8 reserved1[11];
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-continue ETH Rx FP CQE. */
+/* TPA-continue ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_cont_cqe {
u8 type;
u8 tpa_agg_index;
@@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-end ETH Rx FP CQE. */
+/* TPA-end ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_end_cqe {
u8 type;
u8 tpa_agg_index;
@@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-start ETH Rx FP CQE. */
+/* TPA-start ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_start_cqe {
u8 type;
u8 bitfields;
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
__le16 seg_len;
struct parsing_and_err_flags pars_flags;
__le16 vlan_tag;
@@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe {
u8 tpa_agg_index;
u8 header_len;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
- struct eth_fast_path_cqe_fw_debug fw_debug;
+ __le16 flow_id;
u8 reserved;
struct eth_pmd_flow_flags pmd_flags;
};
@@ -295,24 +313,24 @@ struct eth_rx_bd {
struct regpair addr;
};
-/* regular ETH Rx SP CQE */
+/* Regular ETH Rx SP CQE */
struct eth_slow_path_rx_cqe {
- u8 type;
- u8 ramrod_cmd_id;
- u8 error_flag;
- u8 reserved[25];
- __le16 echo;
- u8 reserved1;
+ u8 type;
+ u8 ramrod_cmd_id;
+ u8 error_flag;
+ u8 reserved[25];
+ __le16 echo;
+ u8 reserved1;
struct eth_pmd_flow_flags pmd_flags;
};
-/* union for all ETH Rx CQE types */
+/* Union for all ETH Rx CQE types */
union eth_rx_cqe {
- struct eth_fast_path_rx_reg_cqe fast_path_regular;
- struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
- struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
- struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
- struct eth_slow_path_rx_cqe slow_path;
+ struct eth_fast_path_rx_reg_cqe fast_path_regular;
+ struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+ struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+ struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+ struct eth_slow_path_rx_cqe slow_path;
};
/* ETH Rx CQE type */
@@ -339,7 +357,7 @@ enum eth_rx_tunn_type {
MAX_ETH_RX_TUNN_TYPE
};
-/* Aggregation end reason. */
+/* Aggregation end reason. */
enum eth_tpa_end_reason {
ETH_AGG_END_UNUSED,
ETH_AGG_END_SP_UPDATE,
@@ -354,59 +372,59 @@ enum eth_tpa_end_reason {
/* The first tx bd of a given packet */
struct eth_tx_1st_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_1st_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_1st_bd data;
};
/* The second tx bd of a given packet */
struct eth_tx_2nd_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_2nd_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_2nd_bd data;
};
-/* The parsing information data for the third tx bd of a given packet. */
+/* The parsing information data for the third tx bd of a given packet */
struct eth_tx_data_3rd_bd {
__le16 lso_mss;
__le16 bitfields;
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
-#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
-#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
u8 tunn_l4_hdr_start_offset_w;
u8 tunn_hdr_size_w;
};
/* The third tx bd of a given packet */
struct eth_tx_3rd_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_3rd_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_3rd_bd data;
};
-/* Complementary information for the regular tx bd of a given packet. */
+/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
- __le16 reserved0;
- __le16 bitfields;
-#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
-#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
-#define ETH_TX_DATA_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
-#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+ __le16 reserved0;
+ __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
__le16 reserved3;
};
/* The common non-special TX BD ring element */
struct eth_tx_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_bd data;
};
union eth_tx_bd_types {
@@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone {
/* ETH doorbell data */
struct eth_db_data {
u8 params;
-#define ETH_DB_DATA_DEST_MASK 0x3
-#define ETH_DB_DATA_DEST_SHIFT 0
-#define ETH_DB_DATA_AGG_CMD_MASK 0x3
-#define ETH_DB_DATA_AGG_CMD_SHIFT 2
-#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
-#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
-#define ETH_DB_DATA_RESERVED_MASK 0x1
-#define ETH_DB_DATA_RESERVED_SHIFT 5
-#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ETH_DB_DATA_DEST_MASK 0x3
+#define ETH_DB_DATA_DEST_SHIFT 0
+#define ETH_DB_DATA_AGG_CMD_MASK 0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT 2
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
+#define ETH_DB_DATA_RESERVED_MASK 0x1
+#define ETH_DB_DATA_RESERVED_SHIFT 5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 bd_prod;
};
+/* RSS hash type */
+enum rss_hash_type {
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
+ MAX_RSS_HASH_TYPE
+};
+
#endif /* __ETH_COMMON__ */
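The *_MASK/*_SHIFT define pairs used throughout these HSI structures describe sub-fields packed into a single flags byte or word: the shift selects the bit position, the mask gives the field width. As a rough illustration only, the standalone sketch below consumes the ETH_DB_DATA_* layout shown above; FIELD_SET_EX() and FIELD_GET_EX() are hypothetical local helpers written for this example, not macros taken from these headers.

/*
 * Standalone sketch: packing and unpacking doorbell params with the
 * MASK/SHIFT convention.  Only the ETH_DB_DATA_* values mirror the
 * structure above; the helpers are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define ETH_DB_DATA_DEST_MASK		0x3
#define ETH_DB_DATA_DEST_SHIFT		0
#define ETH_DB_DATA_AGG_CMD_MASK	0x3
#define ETH_DB_DATA_AGG_CMD_SHIFT	2

/* Mask-then-shift to write a field, shift-then-mask to read it back */
#define FIELD_SET_EX(var, name, val) \
	((var) |= (((val) & name##_MASK) << name##_SHIFT))
#define FIELD_GET_EX(var, name) \
	(((var) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	uint8_t params = 0;

	FIELD_SET_EX(params, ETH_DB_DATA_DEST, 0x2);	/* doorbell destination */
	FIELD_SET_EX(params, ETH_DB_DATA_AGG_CMD, 0x1);	/* aggregation command */

	printf("params=0x%02x dest=%u agg_cmd=%u\n", (unsigned int)params,
	       (unsigned int)FIELD_GET_EX(params, ETH_DB_DATA_DEST),
	       (unsigned int)FIELD_GET_EX(params, ETH_DB_DATA_AGG_CMD));
	return 0;
}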
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 12fc9e788eea..22077c586853 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -8,217 +8,78 @@
#ifndef __FCOE_COMMON__
#define __FCOE_COMMON__
+
/*********************/
/* FCOE FW CONSTANTS */
/*********************/
#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
-struct fcoe_abts_pkt {
- __le32 abts_rsp_fc_payload_lo;
- __le16 abts_rsp_rx_id;
- u8 abts_rsp_rctl;
- u8 reserved2;
-};
-
-/* FCoE additional WQE (Sq/XferQ) information */
-union fcoe_additional_info_union {
- __le32 previous_tid;
- __le32 parent_tid;
- __le32 burst_length;
- __le32 seq_rec_updated_offset;
-};
-
-struct fcoe_exp_ro {
- __le32 data_offset;
- __le32 reserved;
-};
-
-union fcoe_cleanup_addr_exp_ro_union {
- struct regpair abts_rsp_fc_payload_hi;
- struct fcoe_exp_ro exp_ro;
-};
-
-/* FCoE Ramrod Command IDs */
-enum fcoe_completion_status {
- FCOE_COMPLETION_STATUS_SUCCESS,
- FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
- FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
- MAX_FCOE_COMPLETION_STATUS
-};
-
-struct fc_addr_nw {
- u8 addr_lo;
- u8 addr_mid;
- u8 addr_hi;
-};
-
-/* FCoE connection offload */
-struct fcoe_conn_offload_ramrod_data {
- struct regpair sq_pbl_addr;
- struct regpair sq_curr_page_addr;
- struct regpair sq_next_page_addr;
- struct regpair xferq_pbl_addr;
- struct regpair xferq_curr_page_addr;
- struct regpair xferq_next_page_addr;
- struct regpair respq_pbl_addr;
- struct regpair respq_curr_page_addr;
- struct regpair respq_next_page_addr;
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
- __le16 tx_max_fc_pay_len;
- __le16 e_d_tov_timer_val;
- __le16 rx_max_fc_pay_len;
- __le16 vlan_tag;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
- __le16 physical_q0;
- __le16 rec_rr_tov_timer_val;
- struct fc_addr_nw s_id;
- u8 max_conc_seqs_c3;
- struct fc_addr_nw d_id;
- u8 flags;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
- __le16 conn_id;
- u8 def_q_idx;
- u8 reserved[5];
-};
-
-/* FCoE terminate connection request */
-struct fcoe_conn_terminate_ramrod_data {
- struct regpair terminate_params_addr;
-};
-
-struct fcoe_slow_sgl_ctx {
- struct regpair base_sgl_addr;
- __le16 curr_sge_off;
- __le16 remainder_num_sges;
- __le16 curr_sgl_index;
- __le16 reserved;
-};
-
-union fcoe_dix_desc_ctx {
- struct fcoe_slow_sgl_ctx dix_sgl;
- struct scsi_sge cached_dix_sge;
+/* The fcoe storm task context protection-information of Ystorm */
+struct protection_info_ctx {
+ __le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
+ u8 dix_block_size;
+ u8 dst_size;
};
-struct fcoe_fast_sgl_ctx {
- struct regpair sgl_start_addr;
- __le32 sgl_byte_offset;
- __le16 task_reuse_cnt;
- __le16 init_offset_in_first_sge;
+/* The fcoe storm task context protection-information of Ystorm */
+union protection_info_union_ctx {
+ struct protection_info_ctx info;
+ __le32 value;
};
+/* FCP CMD payload */
struct fcoe_fcp_cmd_payload {
__le32 opaque[8];
};
+/* FCP RSP payload */
struct fcoe_fcp_rsp_payload {
__le32 opaque[6];
};
-struct fcoe_fcp_xfer_payload {
- __le32 opaque[3];
-};
-
-/* FCoE firmware function init */
-struct fcoe_init_func_ramrod_data {
- struct scsi_init_func_params func_params;
- struct scsi_init_func_queues q_params;
- __le16 mtu;
- __le16 sq_num_pages_in_pbl;
- __le32 reserved;
-};
-
-/* FCoE: Mode of the connection: Target or Initiator or both */
-enum fcoe_mode_type {
- FCOE_INITIATOR_MODE = 0x0,
- FCOE_TARGET_MODE = 0x1,
- FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
- MAX_FCOE_MODE_TYPE
-};
-
-struct fcoe_rx_stat {
- struct regpair fcoe_rx_byte_cnt;
- struct regpair fcoe_rx_data_pkt_cnt;
- struct regpair fcoe_rx_xfer_pkt_cnt;
- struct regpair fcoe_rx_other_pkt_cnt;
- __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
- __le32 fcoe_silent_drop_pkt_rq_full_cnt;
- __le32 fcoe_silent_drop_pkt_crc_error_cnt;
- __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
- __le32 fcoe_silent_drop_total_pkt_cnt;
- __le32 rsrv;
-};
-
-struct fcoe_stat_ramrod_data {
- struct regpair stat_params_addr;
-};
-
-struct protection_info_ctx {
- __le16 flags;
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
-#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
-#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
- u8 dix_block_size;
- u8 dst_size;
-};
-
-union protection_info_union_ctx {
- struct protection_info_ctx info;
- __le32 value;
-};
-
+/* FCP RSP payload */
struct fcp_rsp_payload_padded {
struct fcoe_fcp_rsp_payload rsp_payload;
__le32 reserved[2];
};
+/* FCP XFER payload */
+struct fcoe_fcp_xfer_payload {
+ __le32 opaque[3];
+};
+
+/* FCP XFER payload */
struct fcp_xfer_payload_padded {
struct fcoe_fcp_xfer_payload xfer_payload;
__le32 reserved[5];
};
+/* Task params */
struct fcoe_tx_data_params {
__le32 data_offset;
__le32 offset_in_io;
u8 flags;
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
-#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
-#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
u8 dif_residual;
__le16 seq_cnt;
__le16 single_sge_saved_offset;
@@ -227,6 +88,7 @@ struct fcoe_tx_data_params {
__le16 reserved3;
};
+/* Middle path parameters: FC header fields provided by the driver */
struct fcoe_tx_mid_path_params {
__le32 parameter;
u8 r_ctl;
@@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params {
__le16 ox_id;
};
+/* Task params */
struct fcoe_tx_params {
struct fcoe_tx_data_params data;
struct fcoe_tx_mid_path_params mid_path;
};
+/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
union fcoe_tx_info_union_ctx {
struct fcoe_fcp_cmd_payload fcp_cmd_payload;
struct fcp_rsp_payload_padded fcp_rsp_payload;
@@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx {
struct fcoe_tx_params tx_params;
};
+/* Data sgl */
+struct fcoe_slow_sgl_ctx {
+ struct regpair base_sgl_addr;
+ __le16 curr_sge_off;
+ __le16 remainder_num_sges;
+ __le16 curr_sgl_index;
+ __le16 reserved;
+};
+
+/* Union of DIX SGL \ cached DIX sges */
+union fcoe_dix_desc_ctx {
+ struct fcoe_slow_sgl_ctx dix_sgl;
+ struct scsi_sge cached_dix_sge;
+};
+
+/* The fcoe storm task context of Ystorm */
struct ystorm_fcoe_task_st_ctx {
u8 task_type;
u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
u8 cached_dix_sge;
u8 expect_first_xfer;
__le32 num_pbf_zero_write;
@@ -272,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx {
u8 reserved2[8];
};
-struct ystorm_fcoe_task_ag_ctx {
+struct e4_ystorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -328,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx {
__le32 reg2;
};
-struct tstorm_fcoe_task_ag_ctx {
+struct e4_tstorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
u8 flags1;
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
u8 flags3;
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx {
__le32 data_offset_next;
};
+/* Expected relative offset */
+struct fcoe_exp_ro {
+ __le32 data_offset;
+ __le32 reserved;
+};
+
+/* Union of Cleanup address \ expected relative offsets */
+union fcoe_cleanup_addr_exp_ro_union {
+ struct regpair abts_rsp_fc_payload_hi;
+ struct fcoe_exp_ro exp_ro;
+};
+
+/* Fields copied from the ABTS response packet */
+struct fcoe_abts_pkt {
+ __le32 abts_rsp_fc_payload_lo;
+ __le16 abts_rsp_rx_id;
+ u8 abts_rsp_rctl;
+ u8 reserved2;
+};
+
+/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
__le16 seq_cnt;
u8 seq_id;
u8 ooo_rx_seq_id;
@@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
__le16 reserved1;
};
+/* FW read-only part of the fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
u8 task_type;
u8 dev_type;
@@ -446,54 +348,55 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
__le32 rsrv;
};
+/* The fcoe task storm context of Tstorm */
struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
};
-struct mstorm_fcoe_task_ag_ctx {
+struct e4_mstorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 icid;
u8 flags0;
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 cleanup_state;
__le32 received_bytes;
u8 byte3;
@@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx {
__le32 reg2;
};
+/* The fcoe task storm context of Mstorm */
struct mstorm_fcoe_task_st_ctx {
struct regpair rsp_buf_addr;
__le32 rsrv[2];
@@ -515,79 +419,79 @@ struct mstorm_fcoe_task_st_ctx {
__le32 data_buffer_offset;
__le16 parent_id;
__le16 flags;
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
struct scsi_cached_sges data_desc;
};
-struct ustorm_fcoe_task_ag_ctx {
+struct e4_ustorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 global_cq_num;
@@ -596,21 +500,189 @@ struct ustorm_fcoe_task_ag_ctx {
__le32 reg5;
};
-struct fcoe_task_context {
+/* FCoE task context */
+struct e4_fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
- struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
- struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
struct tstorm_fcoe_task_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_task_st_ctx mstorm_st_context;
- struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
struct rdif_task_context rdif_context;
};
+/* FCoE additional WQE (Sq/XferQ) information */
+union fcoe_additional_info_union {
+ __le32 previous_tid;
+ __le32 parent_tid;
+ __le32 burst_length;
+ __le32 seq_rec_updated_offset;
+};
+
+/* FCoE Ramrod Command IDs */
+enum fcoe_completion_status {
+ FCOE_COMPLETION_STATUS_SUCCESS,
+ FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
+ FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
+ MAX_FCOE_COMPLETION_STATUS
+};
+
+/* FC address (SID/DID) network presentation */
+struct fc_addr_nw {
+ u8 addr_lo;
+ u8 addr_mid;
+ u8 addr_hi;
+};
+
+/* FCoE connection offload */
+struct fcoe_conn_offload_ramrod_data {
+ struct regpair sq_pbl_addr;
+ struct regpair sq_curr_page_addr;
+ struct regpair sq_next_page_addr;
+ struct regpair xferq_pbl_addr;
+ struct regpair xferq_curr_page_addr;
+ struct regpair xferq_next_page_addr;
+ struct regpair respq_pbl_addr;
+ struct regpair respq_curr_page_addr;
+ struct regpair respq_next_page_addr;
+ __le16 dst_mac_addr_lo;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_hi;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ __le16 src_mac_addr_hi;
+ __le16 tx_max_fc_pay_len;
+ __le16 e_d_tov_timer_val;
+ __le16 rx_max_fc_pay_len;
+ __le16 vlan_tag;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
+ __le16 physical_q0;
+ __le16 rec_rr_tov_timer_val;
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7
+ __le16 conn_id;
+ u8 def_q_idx;
+ u8 reserved[5];
+};
+
+/* FCoE terminate connection request */
+struct fcoe_conn_terminate_ramrod_data {
+ struct regpair terminate_params_addr;
+};
+
+/* FCoE device type */
+enum fcoe_device_type {
+ FCOE_TASK_DEV_TYPE_DISK,
+ FCOE_TASK_DEV_TYPE_TAPE,
+ MAX_FCOE_DEVICE_TYPE
+};
+
+/* Data sgl */
+struct fcoe_fast_sgl_ctx {
+ struct regpair sgl_start_addr;
+ __le32 sgl_byte_offset;
+ __le16 task_reuse_cnt;
+ __le16 init_offset_in_first_sge;
+};
+
+/* FCoE firmware function init */
+struct fcoe_init_func_ramrod_data {
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+ __le16 mtu;
+ __le16 sq_num_pages_in_pbl;
+ __le32 reserved[3];
+};
+
+/* FCoE: Mode of the connection: Target or Initiator or both */
+enum fcoe_mode_type {
+ FCOE_INITIATOR_MODE = 0x0,
+ FCOE_TARGET_MODE = 0x1,
+ FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
+ MAX_FCOE_MODE_TYPE
+};
+
+/* Per PF FCoE receive path statistics - tStorm RAM structure */
+struct fcoe_rx_stat {
+ struct regpair fcoe_rx_byte_cnt;
+ struct regpair fcoe_rx_data_pkt_cnt;
+ struct regpair fcoe_rx_xfer_pkt_cnt;
+ struct regpair fcoe_rx_other_pkt_cnt;
+ __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_rq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_crc_error_cnt;
+ __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
+ __le32 fcoe_silent_drop_total_pkt_cnt;
+ __le32 rsrv;
+};
+
+/* FCoE SQE request type */
+enum fcoe_sqe_request_type {
+ SEND_FCOE_CMD,
+ SEND_FCOE_MIDPATH,
+ SEND_FCOE_ABTS_REQUEST,
+ FCOE_EXCHANGE_CLEANUP,
+ FCOE_SEQUENCE_RECOVERY,
+ SEND_FCOE_XFER_RDY,
+ SEND_FCOE_RSP,
+ SEND_FCOE_RSP_WITH_SENSE_DATA,
+ SEND_FCOE_TARGET_DATA,
+ SEND_FCOE_INITIATOR_DATA,
+ SEND_FCOE_XFER_CONTINUATION_RDY,
+ SEND_FCOE_TARGET_ABTS_RSP,
+ MAX_FCOE_SQE_REQUEST_TYPE
+};
+
+/* FCoE statistics request */
+struct fcoe_stat_ramrod_data {
+ struct regpair stat_params_addr;
+};
+
+/* FCoE task type */
+enum fcoe_task_type {
+ FCOE_TASK_TYPE_WRITE_INITIATOR,
+ FCOE_TASK_TYPE_READ_INITIATOR,
+ FCOE_TASK_TYPE_MIDPATH,
+ FCOE_TASK_TYPE_UNSOLICITED,
+ FCOE_TASK_TYPE_ABTS,
+ FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
+ FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
+ FCOE_TASK_TYPE_WRITE_TARGET,
+ FCOE_TASK_TYPE_READ_TARGET,
+ FCOE_TASK_TYPE_RSP,
+ FCOE_TASK_TYPE_RSP_SENSE_DATA,
+ FCOE_TASK_TYPE_ABTS_TARGET,
+ FCOE_TASK_TYPE_ENUM_SIZE,
+ MAX_FCOE_TASK_TYPE
+};
+
+/* Per PF FCoE transmit path statistics - pStorm RAM structure */
struct fcoe_tx_stat {
struct regpair fcoe_tx_byte_cnt;
struct regpair fcoe_tx_data_pkt_cnt;
@@ -618,51 +690,55 @@ struct fcoe_tx_stat {
struct regpair fcoe_tx_other_pkt_cnt;
};
+/* FCoE SQ/XferQ element */
struct fcoe_wqe {
__le16 task_id;
__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK 0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT 0
-#define FCOE_WQE_SGL_MODE_MASK 0x1
-#define FCOE_WQE_SGL_MODE_SHIFT 4
-#define FCOE_WQE_CONTINUATION_MASK 0x1
-#define FCOE_WQE_CONTINUATION_SHIFT 5
-#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
-#define FCOE_WQE_RESERVED_MASK 0x1
-#define FCOE_WQE_RESERVED_SHIFT 7
-#define FCOE_WQE_NUM_SGES_MASK 0xF
-#define FCOE_WQE_NUM_SGES_SHIFT 8
-#define FCOE_WQE_RESERVED1_MASK 0xF
-#define FCOE_WQE_RESERVED1_SHIFT 12
+#define FCOE_WQE_REQ_TYPE_MASK 0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT 0
+#define FCOE_WQE_SGL_MODE_MASK 0x1
+#define FCOE_WQE_SGL_MODE_SHIFT 4
+#define FCOE_WQE_CONTINUATION_MASK 0x1
+#define FCOE_WQE_CONTINUATION_SHIFT 5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK 0x1
+#define FCOE_WQE_RESERVED_SHIFT 7
+#define FCOE_WQE_NUM_SGES_MASK 0xF
+#define FCOE_WQE_NUM_SGES_SHIFT 8
+#define FCOE_WQE_RESERVED1_MASK 0xF
+#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union;
};
+/* FCoE XFRQ element */
struct xfrqe_prot_flags {
u8 flags;
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
-#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
-#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
};
+/* FCoE doorbell data */
struct fcoe_db_data {
u8 params;
-#define FCOE_DB_DATA_DEST_MASK 0x3
-#define FCOE_DB_DATA_DEST_SHIFT 0
-#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
-#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
-#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
-#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
-#define FCOE_DB_DATA_RESERVED_MASK 0x1
-#define FCOE_DB_DATA_RESERVED_SHIFT 5
-#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define FCOE_DB_DATA_DEST_MASK 0x3
+#define FCOE_DB_DATA_DEST_SHIFT 0
+#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
+#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define FCOE_DB_DATA_RESERVED_MASK 0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT 5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 sq_prod;
};
+
#endif /* __FCOE_COMMON__ */
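In the same spirit, the fcoe_wqe flags word above packs the SQE request type (enum fcoe_sqe_request_type) together with the SGE count. Below is a minimal sketch assuming trimmed local copies of those definitions; FCOE_WQE_SET() is a hypothetical helper written for this example rather than a macro from the header.

/*
 * Standalone sketch: building the flags word of an SQ element for a
 * single-SGE FCP command.  The field layout and the request-type value
 * mirror the definitions above; everything else is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define FCOE_WQE_REQ_TYPE_MASK		0xF
#define FCOE_WQE_REQ_TYPE_SHIFT		0
#define FCOE_WQE_NUM_SGES_MASK		0xF
#define FCOE_WQE_NUM_SGES_SHIFT		8

/* First entry of enum fcoe_sqe_request_type above */
#define SEND_FCOE_CMD_VAL		0

#define FCOE_WQE_SET(flags, name, val) \
	((flags) |= (uint16_t)(((val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint16_t flags = 0;

	FCOE_WQE_SET(flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD_VAL);
	FCOE_WQE_SET(flags, FCOE_WQE_NUM_SGES, 1);	/* one data SGE */

	printf("wqe flags = 0x%04x\n", (unsigned int)flags);	/* expect 0x0100 */
	return 0;
}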
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 85e086cba639..938df614cb6a 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -32,47 +32,48 @@
#ifndef __ISCSI_COMMON__
#define __ISCSI_COMMON__
+
/**********************/
/* ISCSI FW CONSTANTS */
/**********************/
/* iSCSI HSI constants */
-#define ISCSI_DEFAULT_MTU (1500)
+#define ISCSI_DEFAULT_MTU (1500)
/* KWQ (kernel work queue) layer codes */
-#define ISCSI_SLOW_PATH_LAYER_CODE (6)
+#define ISCSI_SLOW_PATH_LAYER_CODE (6)
/* iSCSI parameter defaults */
-#define ISCSI_DEFAULT_HEADER_DIGEST (0)
-#define ISCSI_DEFAULT_DATA_DIGEST (0)
-#define ISCSI_DEFAULT_INITIAL_R2T (1)
-#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
-#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
-#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
-#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
-#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
+#define ISCSI_DEFAULT_HEADER_DIGEST (0)
+#define ISCSI_DEFAULT_DATA_DIGEST (0)
+#define ISCSI_DEFAULT_INITIAL_R2T (1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
/* iSCSI parameter limits */
-#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
-#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
-#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
-#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
-#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
-#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
-#define ISCSI_AHS_CNTL_SIZE 4
+#define ISCSI_AHS_CNTL_SIZE 4
-#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
+#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
/* iSCSI reserved params */
#define ISCSI_ITT_ALL_ONES (0xffffffff)
#define ISCSI_TTT_ALL_ONES (0xffffffff)
-#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
-#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
+#define ISCSI_OPTION_2_ON_CHIP_TCP 2
-#define ISCSI_INITIATOR_MODE 0
-#define ISCSI_TARGET_MODE 1
+#define ISCSI_INITIATOR_MODE 0
+#define ISCSI_TARGET_MODE 1
/* iSCSI request op codes */
#define ISCSI_OPCODE_NOP_OUT (0)
@@ -84,41 +85,48 @@
#define ISCSI_OPCODE_LOGOUT_REQUEST (6)
/* iSCSI response/messages op codes */
-#define ISCSI_OPCODE_NOP_IN (0x20)
-#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
-#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
-#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
-#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
-#define ISCSI_OPCODE_DATA_IN (0x25)
-#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
-#define ISCSI_OPCODE_R2T (0x31)
-#define ISCSI_OPCODE_ASYNC_MSG (0x32)
-#define ISCSI_OPCODE_REJECT (0x3f)
+#define ISCSI_OPCODE_NOP_IN (0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
+#define ISCSI_OPCODE_DATA_IN (0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
+#define ISCSI_OPCODE_R2T (0x31)
+#define ISCSI_OPCODE_ASYNC_MSG (0x32)
+#define ISCSI_OPCODE_REJECT (0x3f)
/* iSCSI stages */
-#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
-#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
-#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
+#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
/* iSCSI CQE errors */
-#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
+#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
+
+/* Union of data bd_opaque/ tq_tid */
+union bd_opaque_tq_union {
+ __le16 bd_opaque;
+ __le16 tq_tid;
+};
+/* ISCSI SGL entry */
struct cqe_error_bitmap {
u8 cqe_error_status_bits;
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
-#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
-#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
};
union cqe_error_status {
@@ -126,86 +134,133 @@ union cqe_error_status {
struct cqe_error_bitmap error_bits;
};
+/* iSCSI Login Response PDU header */
struct data_hdr {
__le32 data[12];
};
-struct iscsi_async_msg_hdr {
- __le16 reserved0;
- u8 flags_attr;
-#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
-#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
-#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
-#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
- u8 opcode;
- __le32 hdr_second_dword;
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
- struct regpair lun;
- __le32 all_ones;
- __le32 reserved1;
- __le32 stat_sn;
- __le32 exp_cmd_sn;
- __le32 max_cmd_sn;
- __le16 param1_rsrv;
- u8 async_vcode;
- u8 async_event;
- __le16 param3_rsrv;
- __le16 param2_rsrv;
- __le32 reserved7;
+struct lun_mapper_addr_reserved {
+ struct regpair lun_mapper_addr;
+ u8 reserved0[8];
+};
+
+/* RDIF context for DIF on immediate */
+struct dif_on_immediate_params {
+ __le32 initial_ref_tag;
+ __le16 application_tag;
+ __le16 application_tag_mask;
+ __le16 flags1;
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
+ u8 flags0;
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7
+ u8 reserved_zero[5];
+};
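
/*
 * Illustrative sketch (helper name and values are made up, not part of this
 * header): packing two of the flags1 bits of struct dif_on_immediate_params
 * with the mask/shift pairs defined above.
 */
static inline __le16 dif_on_imm_build_flags1(u8 validate_guard, u8 host_if)
{
	u16 flags1 = 0;

	/* single-bit field at bit 0 */
	flags1 |= (validate_guard & DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK) <<
		  DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT;
	/* two-bit field at bits 8..9 */
	flags1 |= (host_if & DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK) <<
		  DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT;

	return cpu_to_le16(flags1);
}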
+
+/* iSCSI dif on immediate mode attributes union */
+union dif_configuration_params {
+ struct lun_mapper_addr_reserved lun_mapper_address;
+ struct dif_on_immediate_params def_dif_conf;
+};
+
+/* Union of data/r2t sequence number */
+union iscsi_seq_num {
+ __le16 data_sn;
+ __le16 r2t_sn;
};
-struct iscsi_cmd_hdr {
- __le16 reserved1;
- u8 flags_attr;
-#define ISCSI_CMD_HDR_ATTR_MASK 0x7
-#define ISCSI_CMD_HDR_ATTR_SHIFT 0
-#define ISCSI_CMD_HDR_RSRV_MASK 0x3
-#define ISCSI_CMD_HDR_RSRV_SHIFT 3
-#define ISCSI_CMD_HDR_WRITE_MASK 0x1
-#define ISCSI_CMD_HDR_WRITE_SHIFT 5
-#define ISCSI_CMD_HDR_READ_MASK 0x1
-#define ISCSI_CMD_HDR_READ_SHIFT 6
-#define ISCSI_CMD_HDR_FINAL_MASK 0x1
-#define ISCSI_CMD_HDR_FINAL_SHIFT 7
- u8 hdr_first_byte;
-#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
-#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
-#define ISCSI_CMD_HDR_IMM_MASK 0x1
-#define ISCSI_CMD_HDR_IMM_SHIFT 6
-#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
-#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
- __le32 hdr_second_dword;
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
- struct regpair lun;
- __le32 itt;
- __le32 expected_transfer_length;
- __le32 cmd_sn;
- __le32 exp_stat_sn;
- __le32 cdb[4];
+/* iSCSI DIF flags */
+struct iscsi_dif_flags {
+ u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
};
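
/*
 * Illustrative sketch (helper name is made up): the PROT_INTERVAL_SIZE_LOG
 * field name suggests the protection interval is stored as a log2, e.g. 9
 * for 512-byte intervals; that interpretation is an assumption here.
 */
static inline void iscsi_dif_flags_set_interval_log(struct iscsi_dif_flags *df,
						    u32 interval_size)
{
	u8 log = ilog2(interval_size);	/* 9 for a 512-byte interval */

	df->flags &= ~(ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK <<
		       ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT);
	df->flags |= (log & ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK) <<
		     ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT;
}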
+/* Task state part of the iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_state {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 exp_r2t_sn;
+ __le32 buffer_offset;
+ union iscsi_seq_num seq_num;
+ struct iscsi_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3
+};
+
+/* Retransmit options part of the iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_rxmit_opt {
+ __le32 fast_rxmit_sge_offset;
+ __le32 scan_start_buffer_offset;
+ __le32 fast_rxmit_buffer_offset;
+ u8 scan_start_sgl_index;
+ u8 fast_rxmit_sgl_index;
+ __le16 reserved;
+};
+
+/* iSCSI Common PDU header */
struct iscsi_common_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 hdr_first_byte;
-#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
-#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
-#define ISCSI_COMMON_HDR_IMM_MASK 0x1
-#define ISCSI_COMMON_HDR_IMM_SHIFT 6
-#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
-#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
+#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
+#define ISCSI_COMMON_HDR_IMM_MASK 0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT 6
+#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
__le32 hdr_second_dword;
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun_reserved;
__le32 itt;
__le32 ttt;
@@ -215,86 +270,60 @@ struct iscsi_common_hdr {
__le32 data[3];
};
-struct iscsi_conn_offload_params {
- struct regpair sq_pbl_addr;
- struct regpair r2tq_pbl_addr;
- struct regpair xhq_pbl_addr;
- struct regpair uhq_pbl_addr;
- __le32 initial_ack;
- __le16 physical_q0;
- __le16 physical_q1;
- u8 flags;
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
- u8 pbl_page_size_log;
- u8 pbe_page_size_log;
- u8 default_cq;
- __le32 stat_sn;
-};
-
-struct iscsi_slow_path_hdr {
- u8 op_code;
- u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
-};
-
-struct iscsi_conn_update_ramrod_params {
- struct iscsi_slow_path_hdr hdr;
- __le16 conn_id;
- __le32 fw_cid;
- u8 flags;
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
- u8 reserved0[3];
- __le32 max_seq_size;
- __le32 max_send_pdu_length;
- __le32 max_recv_pdu_length;
- __le32 first_seq_length;
+/* iSCSI Command PDU header */
+struct iscsi_cmd_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_CMD_HDR_READ_MASK 0x1
+#define ISCSI_CMD_HDR_READ_SHIFT 6
+#define ISCSI_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT 7
+ u8 hdr_first_byte;
+#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
+#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
+#define ISCSI_CMD_HDR_IMM_MASK 0x1
+#define ISCSI_CMD_HDR_IMM_SHIFT 6
+#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
+#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
+ __le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 expected_transfer_length;
+ __le32 cmd_sn;
__le32 exp_stat_sn;
+ __le32 cdb[4];
};
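
/*
 * Illustrative sketch (helper names are made up): reading the packed
 * DataSegmentLength and the WRITE bit out of an iscsi_cmd_hdr using the
 * mask/shift pairs defined above.
 */
static inline u32 iscsi_cmd_hdr_data_seg_len(const struct iscsi_cmd_hdr *hdr)
{
	return (le32_to_cpu(hdr->hdr_second_dword) >>
		ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT) &
	       ISCSI_CMD_HDR_DATA_SEG_LEN_MASK;
}

static inline bool iscsi_cmd_hdr_is_write(const struct iscsi_cmd_hdr *hdr)
{
	return (hdr->flags_attr >> ISCSI_CMD_HDR_WRITE_SHIFT) &
	       ISCSI_CMD_HDR_WRITE_MASK;
}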
+/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
struct iscsi_ext_cdb_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
-#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 expected_transfer_length;
@@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr {
struct scsi_sge cdb_sge;
};
+/* iSCSI login request PDU header */
struct iscsi_login_req_hdr {
u8 version_min;
u8 version_max;
u8 flags_attr;
-#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
-#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
-#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
-#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
-#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
-#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
-#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
@@ -334,6 +364,7 @@ struct iscsi_login_req_hdr {
__le32 reserved2[4];
};
+/* iSCSI logout request PDU header */
struct iscsi_logout_req_hdr {
__le16 reserved0;
u8 reason_code;
@@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr {
__le32 reserved4[4];
};
+/* iSCSI Data-out PDU header */
struct iscsi_data_out_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
-#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
-#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
-#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -368,22 +400,23 @@ struct iscsi_data_out_hdr {
__le32 reserved5;
};
+/* iSCSI Data-in PDU header */
struct iscsi_data_in_hdr {
u8 status_rsvd;
u8 reserved1;
u8 flags;
-#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
-#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
-#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
-#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
-#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
-#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
-#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
-#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
+#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -397,6 +430,7 @@ struct iscsi_data_in_hdr {
__le32 residual_count;
};
+/* iSCSI R2T PDU header */
struct iscsi_r2t_hdr {
u8 reserved0[3];
u8 opcode;
@@ -412,13 +446,14 @@ struct iscsi_r2t_hdr {
__le32 desired_data_trns_len;
};
+/* iSCSI NOP-out PDU header */
struct iscsi_nop_out_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
-#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
-#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
-#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr {
__le32 reserved6;
};
+/* iSCSI NOP-in PDU header */
struct iscsi_nop_in_hdr {
__le16 reserved0;
u8 flags_attr;
-#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
-#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
-#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
-#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr {
__le32 reserved7;
};
+/* iSCSI Login Response PDU header */
struct iscsi_login_response_hdr {
u8 version_active;
u8 version_max;
u8 flags_attr;
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
-#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
-#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
@@ -490,16 +527,17 @@ struct iscsi_login_response_hdr {
__le32 reserved4[2];
};
+/* iSCSI Logout Response PDU header */
struct iscsi_logout_response_hdr {
u8 reserved1;
u8 response;
u8 flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 reserved2[2];
__le32 itt;
__le32 reserved3;
@@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr {
__le32 reserved5[1];
};
+/* iSCSI Text Request PDU header */
struct iscsi_text_request_hdr {
__le16 reserved0;
u8 flags_attr;
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
-#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
-#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
-#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
-#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -535,21 +574,22 @@ struct iscsi_text_request_hdr {
__le32 reserved4[4];
};
+/* iSCSI Text Response PDU header */
struct iscsi_text_response_hdr {
__le16 reserved1;
u8 flags;
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
-#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
-#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
-#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
-#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -559,15 +599,16 @@ struct iscsi_text_response_hdr {
__le32 reserved4[3];
};
+/* iSCSI TMF Request PDU header */
struct iscsi_tmf_request_hdr {
__le16 reserved0;
u8 function;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 rtt;
@@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr {
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 itt;
__le32 reserved1;
@@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr {
__le32 reserved4[3];
};
+/* iSCSI Response PDU header */
struct iscsi_response_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 snack_tag;
@@ -618,16 +660,17 @@ struct iscsi_response_hdr {
__le32 residual_count;
};
+/* iSCSI Reject PDU header */
struct iscsi_reject_hdr {
u8 reserved4;
u8 hdr_reason;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 all_ones;
__le32 reserved2;
@@ -638,6 +681,35 @@ struct iscsi_reject_hdr {
__le32 reserved3[2];
};
+/* iSCSI Asynchronous Message PDU header */
+struct iscsi_async_msg_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 all_ones;
+ __le32 reserved1;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le16 param1_rsrv;
+ u8 async_vcode;
+ u8 async_event;
+ __le16 param3_rsrv;
+ __le16 param2_rsrv;
+ __le32 reserved7;
+};
+
+/* PDU header part of Ystorm task context */
union iscsi_task_hdr {
struct iscsi_common_hdr common;
struct data_hdr data;
@@ -661,6 +733,348 @@ union iscsi_task_hdr {
struct iscsi_async_msg_hdr async_msg;
};
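
/*
 * Illustrative sketch (helper name is made up): assuming the opcode sits in
 * the same byte of the first dword for every member of union iscsi_task_hdr,
 * as the definitions above suggest, it can be read through the common view
 * before picking the specific member. The opcode values themselves come from
 * the iSCSI spec, not from this header.
 */
static inline u8 iscsi_task_hdr_opcode(const union iscsi_task_hdr *hdr)
{
	return (hdr->common.hdr_first_byte >> ISCSI_COMMON_HDR_OPCODE_SHIFT) &
	       ISCSI_COMMON_HDR_OPCODE_MASK;
}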
+/* The iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_st_ctx {
+ struct ystorm_iscsi_task_state state;
+ struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
+ union iscsi_task_hdr pdu_hdr;
+};
+
+struct e4_ystorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
+ u8 flags1;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 TTT;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct e4_mstorm_iscsi_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct e4_ustorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 state;
+ __le16 icid;
+ u8 flags0;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ u8 next_tid_valid;
+ u8 byte3;
+ __le16 word1;
+ __le16 next_tid;
+ __le16 word3;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+/* The iscsi storm task context of Mstorm */
+struct mstorm_iscsi_task_st_ctx {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
+ u8 task_type;
+ struct iscsi_dif_flags dif_flags;
+ __le16 dif_task_icid;
+ struct regpair sense_db;
+ __le32 expected_itt;
+ __le32 reserved1;
+};
+
+struct iscsi_reg1 {
+ __le32 reg1_map;
+#define ISCSI_REG1_NUM_SGES_MASK 0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
+};
+
+struct tqe_opaque {
+ __le16 opaque[2];
+};
+
+/* The iscsi storm task context of Ustorm */
+struct ustorm_iscsi_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair lun;
+ struct iscsi_reg1 reg1;
+ u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
+ struct iscsi_dif_flags dif_flags;
+ __le16 reserved3;
+ struct tqe_opaque tqe_opaque_list;
+ __le32 reserved5;
+ __le32 reserved6;
+ __le32 reserved7;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
+ u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
+ u8 cq_rss_number;
+};
+
+/* iscsi task context */
+struct e4_iscsi_task_context {
+ struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+ struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct tdif_task_context tdif_context;
+ struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+ struct rdif_task_context rdif_context;
+};
+
+/* iSCSI connection offload params passed by driver to FW in ISCSI offload
+ * ramrod.
+ */
+struct iscsi_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le32 initial_ack;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
+ u8 pbl_page_size_log;
+ u8 pbe_page_size_log;
+ u8 default_cq;
+ __le32 stat_sn;
+};
+
+/* iSCSI connection statistics */
+struct iscsi_conn_stats_params {
+ struct regpair iscsi_tcp_tx_packets_cnt;
+ struct regpair iscsi_tcp_tx_bytes_cnt;
+ struct regpair iscsi_tcp_tx_rxmit_cnt;
+ struct regpair iscsi_tcp_rx_packets_cnt;
+ struct regpair iscsi_tcp_rx_bytes_cnt;
+ struct regpair iscsi_tcp_rx_dup_ack_cnt;
+ __le32 iscsi_tcp_rx_chksum_err_cnt;
+ __le32 reserved;
+};
+
+/* spe message header */
+struct iscsi_slow_path_hdr {
+ u8 op_code;
+ u8 flags;
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
+};
+
+/* iSCSI connection update params passed by driver to FW in ISCSI update
+ * ramrod.
+ */
+struct iscsi_conn_update_ramrod_params {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
+ u8 reserved0[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 exp_stat_sn;
+ union dif_configuration_params dif_on_imme_params;
+};
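
/*
 * Illustrative sketch (helper name is made up): turning on the new
 * DIF-on-immediate-data path in the update ramrod. Without LUN_MAPPER_EN
 * the dif_on_imme_params union presumably carries the default DIF
 * configuration rather than the LUN mapper address.
 */
static inline void
iscsi_update_enable_dif_on_imm(struct iscsi_conn_update_ramrod_params *p,
			       const struct dif_on_immediate_params *dif_conf)
{
	p->flags |= 1 << ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT;
	p->dif_on_imme_params.def_dif_conf = *dif_conf;
}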
+
+/* iSCSI CQ element */
struct iscsi_cqe_common {
__le16 conn_id;
u8 cqe_type;
@@ -669,6 +1083,7 @@ struct iscsi_cqe_common {
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI CQ element */
struct iscsi_cqe_solicited {
__le16 conn_id;
u8 cqe_type;
@@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited {
u8 fw_dbg_field;
u8 caused_conn_err;
u8 reserved0[3];
- __le32 reserved1[1];
+ __le32 data_truncated_bytes;
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI CQ element */
struct iscsi_cqe_unsolicited {
__le16 conn_id;
u8 cqe_type;
@@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited {
__le16 reserved0;
u8 reserved1;
u8 unsol_cqe_type;
- struct regpair rqe_opaque;
+ __le16 rqe_opaque;
+ __le16 reserved2[3];
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI CQ element */
union iscsi_cqe {
struct iscsi_cqe_common cqe_common;
struct iscsi_cqe_solicited cqe_solicited;
struct iscsi_cqe_unsolicited cqe_unsolicited;
};
+/* iSCSI CQE type */
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
@@ -708,6 +1127,7 @@ enum iscsi_cqes_type {
MAX_ISCSI_CQES_TYPE
};
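
/*
 * Illustrative sketch (handler is hypothetical): all three CQE layouts
 * start with the same conn_id/cqe_type bytes, so cqe_common can be
 * inspected first and the matching member selected afterwards.
 */
static void iscsi_handle_cqe_sketch(const union iscsi_cqe *cqe)
{
	switch (cqe->cqe_common.cqe_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
		/* complete the task this CQE refers to */
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
		/* hand the unsolicited PDU (e.g. NOP-In) to the PDU path */
		break;
	default:
		break;
	}
}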
+/* iSCSI unsolicited CQE type */
enum iscsi_cqe_unsolicited_type {
ISCSI_CQE_UNSOLICITED_NONE,
ISCSI_CQE_UNSOLICITED_SINGLE,
@@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type {
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
-
+/* iscsi debug modes */
struct iscsi_debug_modes {
u8 flags;
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
-};
-
-struct iscsi_dif_flags {
- u8 flags;
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
-};
-
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7
+};
+
+/* iSCSI kernel completion queue IDs */
enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_INIT_FUNC = 0,
ISCSI_EVENT_TYPE_DESTROY_FUNC,
@@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
+ ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
- RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
@@ -772,6 +1183,7 @@ enum iscsi_eqe_opcode {
MAX_ISCSI_EQE_OPCODE
};
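
/*
 * Illustrative sketch (handler is hypothetical): the newly added
 * ISCSI_EVENT_TYPE_COLLECT_STATS_CONN opcode would be dispatched alongside
 * the other asynchronous events in the driver's event queue path.
 */
static void iscsi_eq_event_sketch(u8 fw_event_code)
{
	switch (fw_event_code) {
	case ISCSI_EVENT_TYPE_COLLECT_STATS_CONN:
		/* the per-connection statistics buffer has been filled */
		break;
	default:
		break;
	}
}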
+/* iSCSI EQE and CQE completion status */
enum iscsi_error_types {
ISCSI_STATUS_NONE = 0,
ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
@@ -823,7 +1235,7 @@ enum iscsi_error_types {
MAX_ISCSI_ERROR_TYPES
};
-
+/* iSCSI Ramrod Command IDs */
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
@@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
+ ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
MAX_ISCSI_RAMROD_CMD_ID
};
-struct iscsi_reg1 {
- __le32 reg1_map;
-#define ISCSI_REG1_NUM_SGES_MASK 0xF
-#define ISCSI_REG1_NUM_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 4
-};
-
-union iscsi_seq_num {
- __le16 data_sn;
- __le16 r2t_sn;
-};
-
+/* iSCSI connection MAC update request */
struct iscsi_spe_conn_mac_update {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update {
u8 reserved0[2];
};
+/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload {
struct tcp_offload_params tcp;
};
+/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
struct iscsi_spe_conn_offload_option2 {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 {
struct tcp_offload_params_opt2 tcp;
};
+/* iSCSI collect connection statistics request */
+struct iscsi_spe_conn_statistics {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ u8 reset_stats;
+ u8 reserved0[7];
+ struct regpair stats_cnts_addr;
+};
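
/*
 * Illustrative sketch (helper and variable names are made up): pointing the
 * statistics ramrod at a DMA buffer the firmware fills with
 * struct iscsi_conn_stats_params; regpair is assumed to be the usual
 * lo/hi pair of little-endian dwords.
 */
static void iscsi_fill_stats_spe_sketch(struct iscsi_spe_conn_statistics *spe,
					u16 conn_id, u32 fw_cid,
					dma_addr_t stats_phys, bool reset)
{
	spe->conn_id = cpu_to_le16(conn_id);
	spe->fw_cid = cpu_to_le32(fw_cid);
	spe->reset_stats = reset;
	spe->stats_cnts_addr.lo = cpu_to_le32(lower_32_bits(stats_phys));
	spe->stats_cnts_addr.hi = cpu_to_le32(upper_32_bits(stats_phys));
}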
+
+/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination {
struct regpair query_params_addr;
};
+/* iSCSI firmware function destroy parameters */
struct iscsi_spe_func_dstry {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le32 reserved1;
};
+/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
@@ -898,283 +1318,19 @@ struct iscsi_spe_func_init {
u8 num_r2tq_pages_in_ring;
u8 num_uhq_pages_in_ring;
u8 ll2_rx_queue_id;
- u8 ooo_enable;
+ u8 flags;
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode;
__le16 reserved1;
__le32 reserved2;
- __le32 reserved3;
- __le32 reserved4;
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
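
/*
 * Illustrative sketch (helper name is made up): the ooo_enable byte was
 * replaced by a flags byte; per-function statistics collection is switched
 * on by setting the COUNTERS_EN bit.
 */
static inline void
iscsi_func_init_enable_counters(struct iscsi_spe_func_init *init)
{
	init->flags |= ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK <<
		       ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT;
}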
-struct ystorm_iscsi_task_state {
- struct scsi_cached_sges data_desc;
- struct scsi_sgl_params sgl_params;
- __le32 exp_r2t_sn;
- __le32 buffer_offset;
- union iscsi_seq_num seq_num;
- struct iscsi_dif_flags dif_flags;
- u8 flags;
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
-};
-
-struct ystorm_iscsi_task_rxmit_opt {
- __le32 fast_rxmit_sge_offset;
- __le32 scan_start_buffer_offset;
- __le32 fast_rxmit_buffer_offset;
- u8 scan_start_sgl_index;
- u8 fast_rxmit_sgl_index;
- __le16 reserved;
-};
-
-struct ystorm_iscsi_task_st_ctx {
- struct ystorm_iscsi_task_state state;
- struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
- union iscsi_task_hdr pdu_hdr;
-};
-
-struct ystorm_iscsi_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 word0;
- u8 flags0;
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 byte2;
- __le32 TTT;
- u8 byte3;
- u8 byte4;
- __le16 word1;
-};
-
-struct mstorm_iscsi_task_ag_ctx {
- u8 cdu_validation;
- u8 byte1;
- __le16 task_cid;
- u8 flags0;
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
- u8 flags1;
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 byte2;
- __le32 reg0;
- u8 byte3;
- u8 byte4;
- __le16 word1;
-};
-
-struct ustorm_iscsi_task_ag_ctx {
- u8 reserved;
- u8 state;
- __le16 icid;
- u8 flags0;
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
- u8 flags1;
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
- u8 flags2;
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
- u8 flags3;
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
- __le32 dif_err_intervals;
- __le32 dif_error_1st_interval;
- __le32 rcv_cont_len;
- __le32 exp_cont_len;
- __le32 total_data_acked;
- __le32 exp_data_acked;
- u8 next_tid_valid;
- u8 byte3;
- __le16 word1;
- __le16 next_tid;
- __le16 word3;
- __le32 hdr_residual_count;
- __le32 exp_r2t_sn;
-};
-
-struct mstorm_iscsi_task_st_ctx {
- struct scsi_cached_sges data_desc;
- struct scsi_sgl_params sgl_params;
- __le32 rem_task_size;
- __le32 data_buffer_offset;
- u8 task_type;
- struct iscsi_dif_flags dif_flags;
- u8 reserved0[2];
- struct regpair sense_db;
- __le32 expected_itt;
- __le32 reserved1;
-};
-
-struct ustorm_iscsi_task_st_ctx {
- __le32 rem_rcv_len;
- __le32 exp_data_transfer_len;
- __le32 exp_data_sn;
- struct regpair lun;
- struct iscsi_reg1 reg1;
- u8 flags2;
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
- struct iscsi_dif_flags dif_flags;
- __le16 reserved3;
- __le32 reserved4;
- __le32 reserved5;
- __le32 reserved6;
- __le32 reserved7;
- u8 task_type;
- u8 error_flags;
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
- u8 flags;
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
- u8 cq_rss_number;
-};
-
-struct iscsi_task_context {
- struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
- struct regpair ystorm_ag_padding[2];
- struct tdif_task_context tdif_context;
- struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
- struct regpair mstorm_ag_padding[2];
- struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
- struct mstorm_iscsi_task_st_ctx mstorm_st_context;
- struct ustorm_iscsi_task_st_ctx ustorm_st_context;
- struct rdif_task_context rdif_context;
-};
-
+/* iSCSI task type */
enum iscsi_task_type {
ISCSI_TASK_TYPE_INITIATOR_WRITE,
ISCSI_TASK_TYPE_INITIATOR_READ,
@@ -1186,53 +1342,57 @@ enum iscsi_task_type {
ISCSI_TASK_TYPE_TARGET_READ,
ISCSI_TASK_TYPE_TARGET_RESPONSE,
ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+ ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
MAX_ISCSI_TASK_TYPE
};
+/* iSCSI DesiredDataTransferLength/ttt union */
union iscsi_ttt_txlen_union {
__le32 desired_tx_len;
__le32 ttt;
};
+/* iSCSI uHQ element */
struct iscsi_uhqe {
__le32 reg1;
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
-#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
-#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
-#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
-#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
-#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
-#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
-#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
-#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
+#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
+#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
__le32 reg2;
-#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
-#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
-#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
-#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
+#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
-
+/* iSCSI WQ element */
struct iscsi_wqe {
__le16 task_id;
u8 flags;
-#define ISCSI_WQE_WQE_TYPE_MASK 0x7
-#define ISCSI_WQE_WQE_TYPE_SHIFT 0
-#define ISCSI_WQE_NUM_SGES_MASK 0xF
-#define ISCSI_WQE_NUM_SGES_SHIFT 3
-#define ISCSI_WQE_RESPONSE_MASK 0x1
-#define ISCSI_WQE_RESPONSE_SHIFT 7
+#define ISCSI_WQE_WQE_TYPE_MASK 0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT 0
+#define ISCSI_WQE_NUM_SGES_MASK 0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT 3
+#define ISCSI_WQE_RESPONSE_MASK 0x1
+#define ISCSI_WQE_RESPONSE_SHIFT 7
struct iscsi_dif_flags prot_flags;
__le32 contlen_cdbsize;
-#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
-#define ISCSI_WQE_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
-#define ISCSI_WQE_CDB_SIZE_SHIFT 24
+#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
+/* iSCSI wqe type */
enum iscsi_wqe_type {
ISCSI_WQE_TYPE_NORMAL,
ISCSI_WQE_TYPE_TASK_CLEANUP,
@@ -1244,6 +1404,7 @@ enum iscsi_wqe_type {
MAX_ISCSI_WQE_TYPE
};
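
/*
 * Illustrative sketch (helper name and values are made up): composing the
 * packed flags and contlen_cdbsize words of a normal-command WQE with the
 * mask/shift pairs defined above.
 */
static inline void iscsi_wqe_fill_sketch(struct iscsi_wqe *wqe, u16 task_id,
					 u8 num_sges, u32 cont_len)
{
	wqe->task_id = cpu_to_le16(task_id);
	wqe->flags = (ISCSI_WQE_TYPE_NORMAL & ISCSI_WQE_WQE_TYPE_MASK) <<
		     ISCSI_WQE_WQE_TYPE_SHIFT;
	wqe->flags |= (num_sges & ISCSI_WQE_NUM_SGES_MASK) <<
		      ISCSI_WQE_NUM_SGES_SHIFT;
	wqe->contlen_cdbsize =
		cpu_to_le32((cont_len & ISCSI_WQE_CONT_LEN_MASK) <<
			    ISCSI_WQE_CONT_LEN_SHIFT);
}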
+/* iSCSI xHQ element */
struct iscsi_xhqe {
union iscsi_ttt_txlen_union ttt_or_txlen;
__le32 exp_stat_sn;
@@ -1251,120 +1412,134 @@ struct iscsi_xhqe {
u8 total_ahs_length;
u8 opcode;
u8 flags;
-#define ISCSI_XHQE_FINAL_MASK 0x1
-#define ISCSI_XHQE_FINAL_SHIFT 0
-#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
-#define ISCSI_XHQE_NUM_SGES_MASK 0xF
-#define ISCSI_XHQE_NUM_SGES_SHIFT 2
-#define ISCSI_XHQE_RESERVED0_MASK 0x3
-#define ISCSI_XHQE_RESERVED0_SHIFT 6
+#define ISCSI_XHQE_FINAL_MASK 0x1
+#define ISCSI_XHQE_FINAL_SHIFT 0
+#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK 0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT 2
+#define ISCSI_XHQE_RESERVED0_MASK 0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT 6
union iscsi_seq_num seq_num;
__le16 reserved1;
};
+/* Per PF iSCSI receive path statistics - mStorm RAM structure */
struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+ struct regpair iscsi_rx_dup_ack_cnt;
};
+/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt;
};
+/* Per PF iSCSI receive path statistics - tStorm RAM structure */
struct tstorm_iscsi_stats_drv {
struct regpair iscsi_rx_bytes_cnt;
struct regpair iscsi_rx_packet_cnt;
struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+ struct regpair iscsi_rx_tcp_payload_bytes_cnt;
+ struct regpair iscsi_rx_tcp_pkt_cnt;
+ struct regpair iscsi_rx_pure_ack_cnt;
__le32 iscsi_cmdq_threshold_cnt;
__le32 iscsi_rq_threshold_cnt;
__le32 iscsi_immq_threshold_cnt;
};
+/* Per PF iSCSI receive path statistics - uStorm RAM structure */
struct ustorm_iscsi_stats_drv {
struct regpair iscsi_rx_data_pdu_cnt;
struct regpair iscsi_rx_r2t_pdu_cnt;
struct regpair iscsi_rx_total_pdu_cnt;
};
+/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
struct xstorm_iscsi_stats_drv {
struct regpair iscsi_tx_go_to_slow_start_event_cnt;
struct regpair iscsi_tx_fast_retransmit_event_cnt;
+ struct regpair iscsi_tx_pure_ack_cnt;
+ struct regpair iscsi_tx_delayed_ack_cnt;
};
+/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_data_pdu_cnt;
struct regpair iscsi_tx_r2t_pdu_cnt;
struct regpair iscsi_tx_total_pdu_cnt;
+ struct regpair iscsi_tx_tcp_payload_bytes_cnt;
+ struct regpair iscsi_tx_tcp_pkt_cnt;
};
-struct tstorm_iscsi_task_ag_ctx {
+struct e4_tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx {
__le32 reg1;
__le32 reg2;
};
+
+/* iSCSI doorbell data */
struct iscsi_db_data {
u8 params;
-#define ISCSI_DB_DATA_DEST_MASK 0x3
-#define ISCSI_DB_DATA_DEST_SHIFT 0
-#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
-#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
-#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
-#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
-#define ISCSI_DB_DATA_RESERVED_MASK 0x1
-#define ISCSI_DB_DATA_RESERVED_SHIFT 5
-#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ISCSI_DB_DATA_DEST_MASK 0x3
+#define ISCSI_DB_DATA_DEST_SHIFT 0
+#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
+#define ISCSI_DB_DATA_RESERVED_MASK 0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT 5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 sq_prod;
};
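
The doorbell word above is packed through the MASK/SHIFT pairs. As a rough usage sketch (not part of the patch): the helper name below is made up, and it assumes the SET_FIELD() macro and the DB_DEST_XCM / DB_AGG_CMD_SET values from common_hsi.h.

#include <linux/kernel.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/iscsi_common.h>

/* Illustrative helper: pack the doorbell parameters for an SQ producer
 * update and record the new producer value.
 */
static void iscsi_fill_sq_doorbell(struct iscsi_db_data *db, u16 sq_prod)
{
        db->params = 0;
        SET_FIELD(db->params, ISCSI_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db->params, ISCSI_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db->params, ISCSI_DB_DATA_BYPASS_EN, 1);

        db->agg_flags = 0;
        db->sq_prod = cpu_to_le16(sq_prod);
}
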
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
index b8b3e1cfae90..c6cfd39cd910 100644
--- a/include/linux/qed/iwarp_common.h
+++ b/include/linux/qed/iwarp_common.h
@@ -29,9 +29,12 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__
+
#include <linux/qed/rdma_common.h>
+
/************************/
/* IWARP FW CONSTANTS */
/************************/
@@ -40,14 +43,14 @@
#define IWARP_PASSIVE_MODE 1
#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
-#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
-#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)
+#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
+#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)
-#define IWARP_MAX_QPS (64 * 1024)
+#define IWARP_MAX_QPS (64 * 1024)
#endif /* __IWARP_COMMON__ */
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index d60de4a39810..147d08ccf813 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -61,6 +61,35 @@ struct qed_txq_start_ret_params {
void *p_handle;
};
+enum qed_filter_config_mode {
+ QED_FILTER_CONFIG_MODE_DISABLE,
+ QED_FILTER_CONFIG_MODE_5_TUPLE,
+ QED_FILTER_CONFIG_MODE_L4_PORT,
+ QED_FILTER_CONFIG_MODE_IP_DEST,
+};
+
+struct qed_ntuple_filter_params {
+ /* Physically mapped address containing header of buffer to be used
+ * as filter.
+ */
+ dma_addr_t addr;
+
+ /* Length of header in bytes */
+ u16 length;
+
+ /* Relative queue-id to receive classified packet */
+#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
+ u16 qid;
+
+ /* Identifier can either be according to vport-id or vfid */
+ bool b_is_vf;
+ u8 vport_id;
+ u8 vf_id;
+
+ /* true iff this filter is to be added. Else to be removed */
+ bool b_is_add;
+};
+
struct qed_dev_eth_info {
struct qed_dev_info common;
@@ -316,13 +345,12 @@ struct qed_eth_ops {
int (*tunn_config)(struct qed_dev *cdev,
struct qed_tunn_params *params);
- int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
- dma_addr_t mapping, u16 length,
- u16 vport_id, u16 rx_queue_id,
- bool add_filter);
+ int (*ntuple_filter_config)(struct qed_dev *cdev,
+ void *cookie,
+ struct qed_ntuple_filter_params *params);
int (*configure_arfs_searcher)(struct qed_dev *cdev,
- bool en_searcher);
+ enum qed_filter_config_mode mode);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
};
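
With the callback now taking an aggregated parameter block, an upper driver's aRFS path would be expected to fill it roughly as below. This is a sketch only; the wrapper name and the origin of the DMA-mapped header buffer are assumptions.

#include <linux/qed/qed_eth_if.h>

/* Sketch: add (or remove) an n-tuple filter through the reworked callback.
 * "hdr_dma"/"hdr_len" describe a DMA-mapped packet header prepared by the
 * caller; rxq_id may be QED_RFS_NTUPLE_QID_RSS to let RSS pick the queue.
 */
static int my_ntuple_filter(struct qed_dev *cdev,
                            const struct qed_eth_ops *ops, void *cookie,
                            dma_addr_t hdr_dma, u16 hdr_len, u16 rxq_id,
                            bool add)
{
        struct qed_ntuple_filter_params params = {
                .addr = hdr_dma,
                .length = hdr_len,
                .qid = rxq_id,
                .b_is_vf = false,
                .vport_id = 0,
                .b_is_add = add,
        };

        return ops->ntuple_filter_config(cdev, cookie, &params);
}
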
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index cc646ca97974..b5b2bc9eacca 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -244,16 +244,11 @@ struct qed_fcoe_pf_params {
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
u64 glbl_q_params_addr;
- u64 bdq_pbl_base_addr[2];
- u32 max_cwnd;
+ u64 bdq_pbl_base_addr[3];
u16 cq_num_entries;
u16 cmdq_num_entries;
u32 two_msl_timer;
- u16 dup_ack_threshold;
u16 tx_sws_timer;
- u16 min_rto;
- u16 min_rto_rt;
- u16 max_rto;
/* The following parameters are used during HW-init
* and these parameters need to be passed as arguments
@@ -264,8 +259,8 @@ struct qed_iscsi_pf_params {
/* The following parameters are used during protocol-init */
u16 half_way_close_timeout;
- u16 bdq_xoff_threshold[2];
- u16 bdq_xon_threshold[2];
+ u16 bdq_xoff_threshold[3];
+ u16 bdq_xon_threshold[3];
u16 cmdq_xoff_threshold;
u16 cmdq_xon_threshold;
u16 rq_buffer_size;
@@ -281,10 +276,11 @@ struct qed_iscsi_pf_params {
u8 gl_cmd_pi;
u8 debug_mode;
u8 ll2_ooo_queue_id;
- u8 ooo_enable;
u8 is_target;
- u8 bdq_pbl_num_entries[2];
+ u8 is_soc_en;
+ u8 soc_num_of_blocks_log;
+ u8 bdq_pbl_num_entries[3];
};
struct qed_rdma_pf_params {
@@ -316,16 +312,16 @@ enum qed_int_mode {
};
struct qed_sb_info {
- struct status_block *sb_virt;
- dma_addr_t sb_phys;
- u32 sb_ack; /* Last given ack */
- u16 igu_sb_id;
- void __iomem *igu_addr;
- u8 flags;
-#define QED_SB_INFO_INIT 0x1
-#define QED_SB_INFO_SETUP 0x2
-
- struct qed_dev *cdev;
+ struct status_block_e4 *sb_virt;
+ dma_addr_t sb_phys;
+ u32 sb_ack; /* Last given ack */
+ u16 igu_sb_id;
+ void __iomem *igu_addr;
+ u8 flags;
+#define QED_SB_INFO_INIT 0x1
+#define QED_SB_INFO_SETUP 0x2
+
+ struct qed_dev *cdev;
};
enum qed_dev_type {
@@ -487,6 +483,15 @@ struct qed_int_info {
u8 used_cnt;
};
+#define QED_NVM_SIGNATURE 0x12435687
+
+enum qed_nvm_flash_cmd {
+ QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
+ QED_NVM_FLASH_CMD_FILE_START = 0x3,
+ QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
+ QED_NVM_FLASH_CMD_NVM_MAX,
+};
+
struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev,
@@ -662,6 +667,16 @@ struct qed_common_ops {
struct qed_chain *p_chain);
/**
+ * @brief nvm_flash - Flash nvm data.
+ *
+ * @param cdev
+ * @param name - file containing the data
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*nvm_flash)(struct qed_dev *cdev, const char *name);
+
+/**
* @brief nvm_get_image - reads an entire image from nvram
*
* @param cdev
@@ -939,7 +954,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_PROD_INDEX_MASK;
+ STATUS_BLOCK_E4_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= QED_SB_IDX;
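
The new nvm_flash() hook maps naturally onto ethtool's flash_device path. A hedged sketch of how a network driver built on qed might wire it up; struct mydrv_priv and its fields are invented for illustration and the file name comes from user space.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/qed/qed_eth_if.h>

struct mydrv_priv {                             /* hypothetical private data */
        struct qed_dev *cdev;
        const struct qed_eth_ops *qed_ops;
};

static int mydrv_flash_device(struct net_device *ndev,
                              struct ethtool_flash *flash)
{
        struct mydrv_priv *priv = netdev_priv(ndev);

        /* flash->data carries the firmware file name requested by ethtool */
        return priv->qed_ops->common->nvm_flash(priv->cdev, flash->data);
}
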
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 111e606a74c8..d0df1bec5357 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -102,7 +102,6 @@ struct qed_iscsi_params_offload {
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
- u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
@@ -124,7 +123,6 @@ struct qed_iscsi_params_offload {
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
- u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index e755954d85fd..266c1fb45387 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data {
u32 opaque_data_1;
/* GSI only */
- u32 gid_dst[4];
+ u32 src_qp;
u16 qp_id;
union {
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index a9b3050f469c..480a57eb36cc 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -32,28 +32,31 @@
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
+
/************************/
/* RDMA FW CONSTANTS */
/************************/
-#define RDMA_RESERVED_LKEY (0)
-#define RDMA_RING_PAGE_SIZE (0x1000)
+#define RDMA_RESERVED_LKEY (0)
+#define RDMA_RING_PAGE_SIZE (0x1000)
-#define RDMA_MAX_SGE_PER_SQ_WQE (4)
-#define RDMA_MAX_SGE_PER_RQ_WQE (4)
+#define RDMA_MAX_SGE_PER_SQ_WQE (4)
+#define RDMA_MAX_SGE_PER_RQ_WQE (4)
#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000)
-#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
-#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
-#define RDMA_MAX_CQS (64 * 1024)
-#define RDMA_MAX_TIDS (128 * 1024 - 1)
-#define RDMA_MAX_PDS (64 * 1024)
+#define RDMA_MAX_CQS (64 * 1024)
+#define RDMA_MAX_TIDS (128 * 1024 - 1)
+#define RDMA_MAX_PDS (64 * 1024)
+#define RDMA_MAX_XRC_SRQS (1024)
+#define RDMA_MAX_SRQS (32 * 1024)
-#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
-#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index fe6a33e45977..193bcef302e1 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -33,13 +33,18 @@
#ifndef __ROCE_COMMON__
#define __ROCE_COMMON__
-#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
-#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+/************************/
+/* ROCE FW CONSTANTS */
+/************************/
-#define ROCE_MAX_QPS (32 * 1024)
-#define ROCE_DCQCN_NP_MAX_QPS (64)
-#define ROCE_DCQCN_RP_MAX_QPS (64)
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+#define ROCE_MAX_QPS (32 * 1024)
+#define ROCE_DCQCN_NP_MAX_QPS (64)
+#define ROCE_DCQCN_RP_MAX_QPS (64)
+
+/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
ROCE_ASYNC_EVENT_COMM_EST = 1,
@@ -54,6 +59,9 @@ enum roce_async_events_type {
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
ROCE_ASYNC_EVENT_SRQ_EMPTY,
ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+ ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR,
+ ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR,
+ ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR,
MAX_ROCE_ASYNC_EVENTS_TYPE
};
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 08df82a096b6..505c0b48a761 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -33,43 +33,77 @@
#ifndef __STORAGE_COMMON__
#define __STORAGE_COMMON__
-#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
-#define BDQ_NUM_RESOURCES (4)
-
-#define BDQ_ID_RQ (0)
-#define BDQ_ID_IMM_DATA (1)
-#define BDQ_NUM_IDS (2)
-
-#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+/*********************/
+/* SCSI CONSTANTS */
+/*********************/
+
+#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES (4)
+
+#define BDQ_ID_RQ (0)
+#define BDQ_ID_IMM_DATA (1)
+#define BDQ_ID_TQ (2)
+#define BDQ_NUM_IDS (3)
+
+#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+
+#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)
+
+/* SCSI op codes */
+#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
+#define SCSI_OPCODE_READ_10 (0x28)
+#define SCSI_OPCODE_WRITE_6 (0x0A)
+#define SCSI_OPCODE_WRITE_10 (0x2A)
+#define SCSI_OPCODE_WRITE_12 (0xAA)
+#define SCSI_OPCODE_WRITE_16 (0x8A)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E)
+
+/* iSCSI Drv opaque */
+struct iscsi_drv_opaque {
+ __le16 reserved_zero[3];
+ __le16 opaque;
+};
-#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+/* Scsi 2B/8B opaque union */
+union scsi_opaque {
+ struct regpair fcoe_opaque;
+ struct iscsi_drv_opaque iscsi_opaque;
+};
+/* SCSI buffer descriptor */
struct scsi_bd {
struct regpair address;
- struct regpair opaque;
+ union scsi_opaque opaque;
};
+/* Scsi Drv BDQ struct */
struct scsi_bdq_ram_drv_data {
__le16 external_producer;
__le16 reserved0[3];
};
+/* SCSI SGE entry */
struct scsi_sge {
struct regpair sge_addr;
__le32 sge_len;
__le32 reserved;
};
+/* Cached SGEs section */
struct scsi_cached_sges {
struct scsi_sge sge[4];
};
+/* Scsi Drv CMDQ struct */
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
__le32 reserved1;
};
+/* Common SCSI init params passed by driver to FW in function init ramrod */
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
@@ -77,6 +111,7 @@ struct scsi_init_func_params {
u8 reserved2[12];
};
+/* SCSI RQ/CQ/CMDQ firmware function init parameters */
struct scsi_init_func_queues {
struct regpair glbl_q_params_addr;
__le16 rq_buffer_size;
@@ -84,39 +119,45 @@ struct scsi_init_func_queues {
__le16 cmdq_num_entries;
u8 bdq_resource_id;
u8 q_validity;
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5
+ __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
u8 num_queues;
u8 queue_relative_offset;
u8 cq_sb_pi;
u8 cmdq_sb_pi;
- __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
- __le16 reserved0;
u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+ u8 reserved1;
struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
__le16 bdq_xoff_threshold[BDQ_NUM_IDS];
- __le16 bdq_xon_threshold[BDQ_NUM_IDS];
__le16 cmdq_xoff_threshold;
+ __le16 bdq_xon_threshold[BDQ_NUM_IDS];
__le16 cmdq_xon_threshold;
- __le32 reserved1;
};
+/* Scsi Drv BDQ Data struct (BDQ IDs: 0 - RQ, 1 - Immediate Data, 2 - TQ) */
struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
+/* SCSI SGL types */
enum scsi_sgl_mode {
SCSI_TX_SLOW_SGL,
SCSI_FAST_SGL,
MAX_SCSI_SGL_MODE
};
+/* SCSI SGL parameters */
struct scsi_sgl_params {
struct regpair sgl_addr;
__le32 sgl_total_length;
@@ -126,10 +167,16 @@ struct scsi_sgl_params {
u8 reserved;
};
+/* SCSI terminate connection params */
struct scsi_terminate_extra_params {
__le16 unsolicited_cq_count;
__le16 cmdq_count;
u8 reserved[4];
};
+/* SCSI Task Queue Element */
+struct scsi_tqe {
+ __le16 itid;
+};
+
#endif /* __STORAGE_COMMON__ */
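
Since the opaque field of scsi_bd is now a union, iSCSI users address it through iscsi_opaque. A minimal sketch of filling one receive buffer descriptor follows; the helper name and the buffer handle are illustrative.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>

/* Sketch: describe one receive buffer in the BDQ. "buf_dma" is the DMA
 * address of the buffer and "handle" an opaque driver cookie.
 */
static void fill_rq_bd(struct scsi_bd *bd, dma_addr_t buf_dma, u16 handle)
{
        bd->address.hi = cpu_to_le32(upper_32_bits(buf_dma));
        bd->address.lo = cpu_to_le32(lower_32_bits(buf_dma));

        memset(bd->opaque.iscsi_opaque.reserved_zero, 0,
               sizeof(bd->opaque.iscsi_opaque.reserved_zero));
        bd->opaque.iscsi_opaque.opaque = cpu_to_le16(handle);
}
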
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dbf7a43c3e1f..4a4845193539 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -33,8 +33,13 @@
#ifndef __TCP_COMMON__
#define __TCP_COMMON__
-#define TCP_INVALID_TIMEOUT_VAL -1
+/********************/
+/* TCP FW CONSTANTS */
+/********************/
+#define TCP_INVALID_TIMEOUT_VAL -1
+
+/* OOO opaque data received from LL2 */
struct ooo_opaque {
__le32 cid;
u8 drop_isle;
@@ -43,25 +48,29 @@ struct ooo_opaque {
u8 ooo_isle;
};
+/* tcp connect mode enum */
enum tcp_connect_mode {
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
MAX_TCP_CONNECT_MODE
};
+/* tcp function init parameters */
struct tcp_init_params {
__le32 two_msl_timer;
__le16 tx_sws_timer;
- u8 maxfinrt;
+ u8 max_fin_rt;
u8 reserved[9];
};
+/* tcp IPv4/IPv6 enum */
enum tcp_ip_version {
TCP_IPV4,
TCP_IPV6,
MAX_TCP_IP_VERSION
};
+/* tcp offload parameters */
struct tcp_offload_params {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
@@ -70,24 +79,29 @@ struct tcp_offload_params {
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
- u8 flags;
-#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
-#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
-#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
-#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
+ __le16 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8
+#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F
+#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9
u8 ip_version;
+ u8 reserved0[3];
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
@@ -99,17 +113,21 @@ struct tcp_offload_params {
u8 rcv_wnd_scale;
u8 connect_mode;
__le16 srtt;
- __le32 cwnd;
__le32 ss_thresh;
- __le16 reserved1;
+ __le32 rcv_wnd;
+ __le32 cwnd;
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
+ __le16 reserved1;
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 initial_rcv_wnd;
__le32 rcv_next;
__le32 snd_una;
__le32 snd_next;
__le32 snd_max;
__le32 snd_wnd;
- __le32 rcv_wnd;
__le32 snd_wl1;
__le32 ts_recent;
__le32 ts_recent_age;
@@ -122,16 +140,13 @@ struct tcp_offload_params {
u8 rt_cnt;
__le16 rtt_var;
__le16 fw_internal;
- __le32 ka_timeout;
- __le32 ka_interval;
- __le32 max_rt_time;
- __le32 initial_rcv_wnd;
u8 snd_wnd_scale;
u8 ack_frequency;
__le16 da_timeout_value;
- __le32 reserved3[2];
+ __le32 reserved3;
};
+/* tcp offload parameters */
struct tcp_offload_params_opt2 {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
@@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 {
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
- u8 flags;
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+ __le16 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4
u8 ip_version;
+ u8 reserved1[3];
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
@@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 {
__le16 syn_ip_payload_length;
__le32 syn_phy_addr_lo;
__le32 syn_phy_addr_hi;
- __le32 reserved1[22];
+ __le32 cwnd;
+ u8 ka_max_probe_cnt;
+ u8 reserved2[3];
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 reserved3[16];
};
+/* tcp segment placement event enum */
enum tcp_seg_placement_event {
TCP_EVENT_ADD_PEN,
TCP_EVENT_ADD_NEW_ISLE,
@@ -177,40 +202,41 @@ enum tcp_seg_placement_event {
MAX_TCP_SEG_PLACEMENT_EVENT
};
+/* tcp update parameters */
struct tcp_update_params {
__le16 flags;
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
-#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
-#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
-#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
@@ -226,6 +252,7 @@ struct tcp_update_params {
u8 reserved1[7];
};
+/* toe upload parameters */
struct tcp_upload_params {
__le32 rcv_next;
__le32 snd_una;
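
Because flags grew from u8 to __le16 (picking up the new ECN bits), callers now build a host-order 16-bit word and convert once. A minimal sketch under that assumption; the function name and the boolean inputs are illustrative.

#include <linux/kernel.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/tcp_common.h>

/* Sketch: compose the widened flags word for tcp_offload_params. */
static void tcp_set_offload_flags(struct tcp_offload_params *p,
                                  bool ts_en, bool da_en, bool ka_en,
                                  bool ecn_en)
{
        u16 flags = 0;

        SET_FIELD(flags, TCP_OFFLOAD_PARAMS_TS_EN, ts_en);
        SET_FIELD(flags, TCP_OFFLOAD_PARAMS_DA_EN, da_en);
        SET_FIELD(flags, TCP_OFFLOAD_PARAMS_KA_EN, ka_en);
        SET_FIELD(flags, TCP_OFFLOAD_PARAMS_ECN_SENDER_EN, ecn_en);
        SET_FIELD(flags, TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN, ecn_en);

        p->flags = cpu_to_le16(flags);
}
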
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 5ac9de4fcd6f..ca9772c8e48b 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -267,7 +267,6 @@ struct dqstats {
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
-extern struct dqstats *dqstats_pcpu;
extern struct dqstats dqstats;
static inline void dqstats_inc(unsigned int type)
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 2fb6fb11132e..dc905a4ff8d7 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -27,6 +27,9 @@ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
(ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
}
+int kernel_quotactl(unsigned int cmd, const char __user *special,
+ qid_t id, void __user *addr);
+
#if defined(CONFIG_QUOTA)
#define quota_error(sb, fmt, args...) \
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 23a9c89c7ad9..34149e8b5f73 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -104,25 +104,29 @@ struct radix_tree_node {
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
-/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
-#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT))
-#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1)
+/* The IDR tag is stored in the low bits of the GFP flags */
+#define ROOT_IS_IDR ((__force gfp_t)4)
+/* The top bits of gfp_mask are used to store the root tags */
+#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
struct radix_tree_root {
+ spinlock_t xa_lock;
gfp_t gfp_mask;
struct radix_tree_node __rcu *rnode;
};
-#define RADIX_TREE_INIT(mask) { \
+#define RADIX_TREE_INIT(name, mask) { \
+ .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
.gfp_mask = (mask), \
.rnode = NULL, \
}
#define RADIX_TREE(name, mask) \
- struct radix_tree_root name = RADIX_TREE_INIT(mask)
+ struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
#define INIT_RADIX_TREE(root, mask) \
do { \
+ spin_lock_init(&(root)->xa_lock); \
(root)->gfp_mask = (mask); \
(root)->rnode = NULL; \
} while (0)
@@ -356,24 +360,9 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
-void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
+void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max);
-static inline void __rcu **idr_get_free(struct radix_tree_root *root,
- struct radix_tree_iter *iter,
- gfp_t gfp,
- int end)
-{
- return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
-}
-
-static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
- struct radix_tree_iter *iter,
- gfp_t gfp,
- unsigned long end)
-{
- return idr_get_free_cmn(root, iter, gfp, end - 1);
-}
enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
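
RADIX_TREE_INIT() now needs the variable name so the embedded xa_lock can be initialised with a unique lockdep key (INIT_RADIX_TREE() does the same at runtime via spin_lock_init()). A short sketch; the container type is hypothetical.

#include <linux/gfp.h>
#include <linux/radix-tree.h>

/* Static declaration: the macro passes the name through to
 * __SPIN_LOCK_UNLOCKED() for the embedded xa_lock.
 */
static RADIX_TREE(my_static_tree, GFP_KERNEL);

struct my_cache {                       /* hypothetical container */
        struct radix_tree_root entries;
};

static void my_cache_init(struct my_cache *cache)
{
        /* Runtime init: also initialises entries.xa_lock */
        INIT_RADIX_TREE(&cache->entries, GFP_KERNEL);
}
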
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 583cdd3d49ca..ea8505204fdf 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -105,8 +105,11 @@ extern const struct raid6_calls raid6_avx2x4;
extern const struct raid6_calls raid6_avx512x1;
extern const struct raid6_calls raid6_avx512x2;
extern const struct raid6_calls raid6_avx512x4;
-extern const struct raid6_calls raid6_tilegx8;
extern const struct raid6_calls raid6_s390vx8;
+extern const struct raid6_calls raid6_vpermxor1;
+extern const struct raid6_calls raid6_vpermxor2;
+extern const struct raid6_calls raid6_vpermxor4;
+extern const struct raid6_calls raid6_vpermxor8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 31e1ff69efc8..ec8655514283 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -38,6 +38,7 @@ enum raid_level {
RAID_LEVEL_5,
RAID_LEVEL_50,
RAID_LEVEL_6,
+ RAID_LEVEL_JBOD,
};
struct raid_data {
diff --git a/include/linux/random.h b/include/linux/random.h
index 4024f7d9c77d..2ddf13b4281e 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -85,10 +85,8 @@ static inline unsigned long get_random_canary(void)
static inline int get_random_bytes_wait(void *buf, int nbytes)
{
int ret = wait_for_random_bytes();
- if (unlikely(ret))
- return ret;
get_random_bytes(buf, nbytes);
- return 0;
+ return ret;
}
#define declare_get_random_var_wait(var) \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a6ddc42f87a5..36360d07f25b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -197,7 +197,7 @@ static inline void exit_tasks_rcu_finish(void) { }
#define cond_resched_rcu_qs() \
do { \
if (!cond_resched()) \
- rcu_note_voluntary_context_switch(current); \
+ rcu_note_voluntary_context_switch_lite(current); \
} while (0)
/*
@@ -214,10 +214,12 @@ do { \
#endif
/*
- * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
- * initialization and destruction of rcu_head on the stack. rcu_head structures
- * allocated dynamically in the heap or defined statically don't need any
- * initialization.
+ * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
+ * are needed for dynamic initialization and destruction of rcu_head
+ * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
+ * dynamic initialization and destruction of statically allocated rcu_head
+ * structures. However, rcu_head structures allocated dynamically in the
+ * heap don't need any initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
@@ -433,12 +435,12 @@ static inline void rcu_preempt_sleep_check(void) { }
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL. Although rcu_access_pointer() may also be used in cases where
- * update-side locks prevent the value of the pointer from changing, you
- * should instead use rcu_dereference_protected() for this use case.
+ * lockdep checks for being in an RCU read-side critical section. This is
+ * useful when the value of this pointer is accessed, but the pointer is
+ * not dereferenced, for example, when testing an RCU-protected pointer
+ * against NULL. Although rcu_access_pointer() may also be used in cases
+ * where update-side locks prevent the value of the pointer from changing,
+ * you should instead use rcu_dereference_protected() for this use case.
*
* It is also permissible to use rcu_access_pointer() when read-side
* access to the pointer was removed at least one grace period ago, as
@@ -521,12 +523,11 @@ static inline void rcu_preempt_sleep_check(void) { }
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(). This
- * is useful in cases where update-side locks prevent the value of the
- * pointer from changing. Please note that this primitive does *not*
- * prevent the compiler from repeating this reference or combining it
- * with other references, so it should not be used without protection
- * of appropriate locks.
+ * the READ_ONCE(). This is useful in cases where update-side locks
+ * prevent the value of the pointer from changing. Please note that this
+ * primitive does *not* prevent the compiler from repeating this reference
+ * or combining it with other references, so it should not be used without
+ * protection of appropriate locks.
*
* This function is only for update-side use. Using this function
* when protected only by rcu_read_lock() will result in infrequent
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b3dbf9502fd0..ce9beec35e34 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
-static inline bool rcu_irq_enter_disabled(void) { return false; }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 37d6fd3b7ff8..fd996cdf1833 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
-bool rcu_irq_enter_disabled(void);
void exit_rcu(void);
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index e8286585e149..4193c41e383a 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
/**
- * refcount_t - variant of atomic_t specialized for reference counts
+ * struct refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at UINT_MAX and will not move once
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 15eddc1353ba..5f7ad0552c03 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -21,15 +21,18 @@
#include <linux/lockdep.h>
struct module;
+struct clk;
struct device;
struct i2c_client;
struct irq_domain;
+struct slim_device;
struct spi_device;
struct spmi_device;
struct regmap;
struct regmap_range_cfg;
struct regmap_field;
struct snd_ac97;
+struct sdw_slave;
/* An enum of all the supported cache types */
enum regcache_type {
@@ -264,6 +267,9 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
* @lock: Optional lock callback (overrides regmap's default lock
* function, based on spinlock or mutex).
* @unlock: As above for unlocking.
@@ -296,7 +302,10 @@ typedef void (*regmap_unlock)(void *);
* a read.
* @write_flag_mask: Mask to be set in the top bytes of the register when doing
* a write. If both read_flag_mask and write_flag_mask are
- * empty the regmap_bus default masks are used.
+ * empty and zero_flag_mask is not set the regmap_bus default
+ * masks are used.
+ * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
+ * if they are both empty.
* @use_single_rw: If set, converts the bulk read and write operations into
* a series of single read and write operations. This is useful
* for device that does not support bulk read and write.
@@ -317,6 +326,7 @@ typedef void (*regmap_unlock)(void *);
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
+ * @use_hwlock: Indicate if a hardware spinlock should be used.
* @hwlock_id: Specify the hardware spinlock id.
* @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
* HWLOCK_IRQ or 0.
@@ -333,6 +343,8 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+
+ bool disable_locking;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg;
@@ -355,6 +367,7 @@ struct regmap_config {
unsigned long read_flag_mask;
unsigned long write_flag_mask;
+ bool zero_flag_mask;
bool use_single_rw;
bool can_multi_write;
@@ -365,6 +378,7 @@ struct regmap_config {
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
+ bool use_hwlock;
unsigned int hwlock_id;
unsigned int hwlock_mode;
};
@@ -499,6 +513,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -524,6 +542,10 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -561,6 +583,10 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
@@ -616,6 +642,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_slimbus() - Initialise register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_slimbus(slimbus, config) \
+ __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \
+ slimbus, config)
+
+/**
* regmap_init_spi() - Initialise register map
*
* @dev: Device that will be interacted with
@@ -710,6 +749,20 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
/**
+ * regmap_init_sdw() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
+ sdw, config)
+
+
+/**
* devm_regmap_init() - Initialise managed register map
*
* @dev: Device that will be interacted with
@@ -839,6 +892,22 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
ac97, config)
+/**
+ * devm_regmap_init_sdw() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
+ sdw, config)
+
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
+void regmap_mmio_detach_clk(struct regmap *map);
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
const struct regmap_config *config);
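
A hedged sketch of the new SoundWire wrapper in use from a codec probe; the config values, register range and function names are illustrative only.

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>

static const struct regmap_config mycodec_regmap_cfg = {
        .reg_bits = 32,                 /* SDW register addresses */
        .val_bits = 8,
        .max_register = 0x2000,         /* made-up register range */
};

static int mycodec_sdw_probe(struct sdw_slave *slave,
                             const struct sdw_device_id *id)
{
        struct regmap *regmap;

        regmap = devm_regmap_init_sdw(slave, &mycodec_regmap_cfg);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);

        /* ... normal codec setup using regmap_read()/regmap_write() ... */
        return 0;
}
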
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index f2fd2d3bf58f..d1f2073e4d5f 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -21,6 +21,8 @@
#define DA9211_MAX_REGULATORS 2
+struct gpio_desc;
+
enum da9211_chip_id {
DA9211,
DA9212,
@@ -39,7 +41,7 @@ struct da9211_pdata {
* 2 : 2 phase 2 buck
*/
int num_buck;
- int gpio_ren[DA9211_MAX_REGULATORS];
+ struct gpio_desc *gpiod_ren[DA9211_MAX_REGULATORS];
struct device_node *reg_node[DA9211_MAX_REGULATORS];
struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
};
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 94417b4226bd..4fc96cb8e5d7 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -19,6 +19,7 @@
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
+struct gpio_desc;
struct regmap;
struct regulator_dev;
struct regulator_config;
@@ -214,6 +215,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+ int (*resume_early)(struct regulator_dev *rdev);
+
int (*set_pull_down) (struct regulator_dev *);
};
@@ -385,6 +388,7 @@ struct regulator_desc {
* initialized, meaning that >= 0 is a valid gpio
* identifier and < 0 is a non existent gpio.
* @ena_gpio: GPIO controlling regulator enable.
+ * @ena_gpiod: GPIO descriptor controlling regulator enable.
* @ena_gpio_invert: Sense for GPIO enable control.
* @ena_gpio_flags: Flags to use when calling gpio_request_one()
*/
@@ -397,6 +401,7 @@ struct regulator_config {
bool ena_gpio_initialized;
int ena_gpio;
+ struct gpio_desc *ena_gpiod;
unsigned int ena_gpio_invert:1;
unsigned int ena_gpio_flags;
};
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 9cd4fef37203..93a04893c739 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,16 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
#define REGULATOR_CHANGE_BYPASS 0x20
+/*
+ * operations in suspend mode
+ * DO_NOTHING_IN_SUSPEND - the default value
+ * DISABLE_IN_SUSPEND - turn off regulator in suspend states
+ * ENABLE_IN_SUSPEND - keep regulator on in suspend states
+ */
+#define DO_NOTHING_IN_SUSPEND (-1)
+#define DISABLE_IN_SUSPEND 0
+#define ENABLE_IN_SUSPEND 1
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -56,16 +66,24 @@ enum regulator_active_discharge {
* state. One of enabled or disabled must be set for the
* configuration to be applied.
*
- * @uV: Operating voltage during suspend.
+ * @uV: Default operating voltage during suspend; it can be adjusted
+ * within <min_uV, max_uV>.
+ * @min_uV: Minimum suspend voltage may be set.
+ * @max_uV: Maximum suspend voltage may be set.
* @mode: Operating mode during suspend.
- * @enabled: Enabled during suspend.
- * @disabled: Disabled during suspend.
+ * @enabled: operations during suspend.
+ * - DO_NOTHING_IN_SUSPEND
+ * - DISABLE_IN_SUSPEND
+ * - ENABLE_IN_SUSPEND
+ * @changeable: Whether this state can be switched between enabled and disabled.
*/
struct regulator_state {
- int uV; /* suspend voltage */
- unsigned int mode; /* suspend regulator operating mode */
- int enabled; /* is regulator enabled in this suspend state */
- int disabled; /* is the regulator disabled in this suspend state */
+ int uV;
+ int min_uV;
+ int max_uV;
+ unsigned int mode;
+ int enabled;
+ bool changeable;
};
/**
@@ -225,12 +243,12 @@ struct regulator_init_data {
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
+#endif
+
static inline int regulator_suspend_prepare(suspend_state_t state)
{
return 0;
@@ -239,6 +257,5 @@ static inline int regulator_suspend_finish(void)
{
return 0;
}
-#endif
#endif
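
A minimal constraints sketch using the reworked suspend state; the voltages are illustrative, and state_mem is the suspend-to-RAM slot inside regulation_constraints.

#include <linux/regulator/machine.h>

/* Board-data sketch: keep VDD on over suspend-to-RAM and allow its suspend
 * voltage to be tuned at runtime within the 850-950 mV window.
 */
static struct regulator_init_data myboard_vdd_init_data = {
        .constraints = {
                .min_uV = 800000,
                .max_uV = 1200000,
                .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
                .state_mem = {
                        .uV = 900000,
                        .min_uV = 850000,
                        .max_uV = 950000,
                        .enabled = ENABLE_IN_SUSPEND,
                        .changeable = true,
                },
        },
};
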
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 44e630eb3d94..d09a9c7af109 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -324,6 +324,7 @@ struct rproc_mem_entry {
};
struct rproc;
+struct firmware;
/**
* struct rproc_ops - platform-specific device handlers
@@ -331,12 +332,24 @@ struct rproc;
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
* @da_to_va: optional platform hook to perform address translations
+ * @parse_fw: parse the firmware image and load its resource table
+ * @find_loaded_rsc_table: find the loaded resource table
+ * @load: load firmware to memory, where the remote processor
+ * expects to find it
+ * @sanity_check: sanity check the fw image
+ * @get_boot_addr: get boot address to entry point specified in firmware
*/
struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
+ int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
+ struct resource_table *(*find_loaded_rsc_table)(
+ struct rproc *rproc, const struct firmware *fw);
+ int (*load)(struct rproc *rproc, const struct firmware *fw);
+ int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
+ u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
};
/**
@@ -382,6 +395,21 @@ enum rproc_crash_type {
};
/**
+ * struct rproc_dump_segment - segment info from ELF header
+ * @node: list node related to the rproc segment list
+ * @da: device address of the segment
+ * @size: size of the segment
+ */
+struct rproc_dump_segment {
+ struct list_head node;
+
+ dma_addr_t da;
+ size_t size;
+
+ loff_t offset;
+};
+
+/**
* struct rproc - represents a physical remote processor device
* @node: list node of this rproc object
* @domain: iommu domain
@@ -390,7 +418,6 @@ enum rproc_crash_type {
* @priv: private data which belongs to the platform-specific rproc module
* @ops: platform-specific start/stop rproc handlers
* @dev: virtual device for refcounting and common remoteproc behavior
- * @fw_ops: firmware-specific handlers
* @power: refcount of users who need this rproc powered up
* @state: state of the device
* @lock: lock which protects concurrent manipulations of the rproc
@@ -406,12 +433,13 @@ enum rproc_crash_type {
* @index: index of this rproc device
* @crash_handler: workqueue for handling a crash
* @crash_cnt: crash counter
- * @crash_comp: completion used to sync crash handler and the rproc reload
* @recovery_disabled: flag that state if recovery was disabled
* @max_notifyid: largest allocated notify id.
* @table_ptr: pointer to the resource table in effect
* @cached_table: copy of the resource table
+ * @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
+ * @dump_segments: list of segments in the firmware
*/
struct rproc {
struct list_head node;
@@ -419,9 +447,8 @@ struct rproc {
const char *name;
char *firmware;
void *priv;
- const struct rproc_ops *ops;
+ struct rproc_ops *ops;
struct device dev;
- const struct rproc_fw_ops *fw_ops;
atomic_t power;
unsigned int state;
struct mutex lock;
@@ -437,26 +464,28 @@ struct rproc {
int index;
struct work_struct crash_handler;
unsigned int crash_cnt;
- struct completion crash_comp;
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
struct resource_table *cached_table;
+ size_t table_sz;
bool has_iommu;
bool auto_boot;
+ struct list_head dump_segments;
};
/**
* struct rproc_subdev - subdevice tied to a remoteproc
* @node: list node related to the rproc subdevs list
* @probe: probe function, called as the rproc is started
- * @remove: remove function, called as the rproc is stopped
+ * @remove: remove function, called as the rproc is being stopped, the @crashed
+ * parameter indicates if this originates from a recovery
*/
struct rproc_subdev {
struct list_head node;
int (*probe)(struct rproc_subdev *subdev);
- void (*remove)(struct rproc_subdev *subdev);
+ void (*remove)(struct rproc_subdev *subdev, bool crashed);
};
/* we currently support only two vrings per rvdev */
@@ -523,6 +552,7 @@ void rproc_free(struct rproc *rproc);
int rproc_boot(struct rproc *rproc);
void rproc_shutdown(struct rproc *rproc);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
+int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
@@ -539,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
void rproc_add_subdev(struct rproc *rproc,
struct rproc_subdev *subdev,
int (*probe)(struct rproc_subdev *subdev),
- void (*remove)(struct rproc_subdev *subdev));
+ void (*remove)(struct rproc_subdev *subdev, bool graceful));
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
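
With the firmware handlers folded into rproc_ops, a platform driver now supplies everything in one table. A rough sketch: all mydsp_* callbacks are stand-ins for real driver code, and the device address passed to rproc_coredump_add_segment() is arbitrary.

#include <linux/remoteproc.h>
#include <linux/sizes.h>

static int mydsp_start(struct rproc *rproc) { return 0; }
static int mydsp_stop(struct rproc *rproc) { return 0; }
static void mydsp_kick(struct rproc *rproc, int vqid) { }

static int mydsp_load(struct rproc *rproc, const struct firmware *fw)
{
        /* copy fw->data into the carveout the DSP boots from */
        return 0;
}

static int mydsp_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
        /* register a region to be collected into the coredump on crash */
        return rproc_coredump_add_segment(rproc, 0x40000000, SZ_1M);
}

static const struct rproc_ops mydsp_ops = {
        .start          = mydsp_start,
        .stop           = mydsp_stop,
        .kick           = mydsp_kick,
        .load           = mydsp_load,
        .parse_fw       = mydsp_parse_fw,
};
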
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 21fc84d82d41..02166e815afb 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -167,6 +167,29 @@ reservation_object_lock(struct reservation_object *obj,
}
/**
+ * reservation_object_lock_interruptible - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object interruptible for exclusive access and
+ * modification. Note, that the lock is only against other writers, readers
+ * will run concurrently with a writer under RCU. The seqlock is used to
+ * notify readers if they overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ */
+static inline int
+reservation_object_lock_interruptible(struct reservation_object *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock_interruptible(&obj->lock, ctx);
+}
+
+
+/**
* reservation_object_trylock - trylock the reservation object
* @obj: the reservation object
*
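A minimal sketch of the interruptible lock added above, paired with the existing reservation_object_unlock(); resv stands for any reservation object the caller already holds, and the fence-installation step is elided.

#include <linux/reservation.h>

static int update_fences(struct reservation_object *resv)
{
	int ret;

	/* NULL context: lock this object on its own, not as part of a ww set. */
	ret = reservation_object_lock_interruptible(resv, NULL);
	if (ret)	/* typically -EINTR or -EDEADLK */
		return ret;

	/* ... install or replace fences here ... */

	reservation_object_unlock(resv);
	return 0;
}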
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index adb88f8cefbc..9326d671b6e6 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -27,12 +27,38 @@ struct device_node;
struct of_phandle_args;
/**
+ * struct reset_control_lookup - represents a single lookup entry
+ *
+ * @list: internal list of all reset lookup entries
+ * @provider: name of the reset controller device controlling this reset line
+ * @index: ID of the reset controller in the reset controller device
+ * @dev_id: name of the device associated with this reset line
+ * @con_id: name of the reset line (can be NULL)
+ */
+struct reset_control_lookup {
+ struct list_head list;
+ const char *provider;
+ unsigned int index;
+ const char *dev_id;
+ const char *con_id;
+};
+
+#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \
+ { \
+ .provider = _provider, \
+ .index = _index, \
+ .dev_id = _dev_id, \
+ .con_id = _con_id, \
+ }
+
+/**
* struct reset_controller_dev - reset controller entity that might
* provide multiple reset controls
* @ops: a pointer to device specific struct reset_control_ops
* @owner: kernel module of the reset controller driver
* @list: internal list of reset controller devices
* @reset_control_head: head of internal list of requested reset controls
+ * @dev: corresponding driver model device struct
* @of_node: corresponding device tree node as phandle target
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
@@ -44,6 +70,7 @@ struct reset_controller_dev {
struct module *owner;
struct list_head list;
struct list_head reset_control_head;
+ struct device *dev;
struct device_node *of_node;
int of_reset_n_cells;
int (*of_xlate)(struct reset_controller_dev *rcdev,
@@ -58,4 +85,7 @@ struct device;
int devm_reset_controller_register(struct device *dev,
struct reset_controller_dev *rcdev);
+void reset_controller_add_lookup(struct reset_control_lookup *lookup,
+ unsigned int num_entries);
+
#endif
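A sketch of how board code might feed entries to the new reset_controller_add_lookup() declared above, using the RESET_LOOKUP() initializer. The provider and consumer device names are placeholders.

#include <linux/kernel.h>
#include <linux/reset-controller.h>

static struct reset_control_lookup board_reset_lookups[] = {
	RESET_LOOKUP("reset-ctrl.0", 3, "mmc0", NULL),
	RESET_LOOKUP("reset-ctrl.0", 7, "usb-phy", "phy"),
};

static void board_register_resets(void)
{
	reset_controller_add_lookup(board_reset_lookups,
				    ARRAY_SIZE(board_reset_lookups));
}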
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 4c7871ddf3c6..09732c36f351 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -2,8 +2,10 @@
#ifndef _LINUX_RESET_H_
#define _LINUX_RESET_H_
-#include <linux/device.h>
+#include <linux/types.h>
+struct device;
+struct device_node;
struct reset_control;
#ifdef CONFIG_RESET_CONTROLLER
@@ -20,22 +22,16 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, bool shared,
bool optional);
void reset_control_put(struct reset_control *rstc);
+int __device_reset(struct device *dev, bool optional);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
bool optional);
-int __must_check device_reset(struct device *dev);
-
struct reset_control *devm_reset_control_array_get(struct device *dev,
bool shared, bool optional);
struct reset_control *of_reset_control_array_get(struct device_node *np,
bool shared, bool optional);
-static inline int device_reset_optional(struct device *dev)
-{
- return device_reset(dev);
-}
-
#else
static inline int reset_control_reset(struct reset_control *rstc)
@@ -62,15 +58,9 @@ static inline void reset_control_put(struct reset_control *rstc)
{
}
-static inline int __must_check device_reset(struct device *dev)
-{
- WARN_ON(1);
- return -ENOTSUPP;
-}
-
-static inline int device_reset_optional(struct device *dev)
+static inline int __device_reset(struct device *dev, bool optional)
{
- return -ENOTSUPP;
+ return optional ? 0 : -ENOTSUPP;
}
static inline struct reset_control *__of_reset_control_get(
@@ -109,6 +99,16 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
#endif /* CONFIG_RESET_CONTROLLER */
+static inline int __must_check device_reset(struct device *dev)
+{
+ return __device_reset(dev, false);
+}
+
+static inline int device_reset_optional(struct device *dev)
+{
+ return __device_reset(dev, true);
+}
+
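A short sketch contrasting the two wrappers just added: one probe path treats a missing reset line as fatal, the other carries on, matching the stub behaviour above when CONFIG_RESET_CONTROLLER is disabled. dev is whatever struct device the caller already has.

#include <linux/device.h>
#include <linux/reset.h>

static int probe_needs_reset(struct device *dev)
{
	int ret = device_reset(dev);	/* error if no reset line is described */

	if (ret)
		return ret;
	/* ... continue with hardware init ... */
	return 0;
}

static int probe_reset_is_optional(struct device *dev)
{
	/* Returns 0 when no reset line exists (or the framework is disabled). */
	return device_reset_optional(dev);
}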
/**
* reset_control_get_exclusive - Lookup and obtain an exclusive reference
* to a reset controller.
@@ -127,9 +127,6 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
static inline struct reset_control *
__must_check reset_control_get_exclusive(struct device *dev, const char *id)
{
-#ifndef CONFIG_RESET_CONTROLLER
- WARN_ON(1);
-#endif
return __reset_control_get(dev, id, 0, false, false);
}
@@ -275,9 +272,6 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive(struct device *dev,
const char *id)
{
-#ifndef CONFIG_RESET_CONTROLLER
- WARN_ON(1);
-#endif
return __devm_reset_control_get(dev, id, 0, false, false);
}
@@ -350,18 +344,6 @@ devm_reset_control_get_shared_by_index(struct device *dev, int index)
* These inline function calls will be removed once all consumers
* have been moved over to the new explicit API.
*/
-static inline struct reset_control *reset_control_get(
- struct device *dev, const char *id)
-{
- return reset_control_get_exclusive(dev, id);
-}
-
-static inline struct reset_control *reset_control_get_optional(
- struct device *dev, const char *id)
-{
- return reset_control_get_optional_exclusive(dev, id);
-}
-
static inline struct reset_control *of_reset_control_get(
struct device_node *node, const char *id)
{
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 361c08e35dbc..1f8ad121eb43 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -152,25 +152,25 @@ struct rhashtable_params {
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
- * @nelems: Number of elements in table
* @key_len: Key length for hashfn
- * @p: Configuration parameters
* @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
* @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- atomic_t nelems;
unsigned int key_len;
- struct rhashtable_params p;
unsigned int max_elems;
+ struct rhashtable_params p;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
+ atomic_t nelems;
};
/**
@@ -207,6 +207,7 @@ struct rhashtable_iter {
struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
+ bool end_of_table;
};
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}
-static inline unsigned int rht_key_hashfn(
- struct rhashtable *ht, const struct bucket_table *tbl,
- const void *key, const struct rhashtable_params params)
+static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+ const void *key, const struct rhashtable_params params,
+ unsigned int hash_rnd)
{
unsigned int hash;
/* params must be equal to ht->p if it isn't constant. */
if (!__builtin_constant_p(params.key_len))
- hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+ hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
else if (params.key_len) {
unsigned int key_len = params.key_len;
if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ hash = params.hashfn(key, key_len, hash_rnd);
else if (key_len & (sizeof(u32) - 1))
- hash = jhash(key, key_len, tbl->hash_rnd);
+ hash = jhash(key, key_len, hash_rnd);
else
- hash = jhash2(key, key_len / sizeof(u32),
- tbl->hash_rnd);
+ hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
} else {
unsigned int key_len = ht->p.key_len;
if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ hash = params.hashfn(key, key_len, hash_rnd);
else
- hash = jhash(key, key_len, tbl->hash_rnd);
+ hash = jhash(key, key_len, hash_rnd);
}
+ return hash;
+}
+
+static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+{
+ unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
+
return rht_bucket_index(tbl, hash);
}
@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+
+static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+ (void)rhashtable_walk_start_check(iter);
+}
+
void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
@@ -750,8 +766,10 @@ slow_path:
if (!key ||
(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
data = rht_obj(ht, head);
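A sketch of a table walk with the reworked iterator API above: rhashtable_walk_start() no longer returns an error, and -EAGAIN from rhashtable_walk_next() only signals that the table was resized mid-walk. struct my_obj and its key layout are placeholders.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rhashtable.h>

struct my_obj {
	struct rhash_head node;
	u32 key;
};

static void dump_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)	/* table resized */
				continue;
			break;
		}
		pr_info("key %u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}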
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 289e4d54e3e0..a0233edc0718 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -34,10 +34,12 @@ struct ring_buffer_event {
* array[0] = time delta (28 .. 59)
* size = 8 bytes
*
- * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
- * array[0] = tv_nsec
- * array[1..2] = tv_sec
- * size = 16 bytes
+ * @RINGBUF_TYPE_TIME_STAMP: Absolute timestamp
+ * Same format as TIME_EXTEND except that the
+ * value is an absolute timestamp, not a delta
+ * event.time_delta contains bottom 27 bits
+ * array[0] = top (28 .. 59) bits
+ * size = 8 bytes
*
* <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
* Data record
@@ -54,12 +56,12 @@ enum ring_buffer_type {
RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
RINGBUF_TYPE_PADDING,
RINGBUF_TYPE_TIME_EXTEND,
- /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
RINGBUF_TYPE_TIME_STAMP,
};
unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
+u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
/*
* ring_buffer_discard_commit will remove an event that has not
@@ -96,7 +98,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
})
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
-int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
@@ -115,6 +117,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length, void *data);
+void ring_buffer_nest_start(struct ring_buffer *buffer);
+void ring_buffer_nest_end(struct ring_buffer *buffer);
+
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
@@ -178,6 +183,8 @@ void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
int cpu, u64 *ts);
void ring_buffer_set_clock(struct ring_buffer *buffer,
u64 (*clock)(void));
+void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
+bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
size_t ring_buffer_page_len(void *page);
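A hedged sketch of the nesting helpers added above: they are meant for a write issued while the caller is already inside a write to the same buffer (for example from an event trigger), so the recursion protection expects one more level. The surrounding call chain is assumed, not shown.

#include <linux/ring_buffer.h>

static void nested_write(struct ring_buffer *buffer, void *data,
			 unsigned long len)
{
	ring_buffer_nest_start(buffer);
	ring_buffer_write(buffer, len, data);
	ring_buffer_nest_end(buffer);
}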
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 10d6ae8bbb7d..ca07366c4c33 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -157,7 +157,7 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
#else
@@ -258,7 +258,7 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
return -ENXIO;
}
-static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept,
+static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
/* This shouldn't be possible */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 41319a2e409b..4c007f69082f 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -87,7 +87,6 @@ struct rtc_class_ops {
int (*set_offset)(struct device *, long offset);
};
-#define RTC_DEVICE_NAME_SIZE 20
typedef struct rtc_task {
void (*func)(void *private_data);
void *private_data;
@@ -146,12 +145,17 @@ struct rtc_device {
bool registered;
- struct nvmem_config *nvmem_config;
struct nvmem_device *nvmem;
/* Old ABI support */
bool nvram_old_abi;
struct bin_attribute *nvram;
+ time64_t range_min;
+ timeu64_t range_max;
+ time64_t start_secs;
+ time64_t offset_secs;
+ bool set_start_time;
+
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
struct work_struct uie_task;
struct timer_list uie_timer;
@@ -165,6 +169,11 @@ struct rtc_device {
};
#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
+/* useful timestamps */
+#define RTC_TIMESTAMP_BEGIN_1900 -2208989361LL /* 1900-01-01 00:00:00 */
+#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
+#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
+
extern struct rtc_device *rtc_device_register(const char *name,
struct device *dev,
const struct rtc_class_ops *ops,
@@ -213,10 +222,6 @@ void rtc_aie_update_irq(void *private);
void rtc_uie_update_irq(void *private);
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
-int rtc_register(rtc_task_t *task);
-int rtc_unregister(rtc_task_t *task);
-int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
-
void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
ktime_t expires, ktime_t period);
@@ -272,4 +277,17 @@ extern int rtc_hctosys_ret;
#define rtc_hctosys_ret -ENODEV
#endif
+#ifdef CONFIG_RTC_NVMEM
+int rtc_nvmem_register(struct rtc_device *rtc,
+ struct nvmem_config *nvmem_config);
+void rtc_nvmem_unregister(struct rtc_device *rtc);
+#else
+static inline int rtc_nvmem_register(struct rtc_device *rtc,
+ struct nvmem_config *nvmem_config)
+{
+ return -ENODEV;
+}
+static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
+#endif
+
#endif /* _LINUX_RTC_H_ */
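A sketch of how an RTC driver's probe might use the additions above: the hardware's valid window expressed with the new timestamp constants, and NVRAM exposed through rtc_nvmem_register() now that the nvmem_config pointer is gone from struct rtc_device. rtc_register_device() is assumed to be the registration path already used by the driver, and nvmem_cfg is assumed to be filled in elsewhere.

#include <linux/nvmem-provider.h>
#include <linux/rtc.h>

static int example_rtc_finish_probe(struct rtc_device *rtc,
				    struct nvmem_config *nvmem_cfg)
{
	int ret;

	/* Hardware counts seconds from 2000 and wraps after 2099. */
	rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
	rtc->range_max = RTC_TIMESTAMP_END_2099;

	ret = rtc_register_device(rtc);
	if (ret)
		return ret;

	/* Optional: quietly returns -ENODEV when CONFIG_RTC_NVMEM is off. */
	rtc_nvmem_register(rtc, nvmem_cfg);
	return 0;
}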
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2032ce2eb20b..5225832bd6ff 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -19,10 +19,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
- gfp_t flags, int *new_nsid);
+ gfp_t flags, int *new_nsid, int new_ifindex);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned change, u32 event,
- gfp_t flags, int *new_nsid);
+ gfp_t flags, int *new_nsid,
+ int new_ifindex);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
gfp_t flags);
@@ -32,9 +33,11 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
+extern int rtnl_lock_killable(void);
extern wait_queue_head_t netdev_unregistering_wq;
-extern struct mutex net_mutex;
+extern struct rw_semaphore pernet_ops_rwsem;
+extern struct rw_semaphore net_rwsem;
#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
@@ -70,8 +73,7 @@ static inline bool lockdep_rtnl_is_held(void)
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds RTNL.
+ * the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
@@ -97,13 +99,9 @@ void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
-#define ASSERT_RTNL() do { \
- if (unlikely(!rtnl_is_locked())) { \
- printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
- __FILE__, __LINE__); \
- dump_stack(); \
- } \
-} while(0)
+#define ASSERT_RTNL() \
+ WARN_ONCE(!rtnl_is_locked(), \
+ "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
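A sketch of the new killable lock variant above next to the reworked ASSERT_RTNL(); the body of the critical section is a placeholder.

#include <linux/rtnetlink.h>

static int configure_under_rtnl(void)
{
	int ret = rtnl_lock_killable();	/* -EINTR on a fatal signal */

	if (ret)
		return ret;

	ASSERT_RTNL();	/* now a WARN_ONCE(), no manual dump_stack() */
	/* ... modify net devices ... */

	rtnl_unlock();
	return 0;
}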
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/rtsx_common.h
index 443176ee1ab0..443176ee1ab0 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/rtsx_common.h
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/rtsx_pci.h
index c3d3f04d8cc6..e964bbd03fc2 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -24,7 +24,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
-#include <linux/mfd/rtsx_common.h>
+#include <linux/rtsx_common.h>
#define MAX_RW_REG_CNT 1024
@@ -36,12 +36,12 @@
#define CHECK_REG_CMD 2
#define RTSX_HDBAR 0x08
-#define SG_INT 0x04
-#define SG_END 0x02
-#define SG_VALID 0x01
-#define SG_NO_OP 0x00
-#define SG_TRANS_DATA (0x02 << 4)
-#define SG_LINK_DESC (0x03 << 4)
+#define RTSX_SG_INT 0x04
+#define RTSX_SG_END 0x02
+#define RTSX_SG_VALID 0x01
+#define RTSX_SG_NO_OP 0x00
+#define RTSX_SG_TRANS_DATA (0x02 << 4)
+#define RTSX_SG_LINK_DESC (0x03 << 4)
#define RTSX_HDBCTLR 0x0C
#define SDMA_MODE 0x00
#define ADMA_MODE (0x02 << 26)
@@ -203,6 +203,7 @@
#define SD_DDR_MODE 0x04
#define SD_30_MODE 0x08
#define SD_CLK_DIVIDE_MASK 0xC0
+#define SD_MODE_SELECT_MASK 0x0C
#define SD_CFG2 0xFDA1
#define SD_CALCULATE_CRC7 0x00
#define SD_NO_CALCULATE_CRC7 0x80
@@ -226,6 +227,7 @@
#define SD_RSP_TYPE_R6 0x01
#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
+#define SD30_CLK_END_EN 0x10
#define SD_RSP_80CLK_TIMEOUT_EN 0x01
#define SD_STAT1 0xFDA3
@@ -309,6 +311,12 @@
#define SD_DATA_STATE 0xFDB6
#define SD_DATA_IDLE 0x80
+#define REG_SD_STOP_SDCLK_CFG 0xFDB8
+#define SD30_CLK_STOP_CFG_EN 0x04
+#define SD30_CLK_STOP_CFG1 0x02
+#define SD30_CLK_STOP_CFG0 0x01
+#define REG_PRE_RW_MODE 0xFD70
+#define EN_INFINITE_MODE 0x01
#define SRCTL 0xFC13
@@ -434,6 +442,7 @@
#define CARD_CLK_EN 0xFD69
#define SD_CLK_EN 0x04
#define MS_CLK_EN 0x08
+#define SD40_CLK_EN 0x10
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
#define CD_DISABLE_MASK 0x07
@@ -453,8 +462,8 @@
#define FPDCTL 0xFC00
#define SSC_POWER_DOWN 0x01
#define SD_OC_POWER_DOWN 0x02
-#define ALL_POWER_DOWN 0x07
-#define OC_POWER_DOWN 0x06
+#define ALL_POWER_DOWN 0x03
+#define OC_POWER_DOWN 0x02
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
@@ -490,6 +499,9 @@
#define FPGA_PULL_CTL 0xFC1D
#define OLT_LED_CTL 0xFC1E
+#define LED_SHINE_MASK 0x08
+#define LED_SHINE_EN 0x08
+#define LED_SHINE_DISABLE 0x00
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
@@ -511,7 +523,11 @@
#define BPP_LDO_ON 0x00
#define BPP_LDO_SUSPEND 0x02
#define BPP_LDO_OFF 0x03
+#define EFUSE_CTL 0xFC30
+#define EFUSE_ADD 0xFC31
#define SYS_VER 0xFC32
+#define EFUSE_DATAL 0xFC34
+#define EFUSE_DATAH 0xFC35
#define CARD_PULL_CTL1 0xFD60
#define CARD_PULL_CTL2 0xFD61
@@ -553,6 +569,9 @@
#define RBBC1 0xFE2F
#define RBDAT 0xFE30
#define RBCTL 0xFE34
+#define U_AUTO_DMA_EN_MASK 0x20
+#define U_AUTO_DMA_DISABLE 0x00
+#define RB_FLUSH 0x80
#define CFGADDR0 0xFE35
#define CFGADDR1 0xFE36
#define CFGDATA0 0xFE37
@@ -581,6 +600,8 @@
#define LTR_LATENCY_MODE_HW 0
#define LTR_LATENCY_MODE_SW BIT(6)
#define OBFF_CFG 0xFE4C
+#define OBFF_EN_MASK 0x03
+#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
@@ -595,6 +616,7 @@
#define FORCE_ASPM_L0_EN 0x01
#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
+#define CLK_PM_EN 0x01
#define FUNC_FORCE_CTL 0xFE59
#define FUNC_FORCE_UPME_XMT_DBG 0x02
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -620,14 +642,23 @@
#define LDO_PWR_SEL 0xFE78
#define L1SUB_CONFIG1 0xFE8D
+#define AUX_CLK_ACTIVE_SEL_MASK 0x01
+#define MAC_CKSW_DONE 0x00
#define L1SUB_CONFIG2 0xFE8E
#define L1SUB_AUTO_CFG 0x02
#define L1SUB_CONFIG3 0xFE8F
#define L1OFF_MBIAS2_EN_5250 BIT(7)
#define DUMMY_REG_RESET_0 0xFE90
+#define IC_VERSION_MASK 0x0F
+#define REG_VREF 0xFE97
+#define PWD_SUSPND_EN 0x10
+#define RTS5260_DMA_RST_CTL_0 0xFEBF
+#define RTS5260_DMA_RST 0x80
+#define RTS5260_ADMA3_RST 0x40
#define AUTOLOAD_CFG_BASE 0xFF00
+#define RELINK_TIME_MASK 0x01
#define PETXCFG 0xFF03
#define FORCE_CLKREQ_DELINK_MASK BIT(7)
#define FORCE_CLKREQ_LOW 0x80
@@ -667,15 +698,24 @@
#define LDO_DV18_CFG 0xFF70
#define LDO_DV18_SR_MASK 0xC0
#define LDO_DV18_SR_DF 0x40
+#define DV331812_MASK 0x70
+#define DV331812_33 0x70
+#define DV331812_17 0x30
#define LDO_CONFIG2 0xFF71
#define LDO_D3318_MASK 0x07
#define LDO_D3318_33V 0x07
#define LDO_D3318_18V 0x02
+#define DV331812_VDD1 0x04
+#define DV331812_POWERON 0x08
+#define DV331812_POWEROFF 0x00
#define LDO_VCC_CFG0 0xFF72
#define LDO_VCC_LMTVTH_MASK 0x30
#define LDO_VCC_LMTVTH_2A 0x10
+/*RTS5260*/
+#define RTS5260_DVCC_TUNE_MASK 0x70
+#define RTS5260_DVCC_33 0x70
#define LDO_VCC_CFG1 0xFF73
#define LDO_VCC_REF_TUNE_MASK 0x30
@@ -684,6 +724,10 @@
#define LDO_VCC_1V8 0x04
#define LDO_VCC_3V3 0x07
#define LDO_VCC_LMT_EN 0x08
+/*RTS5260*/
+#define LDO_POW_SDVDD1_MASK 0x08
+#define LDO_POW_SDVDD1_ON 0x08
+#define LDO_POW_SDVDD1_OFF 0x00
#define LDO_VIO_CFG 0xFF75
#define LDO_VIO_SR_MASK 0xC0
@@ -711,6 +755,160 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5260_AUTOLOAD_CFG4 0xFF7F
+#define RTS5260_MIMO_DISABLE 0x8A
+
+#define RTS5260_REG_GPIO_CTL0 0xFC1A
+#define RTS5260_REG_GPIO_MASK 0x01
+#define RTS5260_REG_GPIO_ON 0x01
+#define RTS5260_REG_GPIO_OFF 0x00
+
+#define PWR_GLOBAL_CTRL 0xF200
+#define PCIE_L1_2_EN 0x0C
+#define PCIE_L1_1_EN 0x0A
+#define PCIE_L1_0_EN 0x09
+#define PWR_FE_CTL 0xF201
+#define PCIE_L1_2_PD_FE_EN 0x0C
+#define PCIE_L1_1_PD_FE_EN 0x0A
+#define PCIE_L1_0_PD_FE_EN 0x09
+#define CFG_PCIE_APHY_OFF_0 0xF204
+#define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF
+#define CFG_PCIE_APHY_OFF_1 0xF205
+#define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF
+#define CFG_PCIE_APHY_OFF_2 0xF206
+#define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01
+#define CFG_PCIE_APHY_OFF_3 0xF207
+#define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00
+#define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C
+#define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E
+#define CFG_L1_0_SYS_RET_VALUE 0xF210
+#define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212
+#define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214
+#define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216
+#define CFG_LP_FPWM_VALUE 0xF219
+#define CFG_LP_FPWM_VALUE_DEFAULT 0x18
+#define PWC_CDR 0xF253
+#define PWC_CDR_DEFAULT 0x03
+#define CFG_L1_0_RET_VALUE_DEFAULT 0x1B
+#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C
+
+/* OCPCTL */
+#define SD_DETECT_EN 0x08
+#define SD_OCP_INT_EN 0x04
+#define SD_OCP_INT_CLR 0x02
+#define SD_OC_CLR 0x01
+
+#define SDVIO_DETECT_EN (1 << 7)
+#define SDVIO_OCP_INT_EN (1 << 6)
+#define SDVIO_OCP_INT_CLR (1 << 5)
+#define SDVIO_OC_CLR (1 << 4)
+
+/* OCPSTAT */
+#define SD_OCP_DETECT 0x08
+#define SD_OC_NOW 0x04
+#define SD_OC_EVER 0x02
+
+#define SDVIO_OC_NOW (1 << 6)
+#define SDVIO_OC_EVER (1 << 5)
+
+#define REG_OCPCTL 0xFD6A
+#define REG_OCPSTAT 0xFD6E
+#define REG_OCPGLITCH 0xFD6C
+#define REG_OCPPARA1 0xFD6B
+#define REG_OCPPARA2 0xFD6D
+
+/* rts5260 DV3318 OCP-related registers */
+#define REG_DV3318_OCPCTL 0xFD89
+#define DV3318_OCP_TIME_MASK 0xF0
+#define DV3318_DETECT_EN 0x08
+#define DV3318_OCP_INT_EN 0x04
+#define DV3318_OCP_INT_CLR 0x02
+#define DV3318_OCP_CLR 0x01
+
+#define REG_DV3318_OCPSTAT 0xFD8A
+#define DV3318_OCP_GlITCH_TIME_MASK 0xF0
+#define DV3318_OCP_DETECT 0x08
+#define DV3318_OCP_NOW 0x04
+#define DV3318_OCP_EVER 0x02
+
+#define SD_OCP_GLITCH_MASK 0x0F
+
+/* OCPPARA1 */
+#define SDVIO_OCP_TIME_60 0x00
+#define SDVIO_OCP_TIME_100 0x10
+#define SDVIO_OCP_TIME_200 0x20
+#define SDVIO_OCP_TIME_400 0x30
+#define SDVIO_OCP_TIME_600 0x40
+#define SDVIO_OCP_TIME_800 0x50
+#define SDVIO_OCP_TIME_1100 0x60
+#define SDVIO_OCP_TIME_MASK 0x70
+
+#define SD_OCP_TIME_60 0x00
+#define SD_OCP_TIME_100 0x01
+#define SD_OCP_TIME_200 0x02
+#define SD_OCP_TIME_400 0x03
+#define SD_OCP_TIME_600 0x04
+#define SD_OCP_TIME_800 0x05
+#define SD_OCP_TIME_1100 0x06
+#define SD_OCP_TIME_MASK 0x07
+
+/* OCPPARA2 */
+#define SDVIO_OCP_THD_190 0x00
+#define SDVIO_OCP_THD_250 0x10
+#define SDVIO_OCP_THD_320 0x20
+#define SDVIO_OCP_THD_380 0x30
+#define SDVIO_OCP_THD_440 0x40
+#define SDVIO_OCP_THD_500 0x50
+#define SDVIO_OCP_THD_570 0x60
+#define SDVIO_OCP_THD_630 0x70
+#define SDVIO_OCP_THD_MASK 0x70
+
+#define SD_OCP_THD_450 0x00
+#define SD_OCP_THD_550 0x01
+#define SD_OCP_THD_650 0x02
+#define SD_OCP_THD_750 0x03
+#define SD_OCP_THD_850 0x04
+#define SD_OCP_THD_950 0x05
+#define SD_OCP_THD_1050 0x06
+#define SD_OCP_THD_1150 0x07
+#define SD_OCP_THD_MASK 0x07
+
+#define SDVIO_OCP_GLITCH_MASK 0xF0
+#define SDVIO_OCP_GLITCH_NONE 0x00
+#define SDVIO_OCP_GLITCH_50U 0x10
+#define SDVIO_OCP_GLITCH_100U 0x20
+#define SDVIO_OCP_GLITCH_200U 0x30
+#define SDVIO_OCP_GLITCH_600U 0x40
+#define SDVIO_OCP_GLITCH_800U 0x50
+#define SDVIO_OCP_GLITCH_1M 0x60
+#define SDVIO_OCP_GLITCH_2M 0x70
+#define SDVIO_OCP_GLITCH_3M 0x80
+#define SDVIO_OCP_GLITCH_4M 0x90
+#define SDVIO_OCP_GLIVCH_5M 0xA0
+#define SDVIO_OCP_GLITCH_6M 0xB0
+#define SDVIO_OCP_GLITCH_7M 0xC0
+#define SDVIO_OCP_GLITCH_8M 0xD0
+#define SDVIO_OCP_GLITCH_9M 0xE0
+#define SDVIO_OCP_GLITCH_10M 0xF0
+
+#define SD_OCP_GLITCH_MASK 0x0F
+#define SD_OCP_GLITCH_NONE 0x00
+#define SD_OCP_GLITCH_50U 0x01
+#define SD_OCP_GLITCH_100U 0x02
+#define SD_OCP_GLITCH_200U 0x03
+#define SD_OCP_GLITCH_600U 0x04
+#define SD_OCP_GLITCH_800U 0x05
+#define SD_OCP_GLITCH_1M 0x06
+#define SD_OCP_GLITCH_2M 0x07
+#define SD_OCP_GLITCH_3M 0x08
+#define SD_OCP_GLITCH_4M 0x09
+#define SD_OCP_GLIVCH_5M 0x0A
+#define SD_OCP_GLITCH_6M 0x0B
+#define SD_OCP_GLITCH_7M 0x0C
+#define SD_OCP_GLITCH_8M 0x0D
+#define SD_OCP_GLITCH_9M 0x0E
+#define SD_OCP_GLITCH_10M 0x0F
+
/* Phy register */
#define PHY_PCR 0x00
#define PHY_PCR_FORCE_CODE 0xB000
@@ -857,6 +1055,7 @@
#define PCR_ASPM_SETTING_REG1 0x160
#define PCR_ASPM_SETTING_REG2 0x168
+#define PCR_ASPM_SETTING_5260 0x178
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
@@ -890,6 +1089,7 @@ struct pcr_ops {
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
@@ -897,6 +1097,12 @@ struct pcr_ops {
void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
void (*full_on)(struct rtsx_pcr *pcr);
void (*power_saving)(struct rtsx_pcr *pcr);
+ void (*enable_ocp)(struct rtsx_pcr *pcr);
+ void (*disable_ocp)(struct rtsx_pcr *pcr);
+ void (*init_ocp)(struct rtsx_pcr *pcr);
+ void (*process_ocp)(struct rtsx_pcr *pcr);
+ int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val);
+ void (*clear_ocpstat)(struct rtsx_pcr *pcr);
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -935,6 +1141,9 @@ enum dev_aspm_mode {
* @l1_snooze_delay: l1 snooze delay
* @ltr_l1off_sspwrgate: ltr l1off sspwrgate
* @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
+ * @ocp_en: enable OCP flag
+ * @sd_400mA_ocp_thd: 400mA OCP threshold
+ * @sd_800mA_ocp_thd: 800mA OCP threshold
*/
struct rtsx_cr_option {
u32 dev_flags;
@@ -949,6 +1158,19 @@ struct rtsx_cr_option {
u32 l1_snooze_delay;
u8 ltr_l1off_sspwrgate;
u8 ltr_l1off_snooze_sspwrgate;
+ bool ocp_en;
+ u8 sd_400mA_ocp_thd;
+ u8 sd_800mA_ocp_thd;
+};
+
+/*
+ * struct rtsx_hw_param - card reader hardware param
+ * @interrupt_en: indicates which interrupts are enabled
+ * @ocp_glitch: OCP glitch time
+ */
+struct rtsx_hw_param {
+ u32 interrupt_en;
+ u8 ocp_glitch;
};
#define rtsx_set_dev_flag(cr, flag) \
@@ -963,6 +1185,7 @@ struct rtsx_pcr {
unsigned int id;
int pcie_cap;
struct rtsx_cr_option option;
+ struct rtsx_hw_param hw_param;
/* pci resources */
unsigned long addr;
@@ -1042,12 +1265,15 @@ struct rtsx_pcr {
struct rtsx_slot *slots;
u8 dma_error_count;
+ u8 ocp_stat;
+ u8 ocp_stat2;
};
#define PID_524A 0x524A
-#define PID_5249 0x5249
-#define PID_5250 0x5250
+#define PID_5249 0x5249
+#define PID_5250 0x5250
#define PID_525A 0x525A
+#define PID_5260 0x5260
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
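A loosely hedged sketch of how the new over-current protection (OCP) registers above might be programmed from a reader driver's init_ocp hook, using rtsx_pci_write_register() as it existed before the header move; the 850 mA threshold and 10 ms glitch filter are arbitrary choices and the helper name is assumed unchanged.

static void example_init_ocp(struct rtsx_pcr *pcr)
{
	/* 850 mA trip threshold. */
	rtsx_pci_write_register(pcr, REG_OCPPARA2, SD_OCP_THD_MASK,
				SD_OCP_THD_850);
	/* 10 ms glitch filter. */
	rtsx_pci_write_register(pcr, REG_OCPGLITCH, SD_OCP_GLITCH_MASK,
				SD_OCP_GLITCH_10M);
	/* Enable detection and the over-current interrupt. */
	rtsx_pci_write_register(pcr, REG_OCPCTL, SD_DETECT_EN | SD_OCP_INT_EN,
				SD_DETECT_EN | SD_OCP_INT_EN);
}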
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/rtsx_usb.h
index c446e4fd6b5c..c446e4fd6b5c 100644
--- a/include/linux/mfd/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 0dcc60e820de..841585f6e5f2 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -171,6 +171,8 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
* starting from the last allocated bit. This is less efficient
* than the default behavior (false).
*
+ * This operation provides acquire barrier semantics if it succeeds.
+ *
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
@@ -300,6 +302,12 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
+static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
+ unsigned int bitnr)
+{
+ clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
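A sketch of the acquire/release pairing the comments above describe: a bit taken with sbitmap_get() is returned with the new sbitmap_clear_bit_unlock(), so stores to the object guarded by that bit are visible before the bit can be reallocated.

#include <linux/sbitmap.h>

static int acquire_tag(struct sbitmap *sb)
{
	/* Acquire semantics on success; -1 when no bit is free. */
	return sbitmap_get(sb, 0, false);
}

static void release_tag(struct sbitmap *sb, unsigned int tag)
{
	/* Release semantics: pairs with the acquire in sbitmap_get(). */
	sbitmap_clear_bit_unlock(sb, tag);
}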
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b7c83254c566..51f52020ad5f 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -65,16 +65,18 @@ struct sg_table {
*/
#define SG_MAGIC 0x87654321
+#define SG_CHAIN 0x01UL
+#define SG_END 0x02UL
/*
* We overload the LSB of the page pointer to indicate whether it's
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
-#define sg_is_chain(sg) ((sg)->page_link & 0x01)
-#define sg_is_last(sg) ((sg)->page_link & 0x02)
+#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
+#define sg_is_last(sg) ((sg)->page_link & SG_END)
#define sg_chain_ptr(sg) \
- ((struct scatterlist *) ((sg)->page_link & ~0x03))
+ ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
/**
* sg_assign_page - Assign a given page to an SG entry
@@ -88,13 +90,13 @@ struct sg_table {
**/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
- unsigned long page_link = sg->page_link & 0x3;
+ unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END);
/*
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
- BUG_ON((unsigned long) page & 0x03);
+ BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
@@ -130,7 +132,7 @@ static inline struct page *sg_page(struct scatterlist *sg)
BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
- return (struct page *)((sg)->page_link & ~0x3);
+ return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
}
/**
@@ -178,7 +180,8 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
* Set lowest bit to indicate a link pointer, and make sure to clear
* the termination bit if it happens to be set.
*/
- prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
+ prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN)
+ & ~SG_END;
}
/**
@@ -198,8 +201,8 @@ static inline void sg_mark_end(struct scatterlist *sg)
/*
* Set termination bit, clear potential chain bit
*/
- sg->page_link |= 0x02;
- sg->page_link &= ~0x01;
+ sg->page_link |= SG_END;
+ sg->page_link &= ~SG_CHAIN;
}
/**
@@ -215,7 +218,7 @@ static inline void sg_unmark_end(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
- sg->page_link &= ~0x02;
+ sg->page_link &= ~SG_END;
}
/**
@@ -248,6 +251,24 @@ static inline void *sg_virt(struct scatterlist *sg)
return page_address(sg_page(sg)) + sg->offset;
}
+/**
+ * sg_init_marker - Initialize markers in sg table
+ * @sgl: The SG table
+ * @nents: Number of entries in table
+ *
+ **/
+static inline void sg_init_marker(struct scatterlist *sgl,
+ unsigned int nents)
+{
+#ifdef CONFIG_DEBUG_SG
+ unsigned int i;
+
+ for (i = 0; i < nents; i++)
+ sgl[i].sg_magic = SG_MAGIC;
+#endif
+ sg_mark_end(&sgl[nents - 1]);
+}
+
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
@@ -276,6 +297,17 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
unsigned int n_pages, unsigned int offset,
unsigned long size, gfp_t gfp_mask);
+#ifdef CONFIG_SGL_ALLOC
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p);
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p);
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
+void sgl_free_order(struct scatterlist *sgl, int order);
+void sgl_free(struct scatterlist *sgl);
+#endif /* CONFIG_SGL_ALLOC */
+
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
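A sketch of the CONFIG_SGL_ALLOC helpers declared above: allocate a scatterlist together with backing pages for a buffer and free it again. The 64 KiB length is arbitrary.

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>

static struct scatterlist *alloc_bounce_sgl(unsigned int *nents)
{
	/* Allocates both the sg entries and order-0 pages behind them. */
	return sgl_alloc(SZ_64K, GFP_KERNEL, nents);
}

static void free_bounce_sgl(struct scatterlist *sgl)
{
	sgl_free(sgl);
}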
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2588263a989..f228c6033832 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -93,7 +93,6 @@ struct task_group;
/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
-#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
@@ -275,6 +274,34 @@ struct load_weight {
u32 inv_weight;
};
+/**
+ * struct util_est - Estimation utilization of FAIR tasks
+ * @enqueued: instantaneous estimated utilization of a task/cpu
+ * @ewma: the Exponential Weighted Moving Average (EWMA)
+ * utilization of a task
+ *
+ * Support data structure to track an Exponential Weighted Moving Average
+ * (EWMA) of a FAIR task's utilization. New samples are added to the moving
+ * average each time a task completes an activation. Sample's weight is chosen
+ * so that the EWMA will be relatively insensitive to transient changes to the
+ * task's workload.
+ *
+ * The enqueued attribute has a slightly different meaning for tasks and cpus:
+ * - task: the task's util_avg at last task dequeue time
+ * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
+ * Thus, the util_est.enqueued of a task represents the contribution on the
+ * estimated utilization of the CPU where that task is currently enqueued.
+ *
+ * A moving average of the past instantaneous estimated utilization is
+ * tracked only for tasks. This allows sporadic drops in the utilization of
+ * an otherwise almost periodic task to be absorbed.
+ */
+struct util_est {
+ unsigned int enqueued;
+ unsigned int ewma;
+#define UTIL_EST_WEIGHT_SHIFT 2
+};
+
/*
* The load_avg/util_avg accumulates an infinite geometric series
* (see __update_load_avg() in kernel/sched/fair.c).
@@ -336,6 +363,7 @@ struct sched_avg {
unsigned long load_avg;
unsigned long runnable_load_avg;
unsigned long util_avg;
+ struct util_est util_est;
};
struct sched_statistics {
@@ -472,11 +500,15 @@ struct sched_dl_entity {
* has not been executed yet. This flag is useful to avoid race
* conditions between the inactive timer handler and the wakeup
* code.
+ *
+ * @dl_overrun tells if the task asked to be informed about runtime
+ * overruns.
*/
unsigned int dl_throttled : 1;
unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
+ unsigned int dl_overrun : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -551,6 +583,14 @@ struct task_struct {
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
+ /*
+ * recent_used_cpu is initially set as the last CPU used by a task
+ * that wakes affine another task. Waker/wakee relationships can
+ * push tasks around a CPU where each wakeup moves to the next one.
+ * Tracking a recently used CPU allows a quick search for a recently
+ * used CPU that may be idle.
+ */
+ int recent_used_cpu;
int wake_cpu;
#endif
int on_rq;
@@ -1427,6 +1467,7 @@ extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
+extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
@@ -1446,12 +1487,21 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
+#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+ struct task_struct task;
+#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+extern struct thread_info init_thread_info;
+#endif
+
+extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
+
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
@@ -1475,6 +1525,11 @@ static inline struct thread_info *task_thread_info(struct task_struct *task)
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
+/*
+ * find a task by its virtual pid and get the task struct
+ */
+extern struct task_struct *find_get_task_by_vpid(pid_t nr);
+
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
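A sketch of the calling convention implied by the new helper above: the returned task carries a reference, so the caller drops it with put_task_struct() when done. The pr_info() payload is only illustrative.

#include <linux/sched.h>
#include <linux/sched/task.h>

static int report_task(pid_t nr)
{
	struct task_struct *p = find_get_task_by_vpid(nr);

	if (!p)
		return -ESRCH;

	pr_info("pid %d is %s\n", nr, p->comm);
	put_task_struct(p);
	return 0;
}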
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index d1ad3d825561..59667444669f 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -8,11 +8,8 @@
* Interface between cpufreq drivers and the scheduler:
*/
-#define SCHED_CPUFREQ_RT (1U << 0)
-#define SCHED_CPUFREQ_DL (1U << 1)
-#define SCHED_CPUFREQ_IOWAIT (1U << 2)
-
-#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
+#define SCHED_CPUFREQ_IOWAIT (1U << 0)
+#define SCHED_CPUFREQ_MIGRATION (1U << 1)
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index a5bc8728ead7..0cb034331cbb 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,8 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SCHED_DEADLINE_H
-#define _LINUX_SCHED_DEADLINE_H
-
-#include <linux/sched.h>
/*
* SCHED_DEADLINE tasks has negative priorities, reflecting
@@ -28,5 +24,3 @@ static inline bool dl_time_before(u64 a, u64 b)
{
return (s64)(a - b) < 0;
}
-
-#endif /* _LINUX_SCHED_DEADLINE_H */
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index d849431c8060..4a6582c27dea 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -12,6 +12,7 @@ enum hk_flags {
HK_FLAG_SCHED = (1 << 3),
HK_FLAG_TICK = (1 << 4),
HK_FLAG_DOMAIN = (1 << 5),
+ HK_FLAG_WQ = (1 << 6),
};
#ifdef CONFIG_CPU_ISOLATION
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3d49b91b674d..2c570cd934af 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -7,11 +7,12 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
+#include <linux/sync_core.h>
/*
* Routines for handling mm_structs
*/
-extern struct mm_struct * mm_alloc(void);
+extern struct mm_struct *mm_alloc(void);
/**
* mmgrab() - Pin a &struct mm_struct.
@@ -35,28 +36,19 @@ static inline void mmgrab(struct mm_struct *mm)
atomic_inc(&mm->mm_count);
}
-/* mmdrop drops the mm and the page tables */
-extern void __mmdrop(struct mm_struct *);
+extern void __mmdrop(struct mm_struct *mm);
+
static inline void mmdrop(struct mm_struct *mm)
{
+ /*
+ * The implicit full barrier implied by atomic_dec_and_test() is
+ * required by the membarrier system call before returning to
+ * user-space, after storing to rq->curr.
+ */
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
-static inline void mmdrop_async_fn(struct work_struct *work)
-{
- struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
- __mmdrop(mm);
-}
-
-static inline void mmdrop_async(struct mm_struct *mm)
-{
- if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
- INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
- schedule_work(&mm->async_put_work);
- }
-}
-
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
@@ -112,7 +104,8 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
+extern void arch_pick_mmap_layout(struct mm_struct *mm,
+ struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -121,7 +114,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
#else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+static inline void arch_pick_mmap_layout(struct mm_struct *mm,
+ struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
@@ -214,18 +208,48 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
#ifdef CONFIG_MEMBARRIER
enum {
- MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
- MEMBARRIER_STATE_SWITCH_MM = (1U << 1),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
+};
+
+enum {
+ MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
};
+#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
+#include <asm/membarrier.h>
+#endif
+
+static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
+{
+ if (likely(!(atomic_read(&mm->membarrier_state) &
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
+ return;
+ sync_core_before_usermode();
+}
+
static inline void membarrier_execve(struct task_struct *t)
{
atomic_set(&t->mm->membarrier_state, 0);
}
#else
+#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
+static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
+{
+}
#endif
#endif /* _LINUX_SCHED_MM_H */
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 3d3a97d9399d..b36f4cf38111 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -16,11 +16,9 @@ static inline void cpu_load_update_nohz_stop(void) { }
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
-extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
-static inline void set_cpu_sd_state_idle(void) { }
#endif
#ifdef CONFIG_NO_HZ_COMMON
@@ -37,8 +35,4 @@ extern void wake_up_nohz_cpu(int cpu);
static inline void wake_up_nohz_cpu(int cpu) { }
#endif
-#ifdef CONFIG_NO_HZ_FULL
-extern u64 scheduler_tick_max_deferment(void);
-#endif
-
#endif /* _LINUX_SCHED_NOHZ_H */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0aa4548fb492..a7ce74c74e49 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -285,13 +285,41 @@ static inline void kernel_signal_stop(void)
schedule();
}
+#ifdef __ARCH_SI_TRAPNO
+# define ___ARCH_SI_TRAPNO(_a1) , _a1
+#else
+# define ___ARCH_SI_TRAPNO(_a1)
+#endif
+#ifdef __ia64__
+# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
+#else
+# define ___ARCH_SI_IA64(_a1, _a2, _a3)
+#endif
+
+int force_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+int send_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+
+int force_sig_mceerr(int code, void __user *, short, struct task_struct *);
+int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
+
+int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
+int force_sig_pkuerr(void __user *addr, u32 pkey);
+
+int force_sig_ptrace_errno_trap(int errno, void __user *addr);
+
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
- const struct cred *, u32);
+ const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
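A sketch of the new fault-signal helper above as an architecture fault handler might call it, on an architecture that defines neither __ARCH_SI_TRAPNO nor __ia64__, so the optional arguments compile away. fault_address is a placeholder.

#include <linux/sched/signal.h>

static void report_bad_access(unsigned long fault_address)
{
	/* Deliver SIGSEGV/SEGV_MAPERR for the faulting address to current. */
	force_sig_fault(SIGSEGV, SEGV_MAPERR,
			(void __user *)fault_address, current);
}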
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 05b8650f06f5..5be31eb7b266 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -104,6 +104,20 @@ extern int arch_task_struct_size __read_mostly;
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
+#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
+/*
+ * If an architecture has not declared a thread_struct whitelist we
+ * must assume something there may need to be copied to userspace.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = 0;
+ /* Handle dynamically sized thread_struct. */
+ *size = arch_task_struct_size - offsetof(struct task_struct, thread);
+}
+#endif
+
#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index cb4828aaa34f..6a841929073f 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -78,7 +78,7 @@ static inline void put_task_stack(struct task_struct *tsk) {}
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
-static inline int object_is_on_stack(void *obj)
+static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index cf257c2e728d..26347741ba50 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -7,6 +7,12 @@
#include <linux/sched/idle.h>
/*
+ * Increase resolution of cpu_capacity calculations
+ */
+#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
+#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+
+/*
* sched-domains (multiprocessor balancing) declarations:
*/
#ifdef CONFIG_SMP
@@ -27,12 +33,6 @@
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
-/*
- * Increase resolution of cpu_capacity calculations
- */
-#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
-#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
-
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 0dcf4e480ef7..96fe289c4c6e 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/ratelimit.h>
struct key;
@@ -41,6 +42,9 @@ struct user_struct {
defined(CONFIG_NET)
atomic_long_t locked_vm;
#endif
+
+ /* Miscellaneous per-user rate limit */
+ struct ratelimit_state ratelimit;
};
extern int uids_sysfs_init(void);
diff --git a/include/linux/scif.h b/include/linux/scif.h
index 49a35d6edc94..eeb250b73c4b 100644
--- a/include/linux/scif.h
+++ b/include/linux/scif.h
@@ -123,8 +123,8 @@ struct scif_range {
*/
struct scif_pollepd {
scif_epd_t epd;
- short events;
- short revents;
+ __poll_t events;
+ __poll_t revents;
};
/**
@@ -1266,8 +1266,8 @@ int scif_put_pages(struct scif_range *pages);
* events is a bitmask specifying the events which the application is
* interested in. The field revents is an output parameter, filled by the
* kernel with the events that actually occurred. The bits returned in revents
- * can include any of those specified in events, or one of the values POLLERR,
- * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events
+ * can include any of those specified in events, or one of the values EPOLLERR,
+ * EPOLLHUP, or EPOLLNVAL. (These three bits are meaningless in the events
* field, and will be set in the revents field whenever the corresponding
* condition is true.)
*
@@ -1279,20 +1279,20 @@ int scif_put_pages(struct scif_range *pages);
* timeout means an infinite timeout.
*
* The following bits may be set in events and returned in revents.
- * POLLIN - Data may be received without blocking. For a connected
+ * EPOLLIN - Data may be received without blocking. For a connected
* endpoint, this means that scif_recv() may be called without blocking. For a
* listening endpoint, this means that scif_accept() may be called without
* blocking.
- * POLLOUT - Data may be sent without blocking. For a connected endpoint, this
- * means that scif_send() may be called without blocking. POLLOUT may also be
+ * EPOLLOUT - Data may be sent without blocking. For a connected endpoint, this
+ * means that scif_send() may be called without blocking. EPOLLOUT may also be
* used to block waiting for a non-blocking connect to complete. This bit value
* has no meaning for a listening endpoint and is ignored if specified.
*
* The following bits are only returned in revents, and are ignored if set in
* events.
- * POLLERR - An error occurred on the endpoint
- * POLLHUP - The connection to the peer endpoint was disconnected
- * POLLNVAL - The specified endpoint descriptor is invalid.
+ * EPOLLERR - An error occurred on the endpoint
+ * EPOLLHUP - The connection to the peer endpoint was disconnected
+ * EPOLLNVAL - The specified endpoint descriptor is invalid.
*
* Return:
* Upon successful completion, scif_poll() returns a non-negative value. A
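A sketch of the documentation change above in use: the event masks on a SCIF endpoint are now the __poll_t EPOLL* constants. scif_poll() is assumed to keep its existing (endpoint array, count, timeout in milliseconds) form, and the one-second timeout is arbitrary.

#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/scif.h>

static int wait_for_data(scif_epd_t epd)
{
	struct scif_pollepd pollepd = {
		.epd    = epd,
		.events = EPOLLIN,
	};
	int ret = scif_poll(&pollepd, 1, 1000);

	if (ret < 0)
		return ret;
	return (pollepd.revents & EPOLLIN) ? 0 : -ETIMEDOUT;
}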
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
new file mode 100644
index 000000000000..b458c87b866c
--- /dev/null
+++ b/include/linux/scmi_protocol.h
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Message Protocol driver header
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define SCMI_MAX_STR_SIZE 16
+#define SCMI_MAX_NUM_RATES 16
+
+/**
+ * struct scmi_revision_info - version information structure
+ *
+ * @major_ver: Major ABI version. Change here implies risk of backward
+ * compatibility break.
+ * @minor_ver: Minor ABI version. Change here implies new feature addition,
+ * or compatible change in ABI.
+ * @num_protocols: Number of protocols that are implemented, excluding the
+ * base protocol.
+ * @num_agents: Number of agents in the system.
+ * @impl_ver: A vendor-specific implementation version.
+ * @vendor_id: A vendor identifier (NULL-terminated ASCII string)
+ * @sub_vendor_id: A sub-vendor identifier (NULL-terminated ASCII string)
+ */
+struct scmi_revision_info {
+ u16 major_ver;
+ u16 minor_ver;
+ u8 num_protocols;
+ u8 num_agents;
+ u32 impl_ver;
+ char vendor_id[SCMI_MAX_STR_SIZE];
+ char sub_vendor_id[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_clock_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool rate_discrete;
+ union {
+ struct {
+ int num_rates;
+ u64 rates[SCMI_MAX_NUM_RATES];
+ } list;
+ struct {
+ u64 min_rate;
+ u64 max_rate;
+ u64 step_size;
+ } range;
+ };
+};
+
+struct scmi_handle;
+
+/**
+ * struct scmi_clk_ops - represents the various operations provided
+ * by SCMI Clock Protocol
+ *
+ * @count_get: get the count of clocks provided by SCMI
+ * @info_get: get the information of the specified clock
+ * @rate_get: request the current clock rate of a clock
+ * @rate_set: set the clock rate of a clock
+ * @enable: enables the specified clock
+ * @disable: disables the specified clock
+ */
+struct scmi_clk_ops {
+ int (*count_get)(const struct scmi_handle *handle);
+
+ const struct scmi_clock_info *(*info_get)
+ (const struct scmi_handle *handle, u32 clk_id);
+ int (*rate_get)(const struct scmi_handle *handle, u32 clk_id,
+ u64 *rate);
+ int (*rate_set)(const struct scmi_handle *handle, u32 clk_id,
+ u32 config, u64 rate);
+ int (*enable)(const struct scmi_handle *handle, u32 clk_id);
+ int (*disable)(const struct scmi_handle *handle, u32 clk_id);
+};
+
+/**
+ * struct scmi_perf_ops - represents the various operations provided
+ * by SCMI Performance Protocol
+ *
+ * @limits_set: sets limits on the performance level of a domain
+ * @limits_get: gets limits on the performance level of a domain
+ * @level_set: sets the performance level of a domain
+ * @level_get: gets the performance level of a domain
+ * @device_domain_id: gets the scmi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @freq_set: sets the frequency for a given device using sustained frequency
+ * to sustained performance level mapping
+ * @freq_get: gets the frequency for a given device using sustained frequency
+ * to sustained performance level mapping
+ */
+struct scmi_perf_ops {
+ int (*limits_set)(const struct scmi_handle *handle, u32 domain,
+ u32 max_perf, u32 min_perf);
+ int (*limits_get)(const struct scmi_handle *handle, u32 domain,
+ u32 *max_perf, u32 *min_perf);
+ int (*level_set)(const struct scmi_handle *handle, u32 domain,
+ u32 level, bool poll);
+ int (*level_get)(const struct scmi_handle *handle, u32 domain,
+ u32 *level, bool poll);
+ int (*device_domain_id)(struct device *dev);
+ int (*get_transition_latency)(const struct scmi_handle *handle,
+ struct device *dev);
+ int (*add_opps_to_device)(const struct scmi_handle *handle,
+ struct device *dev);
+ int (*freq_set)(const struct scmi_handle *handle, u32 domain,
+ unsigned long rate, bool poll);
+ int (*freq_get)(const struct scmi_handle *handle, u32 domain,
+ unsigned long *rate, bool poll);
+};
+
+/**
+ * struct scmi_power_ops - represents the various operations provided
+ * by SCMI Power Protocol
+ *
+ * @num_domains_get: get the count of power domains provided by SCMI
+ * @name_get: gets the name of a power domain
+ * @state_set: sets the power state of a power domain
+ * @state_get: gets the power state of a power domain
+ */
+struct scmi_power_ops {
+ int (*num_domains_get)(const struct scmi_handle *handle);
+ char *(*name_get)(const struct scmi_handle *handle, u32 domain);
+#define SCMI_POWER_STATE_TYPE_SHIFT 30
+#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1)
+#define SCMI_POWER_STATE_PARAM(type, id) \
+ ((((type) & BIT(0)) << SCMI_POWER_STATE_TYPE_SHIFT) | \
+ ((id) & SCMI_POWER_STATE_ID_MASK))
+#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0)
+#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0)
+ int (*state_set)(const struct scmi_handle *handle, u32 domain,
+ u32 state);
+ int (*state_get)(const struct scmi_handle *handle, u32 domain,
+ u32 *state);
+};
+
+struct scmi_sensor_info {
+ u32 id;
+ u8 type;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+/*
+ * Partial list from Distributed Management Task Force (DMTF) specification:
+ * DSP0249 (Platform Level Data Model specification)
+ */
+enum scmi_sensor_class {
+ NONE = 0x0,
+ TEMPERATURE_C = 0x2,
+ VOLTAGE = 0x5,
+ CURRENT = 0x6,
+ POWER = 0x7,
+ ENERGY = 0x8,
+};
+
+/**
+ * struct scmi_sensor_ops - represents the various operations provided
+ * by SCMI Sensor Protocol
+ *
+ * @count_get: get the count of sensors provided by SCMI
+ * @info_get: get the information of the specified sensor
+ * @configuration_set: control notifications on cross-over events for
+ * the trip-points
+ * @trip_point_set: selects and configures a trip-point of interest
+ * @reading_get: gets the current value of the sensor
+ */
+struct scmi_sensor_ops {
+ int (*count_get)(const struct scmi_handle *handle);
+
+ const struct scmi_sensor_info *(*info_get)
+ (const struct scmi_handle *handle, u32 sensor_id);
+ int (*configuration_set)(const struct scmi_handle *handle,
+ u32 sensor_id);
+ int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id,
+ u8 trip_id, u64 trip_value);
+ int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id,
+ bool async, u64 *value);
+};
+
+/**
+ * struct scmi_handle - Handle returned to ARM SCMI clients for usage.
+ *
+ * @dev: pointer to the SCMI device
+ * @version: pointer to the structure containing SCMI version information
+ * @power_ops: pointer to set of power protocol operations
+ * @perf_ops: pointer to set of performance protocol operations
+ * @clk_ops: pointer to set of clock protocol operations
+ * @sensor_ops: pointer to set of sensor protocol operations
+ */
+struct scmi_handle {
+ struct device *dev;
+ struct scmi_revision_info *version;
+ struct scmi_perf_ops *perf_ops;
+ struct scmi_clk_ops *clk_ops;
+ struct scmi_power_ops *power_ops;
+ struct scmi_sensor_ops *sensor_ops;
+ /* for protocol internal use */
+ void *perf_priv;
+ void *clk_priv;
+ void *power_priv;
+ void *sensor_priv;
+};
+
+enum scmi_std_protocol {
+ SCMI_PROTOCOL_BASE = 0x10,
+ SCMI_PROTOCOL_POWER = 0x11,
+ SCMI_PROTOCOL_SYSTEM = 0x12,
+ SCMI_PROTOCOL_PERF = 0x13,
+ SCMI_PROTOCOL_CLOCK = 0x14,
+ SCMI_PROTOCOL_SENSOR = 0x15,
+};
+
+struct scmi_device {
+ u32 id;
+ u8 protocol_id;
+ struct device dev;
+ struct scmi_handle *handle;
+};
+
+#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
+
+struct scmi_device *
+scmi_device_create(struct device_node *np, struct device *parent, int protocol);
+void scmi_device_destroy(struct scmi_device *scmi_dev);
+
+struct scmi_device_id {
+ u8 protocol_id;
+};
+
+struct scmi_driver {
+ const char *name;
+ int (*probe)(struct scmi_device *sdev);
+ void (*remove)(struct scmi_device *sdev);
+ const struct scmi_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver)
+
+#ifdef CONFIG_ARM_SCMI_PROTOCOL
+int scmi_driver_register(struct scmi_driver *driver,
+ struct module *owner, const char *mod_name);
+void scmi_driver_unregister(struct scmi_driver *driver);
+#else
+static inline int
+scmi_driver_register(struct scmi_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void scmi_driver_unregister(struct scmi_driver *driver) {}
+#endif /* CONFIG_ARM_SCMI_PROTOCOL */
+
+#define scmi_register(driver) \
+ scmi_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define scmi_unregister(driver) \
+ scmi_driver_unregister(driver)
+
+/**
+ * module_scmi_driver() - Helper macro for registering a scmi driver
+ * @__scmi_driver: scmi_driver structure
+ *
+ * Helper macro for scmi drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_scmi_driver(__scmi_driver) \
+ module_driver(__scmi_driver, scmi_register, scmi_unregister)
+
+typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *);
+int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn);
+void scmi_protocol_unregister(int protocol_id);
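
As an illustration only (not part of the patch): a minimal sketch of a client driver consuming the new SCMI handle, assuming the header above is installed as <linux/scmi_protocol.h>. The driver name, the id table and the clock query in probe are invented for the example.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>

static int example_scmi_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;
	int count;

	if (!handle || !handle->clk_ops)
		return -ENODEV;

	/* query the clock protocol through the per-protocol ops */
	count = handle->clk_ops->count_get(handle);
	dev_info(&sdev->dev, "SCMI platform exposes %d clocks\n", count);

	return 0;
}

static const struct scmi_device_id example_scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK },
	{ },
};

static struct scmi_driver example_scmi_driver = {
	.name		= "example-scmi-clk",
	.probe		= example_scmi_probe,
	.id_table	= example_scmi_id_table,
};
module_scmi_driver(example_scmi_driver);
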
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index da803dfc7a39..b36c76635f18 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -102,11 +102,15 @@ enum sctp_cid {
/* AUTH Extension Section 4.1 */
SCTP_CID_AUTH = 0x0F,
+ /* sctp ndata 5.1. I-DATA */
+ SCTP_CID_I_DATA = 0x40,
+
/* PR-SCTP Sec 3.2 */
SCTP_CID_FWD_TSN = 0xC0,
/* Use hex, as defined in ADDIP sec. 3.1 */
SCTP_CID_ASCONF = 0xC1,
+ SCTP_CID_I_FWD_TSN = 0xC2,
SCTP_CID_ASCONF_ACK = 0x80,
SCTP_CID_RECONF = 0x82,
}; /* enum */
@@ -240,6 +244,23 @@ struct sctp_data_chunk {
struct sctp_datahdr data_hdr;
};
+struct sctp_idatahdr {
+ __be32 tsn;
+ __be16 stream;
+ __be16 reserved;
+ __be32 mid;
+ union {
+ __u32 ppid;
+ __be32 fsn;
+ };
+ __u8 payload[0];
+};
+
+struct sctp_idata_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_idatahdr data_hdr;
+};
+
/* DATA Chuck Specific Flags */
enum {
SCTP_DATA_MIDDLE_FRAG = 0x00,
@@ -596,6 +617,22 @@ struct sctp_fwdtsn_chunk {
struct sctp_fwdtsn_hdr fwdtsn_hdr;
};
+struct sctp_ifwdtsn_skip {
+ __be16 stream;
+ __u8 reserved;
+ __u8 flags;
+ __be32 mid;
+};
+
+struct sctp_ifwdtsn_hdr {
+ __be32 new_cum_tsn;
+ struct sctp_ifwdtsn_skip skip[0];
+};
+
+struct sctp_ifwdtsn_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_ifwdtsn_hdr fwdtsn_hdr;
+};
/* ADDIP
* Section 3.1.1 Address Configuration Change Chunk (ASCONF)
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 10f25f7e4304..c723a5c4e3ff 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -95,11 +95,19 @@ static inline void get_seccomp_filter(struct task_struct *tsk)
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
extern long seccomp_get_filter(struct task_struct *task,
unsigned long filter_off, void __user *data);
+extern long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off, void __user *data);
#else
static inline long seccomp_get_filter(struct task_struct *task,
unsigned long n, void __user *data)
{
return -EINVAL;
}
+static inline long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off,
+ void __user *data)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
#endif /* _LINUX_SECCOMP_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 73f1ef625d40..200920f521a1 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -36,7 +36,6 @@ struct linux_binprm;
struct cred;
struct rlimit;
struct siginfo;
-struct sem_array;
struct sembuf;
struct kern_ipc_perm;
struct audit_context;
@@ -50,9 +49,7 @@ struct qstr;
struct iattr;
struct fown_struct;
struct file_operations;
-struct shmid_kernel;
struct msg_msg;
-struct msg_queue;
struct xattr;
struct xfrm_sec_ctx;
struct mm_struct;
@@ -115,6 +112,7 @@ struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
+struct sctp_endpoint;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
@@ -324,6 +322,7 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_transfer_creds(struct cred *new, const struct cred *old);
+void security_cred_getsecid(const struct cred *c, u32 *secid);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
@@ -347,7 +346,7 @@ int security_task_setscheduler(struct task_struct *p);
int security_task_getscheduler(struct task_struct *p);
int security_task_movememory(struct task_struct *p);
int security_task_kill(struct task_struct *p, struct siginfo *info,
- int sig, u32 secid);
+ int sig, const struct cred *cred);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
@@ -355,24 +354,24 @@ int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
int security_msg_msg_alloc(struct msg_msg *msg);
void security_msg_msg_free(struct msg_msg *msg);
-int security_msg_queue_alloc(struct msg_queue *msq);
-void security_msg_queue_free(struct msg_queue *msq);
-int security_msg_queue_associate(struct msg_queue *msq, int msqflg);
-int security_msg_queue_msgctl(struct msg_queue *msq, int cmd);
-int security_msg_queue_msgsnd(struct msg_queue *msq,
+int security_msg_queue_alloc(struct kern_ipc_perm *msq);
+void security_msg_queue_free(struct kern_ipc_perm *msq);
+int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg);
+int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd);
+int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
struct msg_msg *msg, int msqflg);
-int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
+int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
struct task_struct *target, long type, int mode);
-int security_shm_alloc(struct shmid_kernel *shp);
-void security_shm_free(struct shmid_kernel *shp);
-int security_shm_associate(struct shmid_kernel *shp, int shmflg);
-int security_shm_shmctl(struct shmid_kernel *shp, int cmd);
-int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg);
-int security_sem_alloc(struct sem_array *sma);
-void security_sem_free(struct sem_array *sma);
-int security_sem_associate(struct sem_array *sma, int semflg);
-int security_sem_semctl(struct sem_array *sma, int cmd);
-int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
+int security_shm_alloc(struct kern_ipc_perm *shp);
+void security_shm_free(struct kern_ipc_perm *shp);
+int security_shm_associate(struct kern_ipc_perm *shp, int shmflg);
+int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd);
+int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg);
+int security_sem_alloc(struct kern_ipc_perm *sma);
+void security_sem_free(struct kern_ipc_perm *sma);
+int security_sem_associate(struct kern_ipc_perm *sma, int semflg);
+int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
+int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
int security_getprocattr(struct task_struct *p, char *name, char **value);
@@ -1010,7 +1009,7 @@ static inline int security_task_movememory(struct task_struct *p)
static inline int security_task_kill(struct task_struct *p,
struct siginfo *info, int sig,
- u32 secid)
+ const struct cred *cred)
{
return 0;
}
@@ -1045,32 +1044,32 @@ static inline int security_msg_msg_alloc(struct msg_msg *msg)
static inline void security_msg_msg_free(struct msg_msg *msg)
{ }
-static inline int security_msg_queue_alloc(struct msg_queue *msq)
+static inline int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
return 0;
}
-static inline void security_msg_queue_free(struct msg_queue *msq)
+static inline void security_msg_queue_free(struct kern_ipc_perm *msq)
{ }
-static inline int security_msg_queue_associate(struct msg_queue *msq,
+static inline int security_msg_queue_associate(struct kern_ipc_perm *msq,
int msqflg)
{
return 0;
}
-static inline int security_msg_queue_msgctl(struct msg_queue *msq, int cmd)
+static inline int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
{
return 0;
}
-static inline int security_msg_queue_msgsnd(struct msg_queue *msq,
+static inline int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
struct msg_msg *msg, int msqflg)
{
return 0;
}
-static inline int security_msg_queue_msgrcv(struct msg_queue *msq,
+static inline int security_msg_queue_msgrcv(struct kern_ipc_perm *msq,
struct msg_msg *msg,
struct task_struct *target,
long type, int mode)
@@ -1078,50 +1077,50 @@ static inline int security_msg_queue_msgrcv(struct msg_queue *msq,
return 0;
}
-static inline int security_shm_alloc(struct shmid_kernel *shp)
+static inline int security_shm_alloc(struct kern_ipc_perm *shp)
{
return 0;
}
-static inline void security_shm_free(struct shmid_kernel *shp)
+static inline void security_shm_free(struct kern_ipc_perm *shp)
{ }
-static inline int security_shm_associate(struct shmid_kernel *shp,
+static inline int security_shm_associate(struct kern_ipc_perm *shp,
int shmflg)
{
return 0;
}
-static inline int security_shm_shmctl(struct shmid_kernel *shp, int cmd)
+static inline int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
return 0;
}
-static inline int security_shm_shmat(struct shmid_kernel *shp,
+static inline int security_shm_shmat(struct kern_ipc_perm *shp,
char __user *shmaddr, int shmflg)
{
return 0;
}
-static inline int security_sem_alloc(struct sem_array *sma)
+static inline int security_sem_alloc(struct kern_ipc_perm *sma)
{
return 0;
}
-static inline void security_sem_free(struct sem_array *sma)
+static inline void security_sem_free(struct kern_ipc_perm *sma)
{ }
-static inline int security_sem_associate(struct sem_array *sma, int semflg)
+static inline int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
{
return 0;
}
-static inline int security_sem_semctl(struct sem_array *sma, int cmd)
+static inline int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
{
return 0;
}
-static inline int security_sem_semop(struct sem_array *sma,
+static inline int security_sem_semop(struct kern_ipc_perm *sma,
struct sembuf *sops, unsigned nsops,
int alter)
{
@@ -1229,6 +1228,11 @@ int security_tun_dev_create(void);
int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
+int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb);
+int security_sctp_bind_connect(struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen);
+void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+ struct sock *newsk);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
@@ -1421,6 +1425,25 @@ static inline int security_tun_dev_open(void *security)
{
return 0;
}
+
+static inline int security_sctp_assoc_request(struct sctp_endpoint *ep,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int security_sctp_bind_connect(struct sock *sk, int optname,
+ struct sockaddr *address,
+ int addrlen)
+{
+ return 0;
+}
+
+static inline void security_sctp_sk_clone(struct sctp_endpoint *ep,
+ struct sock *sk,
+ struct sock *newsk)
+{
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 9badd322dcee..5608a500c43e 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -2,48 +2,10 @@
#ifndef _LINUX_SEM_H
#define _LINUX_SEM_H
-#include <linux/atomic.h>
-#include <linux/rcupdate.h>
-#include <linux/cache.h>
-#include <linux/time64.h>
#include <uapi/linux/sem.h>
struct task_struct;
-
-/* One semaphore structure for each semaphore in the system. */
-struct sem {
- int semval; /* current value */
- /*
- * PID of the process that last modified the semaphore. For
- * Linux, specifically these are:
- * - semop
- * - semctl, via SETVAL and SETALL.
- * - at task exit when performing undo adjustments (see exit_sem).
- */
- int sempid;
- spinlock_t lock; /* spinlock for fine-grained semtimedop */
- struct list_head pending_alter; /* pending single-sop operations */
- /* that alter the semaphore */
- struct list_head pending_const; /* pending single-sop operations */
- /* that do not alter the semaphore*/
- time_t sem_otime; /* candidate for sem_otime */
-} ____cacheline_aligned_in_smp;
-
-/* One sem_array data structure for each set of semaphores in the system. */
-struct sem_array {
- struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
- time64_t sem_ctime; /* create/last semctl() time */
- struct list_head pending_alter; /* pending operations */
- /* that alter the array */
- struct list_head pending_const; /* pending complex operations */
- /* that do not alter semvals */
- struct list_head list_id; /* undo requests on this array */
- int sem_nsems; /* no. of semaphores in array */
- int complex_count; /* pending complex operations */
- unsigned int use_global_lock;/* >0: global lock required */
-
- struct sem sems[];
-} __randomize_layout;
+struct sem_undo_list;
#ifdef CONFIG_SYSVIPC
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index dc368b8ce215..11c86fbfeb98 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -4,7 +4,7 @@
*
* Distributed under the terms of the GNU GPL, version 2
*
- * Please see kernel/semaphore.c for documentation of these functions
+ * Please see kernel/locking/semaphore.c for documentation of these functions
*/
#ifndef __LINUX_SEMAPHORE_H
#define __LINUX_SEMAPHORE_H
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 09c6e28746f9..a121982af0f5 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -118,9 +118,14 @@ __printf(2, 3)
void seq_printf(struct seq_file *m, const char *fmt, ...);
void seq_putc(struct seq_file *m, char c);
void seq_puts(struct seq_file *m, const char *s);
+void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
+ unsigned long long num, unsigned int width);
void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
unsigned long long num);
void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num);
+void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
+ unsigned long long v, unsigned int width);
+
void seq_escape(struct seq_file *m, const char *s, const char *esc);
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
@@ -140,6 +145,20 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
@@ -221,4 +240,5 @@ extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *hea
extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);
+void seq_file_init(void);
#endif
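
For orientation, a hedged sketch (not part of the patch) of how the DEFINE_SHOW_ATTRIBUTE() macro added above is meant to be used: it generates the single_open() boilerplate around a *_show() routine, e.g. for a debugfs file. The file and function names below are made up.

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int example_stats_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "frames: 0\nerrors: 0\n");
	return 0;
}
/* generates example_stats_open() and example_stats_fops */
DEFINE_SHOW_ATTRIBUTE(example_stats);

static int __init example_stats_init(void)
{
	debugfs_create_file("example_stats", 0444, NULL, NULL,
			    &example_stats_fops);
	return 0;
}
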
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f189a8a3bbb8..bcf4cf26b8c8 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -278,9 +278,8 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
- int seq = READ_ONCE(s->sequence);
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
- smp_read_barrier_depends();
+ int seq = READ_ONCE(s->sequence); /* ^^^ */
return seq;
}
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index d609e6dc5bad..f153b2c7f0cd 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -27,8 +27,10 @@ struct serdev_device;
/**
* struct serdev_device_ops - Callback operations for a serdev device
- * @receive_buf: Function called with data received from device.
- * @write_wakeup: Function called when ready to transmit more data.
+ * @receive_buf: Function called with data received from device;
+ * returns number of bytes accepted; may sleep.
+ * @write_wakeup: Function called when ready to transmit more data; must
+ * not sleep.
*/
struct serdev_device_ops {
int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
@@ -76,6 +78,12 @@ static inline struct serdev_device_driver *to_serdev_device_driver(struct device
return container_of(d, struct serdev_device_driver, driver);
}
+enum serdev_parity {
+ SERDEV_PARITY_NONE,
+ SERDEV_PARITY_EVEN,
+ SERDEV_PARITY_ODD,
+};
+
/*
* serdev controller structures
*/
@@ -86,6 +94,7 @@ struct serdev_controller_ops {
int (*open)(struct serdev_controller *);
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
+ int (*set_parity)(struct serdev_controller *, enum serdev_parity);
unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
void (*wait_until_sent)(struct serdev_controller *, long);
int (*get_tiocm)(struct serdev_controller *);
@@ -193,6 +202,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
int serdev_device_open(struct serdev_device *);
void serdev_device_close(struct serdev_device *);
+int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
@@ -298,6 +308,9 @@ static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enabl
return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS);
}
+int serdev_device_set_parity(struct serdev_device *serdev,
+ enum serdev_parity parity);
+
/*
* serdev hooks into TTY core
*/
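
A brief sketch, purely illustrative and not part of the patch, of how a serdev client probe might combine the new devm_serdev_device_open() and serdev_device_set_parity() helpers; the function name, baud rate and parity choice are assumptions.

#include <linux/serdev.h>

static int example_serdev_probe(struct serdev_device *serdev)
{
	int ret;

	/* managed open; the devres core closes the device again on unbind */
	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	serdev_device_set_flow_control(serdev, false);

	return serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
}
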
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 37b044e78333..1d356105f25a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -376,10 +376,10 @@ extern int of_setup_earlycon(const struct earlycon_id *match,
const char *options);
#ifdef CONFIG_SERIAL_EARLYCON
-extern bool earlycon_init_is_deferred __initdata;
+extern bool earlycon_acpi_spcr_enable __initdata;
int setup_earlycon(char *buf);
#else
-static const bool earlycon_init_is_deferred;
+static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
static inline int setup_earlycon(char *buf) { return 0; }
#endif
@@ -387,7 +387,7 @@ struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options);
-void uart_parse_options(char *options, int *baud, int *parity, int *bits,
+void uart_parse_options(const char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
int parity, int bits, int flow);
@@ -501,9 +501,5 @@ static inline int uart_handle_break(struct uart_port *port)
(cflag) & CRTSCTS || \
!((cflag) & CLOCAL))
-/*
- * Common device tree parsing helpers
- */
-void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf);
-
+void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf);
#endif /* LINUX_SERIAL_CORE_H */
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index a7f004a3c177..463ed28d2b27 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Internal header file for Samsung S3C2410 serial ports (UART0-2)
*
@@ -10,21 +11,7 @@
* Internal header file for MX1ADS serial ports (UART1 & 2)
*
* Copyright (C) 2002 Shane Nay (shane@minirl.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
+ */
#ifndef __ASM_ARM_REGS_SERIAL_H
#define __ASM_ARM_REGS_SERIAL_H
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index e5140648f638..da5178216da5 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -17,4 +17,16 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
+#ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT
+static inline int set_memory_encrypted(unsigned long addr, int numpages)
+{
+ return 0;
+}
+
+static inline int set_memory_decrypted(unsigned long addr, int numpages)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
+
#endif /* _LINUX_SET_MEMORY_H_ */
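
The point of the new stubs is that generic code can flip a region's encryption state unconditionally: without CONFIG_ARCH_HAS_MEM_ENCRYPT the calls collapse to a successful no-op. A tiny sketch with an invented helper name, not part of the patch:

#include <linux/set_memory.h>

static int example_share_buffer(unsigned long vaddr, int npages)
{
	/* vaddr is assumed to be page aligned */
	return set_memory_decrypted(vaddr, npages);
}
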
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 4a906f560817..ebce9e24906a 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -3,7 +3,7 @@
#include <linux/phy.h>
-struct __packed sfp_eeprom_base {
+struct sfp_eeprom_base {
u8 phys_id;
u8 phys_ext_id;
u8 connector;
@@ -165,13 +165,47 @@ struct __packed sfp_eeprom_base {
char vendor_rev[4];
union {
__be16 optical_wavelength;
- u8 cable_spec;
- };
+ __be16 cable_compliance;
+ struct {
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 reserved60_2:6;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_app_e:1;
+ u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 sff8431_app_e:1;
+ u8 fc_pi_4_app_h:1;
+ u8 reserved60_2:6;
+ u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+ } __packed passive;
+ struct {
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 reserved60_4:4;
+ u8 fc_pi_4_lim:1;
+ u8 sff8431_lim:1;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_app_e:1;
+ u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 sff8431_app_e:1;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_lim:1;
+ u8 fc_pi_4_lim:1;
+ u8 reserved60_4:4;
+ u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+ } __packed active;
+ } __packed;
u8 reserved62;
u8 cc_base;
-};
+} __packed;
-struct __packed sfp_eeprom_ext {
+struct sfp_eeprom_ext {
__be16 options;
u8 br_max;
u8 br_min;
@@ -181,12 +215,21 @@ struct __packed sfp_eeprom_ext {
u8 enhopts;
u8 sff8472_compliance;
u8 cc_ext;
-};
-
-struct __packed sfp_eeprom_id {
+} __packed;
+
+/**
+ * struct sfp_eeprom_id - raw SFP module identification information
+ * @base: base SFP module identification structure
+ * @ext: extended SFP module identification structure
+ *
+ * See the SFF-8472 specification and related documents for the definition
+ * of these structure members. This can be obtained from
+ * ftp://ftp.seagate.com/sff
+ */
+struct sfp_eeprom_id {
struct sfp_eeprom_base base;
struct sfp_eeprom_ext ext;
-};
+} __packed;
/* SFP EEPROM registers */
enum {
@@ -222,6 +265,7 @@ enum {
SFP_SFF8472_COMPLIANCE = 0x5e,
SFP_CC_EXT = 0x5f,
+ SFP_PHYS_ID_SFF = 0x02,
SFP_PHYS_ID_SFP = 0x03,
SFP_PHYS_EXT_ID_SFP = 0x04,
SFP_CONNECTOR_UNSPEC = 0x00,
@@ -347,35 +391,49 @@ enum {
SFP_PAGE = 0x7f,
};
-struct device_node;
+struct fwnode_handle;
struct ethtool_eeprom;
struct ethtool_modinfo;
struct net_device;
struct sfp_bus;
+/**
+ * struct sfp_upstream_ops - upstream operations structure
+ * @module_insert: called after a module has been detected to determine
+ * whether the module is supported for the upstream device.
+ * @module_remove: called after the module has been removed.
+ * @link_down: called when the link is non-operational for whatever
+ * reason.
+ * @link_up: called when the link is operational.
+ * @connect_phy: called when an I2C accessible PHY has been detected
+ * on the module.
+ * @disconnect_phy: called when a module with an I2C accessible PHY has
+ * been removed.
+ */
struct sfp_upstream_ops {
- int (*module_insert)(void *, const struct sfp_eeprom_id *id);
- void (*module_remove)(void *);
- void (*link_down)(void *);
- void (*link_up)(void *);
- int (*connect_phy)(void *, struct phy_device *);
- void (*disconnect_phy)(void *);
+ int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
+ void (*module_remove)(void *priv);
+ void (*link_down)(void *priv);
+ void (*link_up)(void *priv);
+ int (*connect_phy)(void *priv, struct phy_device *);
+ void (*disconnect_phy)(void *priv);
};
#if IS_ENABLED(CONFIG_SFP)
int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
-phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
+phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
-struct sfp_bus *sfp_register_upstream(struct device_node *np,
+struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
struct net_device *ndev, void *upstream,
const struct sfp_upstream_ops *ops);
void sfp_unregister_upstream(struct sfp_bus *bus);
@@ -387,18 +445,19 @@ static inline int sfp_parse_port(struct sfp_bus *bus,
return PORT_OTHER;
}
-static inline phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id)
-{
- return PHY_INTERFACE_MODE_NA;
-}
-
static inline void sfp_parse_support(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
unsigned long *support)
{
}
+static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *link_modes)
+{
+ return PHY_INTERFACE_MODE_NA;
+}
+
static inline int sfp_get_module_info(struct sfp_bus *bus,
struct ethtool_modinfo *modinfo)
{
@@ -419,7 +478,8 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
{
}
-static inline struct sfp_bus *sfp_register_upstream(struct device_node *np,
+static inline struct sfp_bus *sfp_register_upstream(
+ struct fwnode_handle *fwnode,
struct net_device *ndev, void *upstream,
const struct sfp_upstream_ops *ops)
{
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 94081e9a5010..6dfda97a6c1a 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -5,12 +5,9 @@
#include <linux/phy.h>
#include <linux/if_ether.h>
-enum {EDMAC_LITTLE_ENDIAN};
-
struct sh_eth_plat_data {
int phy;
int phy_irq;
- int edmac_endian;
phy_interface_t phy_interface;
void (*set_mdio_gate)(void *addr);
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 2bbafacfbfc9..d8e69aed3d32 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -7,27 +7,7 @@
#include <uapi/linux/shm.h>
#include <asm/shmparam.h>
-struct shmid_kernel /* private to the kernel */
-{
- struct kern_ipc_perm shm_perm;
- struct file *shm_file;
- unsigned long shm_nattch;
- unsigned long shm_segsz;
- time64_t shm_atim;
- time64_t shm_dtim;
- time64_t shm_ctim;
- pid_t shm_cprid;
- pid_t shm_lprid;
- struct user_struct *mlock_user;
-
- /* The task created the shm object. NULL if the task is dead. */
- struct task_struct *shm_creator;
- struct list_head shm_clist; /* list by creator */
-} __randomize_layout;
-
-/* shm_mode upper byte flags */
-#define SHM_DEST 01000 /* segment will be destroyed on last detach */
-#define SHM_LOCKED 02000 /* segment will not be swapped */
+struct file;
#ifdef CONFIG_SYSVIPC
struct sysv_shm {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 06b295bec00d..73b5e655a76e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -112,13 +112,11 @@ extern void shmem_uncharge(struct inode *inode, long pages);
#ifdef CONFIG_TMPFS
-extern int shmem_add_seals(struct file *file, unsigned int seals);
-extern int shmem_get_seals(struct file *file);
-extern long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
#else
-static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a)
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
{
return -EINVAL;
}
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 042968dd98f0..a9bc7e1b077e 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -11,13 +11,14 @@ struct task_struct;
/* for sysctl */
extern int print_fatal_signals;
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from)
{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+ memcpy(to, from, sizeof(*to));
+}
+
+static inline void clear_siginfo(struct siginfo *info)
+{
+ memset(info, 0, sizeof(*info));
}
int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
@@ -29,9 +30,7 @@ enum siginfo_layout {
SIL_FAULT,
SIL_CHLD,
SIL_RT,
-#ifdef __ARCH_SIGSYS
SIL_SYS,
-#endif
};
enum siginfo_layout siginfo_layout(int sig, int si_code);
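
The intended pattern for the new clear_siginfo() helper is to zero a stack siginfo before filling it, so no uninitialized padding reaches userspace. A hedged sketch follows; the helper name and the SIGTRAP/TRAP_BRKPT choice are illustrative, not taken from the patch.

#include <linux/sched/signal.h>
#include <linux/signal.h>

static void example_send_trap(struct task_struct *tsk, void __user *addr)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	info.si_addr  = addr;

	force_sig_info(SIGTRAP, &info, tsk);
}
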
diff --git a/include/linux/siox.h b/include/linux/siox.h
new file mode 100644
index 000000000000..d79624e83134
--- /dev/null
+++ b/include/linux/siox.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/device.h>
+
+#define to_siox_device(_dev) container_of((_dev), struct siox_device, dev)
+struct siox_device {
+ struct list_head node; /* node in smaster->devices */
+ struct siox_master *smaster;
+ struct device dev;
+
+ const char *type;
+ size_t inbytes;
+ size_t outbytes;
+ u8 statustype;
+
+ u8 status_read_clean;
+ u8 status_written;
+ u8 status_written_lastcycle;
+ bool connected;
+
+ /* statistics */
+ unsigned int watchdog_errors;
+ unsigned int status_errors;
+
+ struct kernfs_node *status_errors_kn;
+ struct kernfs_node *watchdog_kn;
+ struct kernfs_node *watchdog_errors_kn;
+ struct kernfs_node *connected_kn;
+};
+
+bool siox_device_synced(struct siox_device *sdevice);
+bool siox_device_connected(struct siox_device *sdevice);
+
+struct siox_driver {
+ int (*probe)(struct siox_device *sdevice);
+ int (*remove)(struct siox_device *sdevice);
+ void (*shutdown)(struct siox_device *sdevice);
+
+ /*
+ * buf is big enough to hold sdev->inbytes - 1 bytes; the status byte
+ * is handled by the framework.
+ */
+ int (*set_data)(struct siox_device *sdevice, u8 status, u8 buf[]);
+ /*
+ * buf is big enough to hold sdev->outbytes - 1 bytes; the status byte
+ * is handled by the framework.
+ */
+ int (*get_data)(struct siox_device *sdevice, const u8 buf[]);
+
+ struct device_driver driver;
+};
+
+static inline struct siox_driver *to_siox_driver(struct device_driver *driver)
+{
+ if (driver)
+ return container_of(driver, struct siox_driver, driver);
+ else
+ return NULL;
+}
+
+int __siox_driver_register(struct siox_driver *sdriver, struct module *owner);
+
+static inline int siox_driver_register(struct siox_driver *sdriver)
+{
+ return __siox_driver_register(sdriver, THIS_MODULE);
+}
+
+static inline void siox_driver_unregister(struct siox_driver *sdriver)
+{
+ return driver_unregister(&sdriver->driver);
+}
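
A minimal SIOX client driver against the interface above, given as an illustrative sketch rather than part of the patch; all names and the payload handling are invented.

#include <linux/module.h>
#include <linux/siox.h>
#include <linux/string.h>

static int example_siox_probe(struct siox_device *sdevice)
{
	dev_info(&sdevice->dev, "bound, %zu in / %zu out bytes per cycle\n",
		 sdevice->inbytes, sdevice->outbytes);
	return 0;
}

static int example_siox_set_data(struct siox_device *sdevice, u8 status,
				 u8 buf[])
{
	/* buf carries only the payload; the status byte stays with the core */
	if (sdevice->inbytes > 1)
		memset(buf, 0, sdevice->inbytes - 1);
	return 0;
}

static struct siox_driver example_siox_driver = {
	.probe	  = example_siox_probe,
	.set_data = example_siox_set_data,
	.driver	  = {
		.name = "example-siox",
	},
};

static int __init example_siox_init(void)
{
	return siox_driver_register(&example_siox_driver);
}
module_init(example_siox_init);

static void __exit example_siox_exit(void)
{
	siox_driver_unregister(&example_siox_driver);
}
module_exit(example_siox_exit);
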
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index ce3e8150c174..fbde0bc7e882 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -8,6 +8,8 @@
#ifndef __LINUX_SIZES_H__
#define __LINUX_SIZES_H__
+#include <linux/const.h>
+
#define SZ_1 0x00000001
#define SZ_2 0x00000002
#define SZ_4 0x00000004
@@ -44,4 +46,6 @@
#define SZ_1G 0x40000000
#define SZ_2G 0x80000000
+#define SZ_4G _AC(0x100000000, ULL)
+
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 8621ffdeecbf..a6b6e8bb3d7b 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -69,7 +69,12 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb
*/
static inline bool __skb_array_empty(struct skb_array *a)
{
- return !__ptr_ring_peek(&a->ring);
+ return __ptr_ring_empty(&a->ring);
+}
+
+static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
+{
+ return __ptr_ring_peek(&a->ring);
}
static inline bool skb_array_empty(struct skb_array *a)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a38c80e9f91e..9065477ed255 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -466,6 +466,9 @@ struct ubuf_info {
#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
+void mm_unaccount_pinned_pages(struct mmpin *mmp);
+
struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
struct ubuf_info *uarg);
@@ -669,6 +672,7 @@ struct sk_buff {
* UDP receive path is one user.
*/
unsigned long dev_scratch;
+ int ip_defrag_offset;
};
};
struct rb_node rbnode; /* used in netem & tcp stack */
@@ -1211,6 +1215,11 @@ static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
data, proto, nhoff, hlen, flags);
}
+void
+skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container);
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -3241,7 +3250,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
-unsigned int datagram_poll(struct file *file, struct socket *sock,
+__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
@@ -3280,8 +3289,8 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
+bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
@@ -3640,7 +3649,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
return true;
}
-/* For small packets <= CHECKSUM_BREAK peform checksum complete directly
+/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
* in checksum_init.
*/
#define CHECKSUM_BREAK 76
@@ -4032,6 +4041,12 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
+/* Note: Should be called only if skb_is_gso(skb) is true */
+static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
+}
+
static inline void skb_gso_reset(struct sk_buff *skb)
{
skb_shinfo(skb)->gso_size = 0;
@@ -4039,6 +4054,22 @@ static inline void skb_gso_reset(struct sk_buff *skb)
skb_shinfo(skb)->gso_type = 0;
}
+static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
+ u16 increment)
+{
+ if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
+ return;
+ shinfo->gso_size += increment;
+}
+
+static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
+ u16 decrement)
+{
+ if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
+ return;
+ shinfo->gso_size -= decrement;
+}
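
The helpers above adjust gso_size while refusing to touch GSO_BY_FRAGS skbs. As a hedged example (not from the patch), accounting for an encapsulation header that grows every segment might look like this; the names are invented.

#include <linux/skbuff.h>

static void example_account_encap_push(struct sk_buff *skb, u16 encap_len)
{
	/* shrink the per-segment payload so resegmented packets still fit */
	if (skb_is_gso(skb))
		skb_decrease_gso_size(skb_shinfo(skb), encap_len);
}
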
+
void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
@@ -4098,23 +4129,6 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
return !skb->head_frag || skb_cloned(skb);
}
-/**
- * skb_gso_network_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_network_seglen is used to determine the real size of the
- * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
- *
- * The MAC/L2 header is not accounted for.
- */
-static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
-{
- unsigned int hdr_len = skb_transport_header(skb) -
- skb_network_header(skb);
- return hdr_len + skb_gso_transport_seglen(skb);
-}
-
/* Local Checksum Offload.
* Compute outer checksum based on the assumption that the
* inner checksum will be offloaded later.
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 50697a1d6621..81ebd71f8c03 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -125,7 +125,6 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
-#include <linux/kmemleak.h>
#include <linux/kasan.h>
struct mem_cgroup;
@@ -135,9 +134,16 @@ struct mem_cgroup;
void __init kmem_cache_init(void);
bool slab_is_available(void);
-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
- slab_flags_t,
- void (*)(void *));
+extern bool usercopy_fallback;
+
+struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
+ void (*ctor)(void *));
+struct kmem_cache *kmem_cache_create_usercopy(const char *name,
+ unsigned int size, unsigned int align,
+ slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
@@ -153,9 +159,20 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
* f.e. add ____cacheline_aligned_in_smp to the struct declaration
* then the objects will be properly aligned in SMP configurations.
*/
-#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
- sizeof(struct __struct), __alignof__(struct __struct),\
- (__flags), NULL)
+#define KMEM_CACHE(__struct, __flags) \
+ kmem_cache_create(#__struct, sizeof(struct __struct), \
+ __alignof__(struct __struct), (__flags), NULL)
+
+/*
+ * To whitelist a single field for copying to/from usercopy, use this
+ * macro instead for KMEM_CACHE() above.
+ */
+#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
+ kmem_cache_create_usercopy(#__struct, \
+ sizeof(struct __struct), \
+ __alignof__(struct __struct), (__flags), \
+ offsetof(struct __struct, __field), \
+ sizeof_field(struct __struct, __field), NULL)
/*
* Common kmalloc functions provided by all allocators
@@ -167,15 +184,11 @@ void kzfree(const void *);
size_t ksize(const void *);
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-const char *__check_heap_object(const void *ptr, unsigned long n,
- struct page *page);
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+ bool to_user);
#else
-static inline const char *__check_heap_object(const void *ptr,
- unsigned long n,
- struct page *page)
-{
- return NULL;
-}
+static inline void __check_heap_object(const void *ptr, unsigned long n,
+ struct page *page, bool to_user) { }
#endif
/*
@@ -295,7 +308,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
* 2 = 129 .. 192 bytes
* n = 2^(n-1)+1 .. 2^n
*/
-static __always_inline int kmalloc_index(size_t size)
+static __always_inline unsigned int kmalloc_index(size_t size)
{
if (!size)
return 0;
@@ -491,7 +504,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
if (!(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
+ unsigned int index = kmalloc_index(size);
if (!index)
return ZERO_SIZE_PTR;
@@ -509,11 +522,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
* return size or 0 if a kmalloc cache for that
* size does not exist
*/
-static __always_inline int kmalloc_size(int n)
+static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
if (n > 2)
- return 1 << n;
+ return 1U << n;
if (n == 1 && KMALLOC_MIN_SIZE <= 32)
return 96;
@@ -529,7 +542,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
#ifndef CONFIG_SLOB
if (__builtin_constant_p(size) &&
size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
- int i = kmalloc_index(size);
+ unsigned int i = kmalloc_index(size);
if (!i)
return ZERO_SIZE_PTR;
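
A sketch of whitelisting a single field with the KMEM_CACHE_USERCOPY() wrapper added above; the structure, field and cache names are invented for illustration and are not part of the patch.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_obj {
	u32 internal_state;
	char user_visible[64];	/* only this region may be copied to/from user */
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = KMEM_CACHE_USERCOPY(example_obj, SLAB_HWCACHE_ALIGN,
					     user_visible);
	return example_cachep ? 0 : -ENOMEM;
}
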
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 072e46e9e1d5..d9228e4d0320 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -85,6 +85,9 @@ struct kmem_cache {
unsigned int *random_seq;
#endif
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
+
struct kmem_cache_node *node[MAX_NUMNODES];
};
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h
new file mode 100644
index 000000000000..c36cf121d2cd
--- /dev/null
+++ b/include/linux/slimbus.h
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#ifndef _LINUX_SLIMBUS_H
+#define _LINUX_SLIMBUS_H
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/mod_devicetable.h>
+
+extern struct bus_type slimbus_bus;
+
+/**
+ * struct slim_eaddr - Enumeration address for a SLIMbus device
+ * @manf_id: Manufacturer Id for the device
+ * @prod_code: Product code
+ * @dev_index: Device index
+ * @instance: Instance value
+ */
+struct slim_eaddr {
+ u16 manf_id;
+ u16 prod_code;
+ u8 dev_index;
+ u8 instance;
+} __packed;
+
+/**
+ * enum slim_device_status - slim device status
+ * @SLIM_DEVICE_STATUS_DOWN: Slim device is absent or not reported yet.
+ * @SLIM_DEVICE_STATUS_UP: Slim device is announced on the bus.
+ * @SLIM_DEVICE_STATUS_RESERVED: Reserved for future use.
+ */
+enum slim_device_status {
+ SLIM_DEVICE_STATUS_DOWN = 0,
+ SLIM_DEVICE_STATUS_UP,
+ SLIM_DEVICE_STATUS_RESERVED,
+};
+
+struct slim_controller;
+
+/**
+ * struct slim_device - Slim device handle.
+ * @dev: Driver model representation of the device.
+ * @e_addr: Enumeration address of this device.
+ * @status: slim device status
+ * @ctrl: slim controller instance.
+ * @laddr: 1-byte Logical address of this device.
+ * @is_laddr_valid: indicates if the laddr is valid or not
+ *
+ * This is the client/device handle returned when a SLIMbus
+ * device is registered with a controller.
+ * A pointer to this structure is used by the client driver as a handle.
+ */
+struct slim_device {
+ struct device dev;
+ struct slim_eaddr e_addr;
+ struct slim_controller *ctrl;
+ enum slim_device_status status;
+ u8 laddr;
+ bool is_laddr_valid;
+};
+
+#define to_slim_device(d) container_of(d, struct slim_device, dev)
+
+/**
+ * struct slim_driver - SLIMbus 'generic device' (slave) device driver
+ * (similar to 'spi_device' on SPI)
+ * @probe: Binds this driver to a SLIMbus device.
+ * @remove: Unbinds this driver from the SLIMbus device.
+ * @shutdown: Standard shutdown callback used during powerdown/halt.
+ * @device_status: This callback is called when
+ * - The device reports present and gets a laddr assigned
+ * - The device reports absent, or the bus goes down.
+ * @driver: SLIMbus device drivers should initialize name and owner field of
+ * this structure
+ * @id_table: List of SLIMbus devices supported by this driver
+ */
+
+struct slim_driver {
+ int (*probe)(struct slim_device *sl);
+ void (*remove)(struct slim_device *sl);
+ void (*shutdown)(struct slim_device *sl);
+ int (*device_status)(struct slim_device *sl,
+ enum slim_device_status s);
+ struct device_driver driver;
+ const struct slim_device_id *id_table;
+};
+#define to_slim_driver(d) container_of(d, struct slim_driver, driver)
+
+/**
+ * struct slim_val_inf - Slimbus value or information element
+ * @start_offset: Specifies starting offset in information/value element map
+ * @rbuf: buffer to read the values
+ * @wbuf: buffer to write
+ * @num_bytes: up to 16. This ensures that the message will fit the slicesize
+ * per SLIMbus spec
+ * @comp: completion for asynchronous operations, valid only if TID is
+ * required for transaction, like REQUEST operations.
+ * Rest of the transactions are synchronous anyway.
+ */
+struct slim_val_inf {
+ u16 start_offset;
+ u8 num_bytes;
+ u8 *rbuf;
+ const u8 *wbuf;
+ struct completion *comp;
+};
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define slim_driver_register(drv) \
+ __slim_driver_register(drv, THIS_MODULE)
+int __slim_driver_register(struct slim_driver *drv, struct module *owner);
+void slim_driver_unregister(struct slim_driver *drv);
+
+/**
+ * module_slim_driver() - Helper macro for registering a SLIMbus driver
+ * @__slim_driver: slim_driver struct
+ *
+ * Helper macro for SLIMbus drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_slim_driver(__slim_driver) \
+ module_driver(__slim_driver, slim_driver_register, \
+ slim_driver_unregister)
+
+static inline void *slim_get_devicedata(const struct slim_device *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+static inline void slim_set_devicedata(struct slim_device *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
+
+struct slim_device *slim_get_device(struct slim_controller *ctrl,
+ struct slim_eaddr *e_addr);
+int slim_get_logical_addr(struct slim_device *sbdev);
+
+/* Information Element management messages */
+#define SLIM_MSG_MC_REQUEST_INFORMATION 0x20
+#define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION 0x21
+#define SLIM_MSG_MC_REPLY_INFORMATION 0x24
+#define SLIM_MSG_MC_CLEAR_INFORMATION 0x28
+#define SLIM_MSG_MC_REPORT_INFORMATION 0x29
+
+/* Value Element management messages */
+#define SLIM_MSG_MC_REQUEST_VALUE 0x60
+#define SLIM_MSG_MC_REQUEST_CHANGE_VALUE 0x61
+#define SLIM_MSG_MC_REPLY_VALUE 0x64
+#define SLIM_MSG_MC_CHANGE_VALUE 0x68
+
+int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg,
+ u8 mc);
+int slim_readb(struct slim_device *sdev, u32 addr);
+int slim_writeb(struct slim_device *sdev, u32 addr, u8 value);
+int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val);
+int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val);
+#endif /* _LINUX_SLIMBUS_H */
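
A hedged sketch (not part of the patch) of a SLIMbus client driver that waits for the device to report up and then reads a value element with slim_readb(); the register offset and all names are invented, and the id_table is omitted for brevity.

#include <linux/module.h>
#include <linux/slimbus.h>

static int example_slim_probe(struct slim_device *sdev)
{
	return 0;
}

static int example_slim_device_status(struct slim_device *sdev,
				      enum slim_device_status status)
{
	int val;

	if (status != SLIM_DEVICE_STATUS_UP)
		return 0;

	/* 0x100 is a hypothetical value-element address */
	val = slim_readb(sdev, 0x100);
	if (val < 0)
		return val;

	dev_info(&sdev->dev, "value element 0x100 reads %#x\n", val);
	return 0;
}

static struct slim_driver example_slim_driver = {
	.probe		= example_slim_probe,
	.device_status	= example_slim_device_status,
	.driver = {
		.name = "example-slim",
	},
};
module_slim_driver(example_slim_driver);
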
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0adae162dc8f..3773e26c08c1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -73,7 +73,7 @@ struct kmem_cache_cpu {
* given order would contain.
*/
struct kmem_cache_order_objects {
- unsigned long x;
+ unsigned int x;
};
/*
@@ -84,11 +84,12 @@ struct kmem_cache {
/* Used for retriving partial slabs etc */
slab_flags_t flags;
unsigned long min_partial;
- int size; /* The size of an object including meta data */
- int object_size; /* The size of an object without meta data */
- int offset; /* Free pointer offset. */
+ unsigned int size; /* The size of an object including meta data */
+ unsigned int object_size;/* The size of an object without meta data */
+ unsigned int offset; /* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
- int cpu_partial; /* Number of per cpu partial objects to keep around */
+ /* Number of per cpu partial objects to keep around */
+ unsigned int cpu_partial;
#endif
struct kmem_cache_order_objects oo;
@@ -98,10 +99,10 @@ struct kmem_cache {
gfp_t allocflags; /* gfp flags to use on each alloc */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(void *);
- int inuse; /* Offset to metadata */
- int align; /* Alignment */
- int reserved; /* Reserved bytes at the end of slabs */
- int red_left_pad; /* Left redzone padding size */
+ unsigned int inuse; /* Offset to metadata */
+ unsigned int align; /* Alignment */
+ unsigned int reserved; /* Reserved bytes at the end of slabs */
+ unsigned int red_left_pad; /* Left redzone padding size */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SYSFS
@@ -110,7 +111,8 @@ struct kmem_cache {
#endif
#ifdef CONFIG_MEMCG
struct memcg_cache_params memcg_params;
- int max_attr_size; /* for propagation, maximum size of a stored attr */
+ /* for propagation, maximum size of a stored attr */
+ unsigned int max_attr_size;
#ifdef CONFIG_SYSFS
struct kset *memcg_kset;
#endif
@@ -124,7 +126,7 @@ struct kmem_cache {
/*
* Defragmentation by allocating from a remote node.
*/
- int remote_node_defrag_ratio;
+ unsigned int remote_node_defrag_ratio;
#endif
#ifdef CONFIG_SLAB_FREELIST_RANDOM
@@ -135,6 +137,9 @@ struct kmem_cache {
struct kasan_cache kasan_info;
#endif
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
+
struct kmem_cache_node *node[MAX_NUMNODES];
};
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h
index 12e548938bbb..8e884e0dda0a 100644
--- a/include/linux/soc/brcmstb/brcmstb.h
+++ b/include/linux/soc/brcmstb/brcmstb.h
@@ -13,12 +13,6 @@ static inline u32 BRCM_REV(u32 reg)
}
/*
- * Bus Interface Unit control register setup, must happen early during boot,
- * before SMP is brought up, called by machine entry point.
- */
-void brcmstb_biuctrl_init(void);
-
-/*
* Helper functions for getting family or product id from the
* SoC driver.
*/
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index e8d9f0d52933..fd25f0148566 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -21,6 +21,10 @@
#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1)
+#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2)
+#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8)
+
#define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17))
#define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25))
#define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \
@@ -28,7 +32,8 @@
#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
BIT(7) | BIT(8))
-int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask);
-int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask);
-
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask,
+ bool reg_update);
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask,
+ bool reg_update);
#endif /* __SOC_MEDIATEK_INFRACFG_H */
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index bd8e0864b059..5b98bbdabc25 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -14,6 +14,7 @@ struct firmware;
ssize_t qcom_mdt_get_size(const struct firmware *fw);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
- phys_addr_t mem_phys, size_t mem_size);
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base);
#endif
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
new file mode 100644
index 000000000000..f4de33654a60
--- /dev/null
+++ b/include/linux/soc/qcom/qmi.h
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, Linaro Ltd.
+ */
+#ifndef __QMI_HELPERS_H__
+#define __QMI_HELPERS_H__
+
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/qrtr.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct socket;
+
+/**
+ * qmi_header - wire format header of QMI messages
+ * @type: type of message
+ * @txn_id: transaction id
+ * @msg_id: message id
+ * @msg_len: length of message payload following header
+ */
+struct qmi_header {
+ u8 type;
+ u16 txn_id;
+ u16 msg_id;
+ u16 msg_len;
+} __packed;
+
+#define QMI_REQUEST 0
+#define QMI_RESPONSE 2
+#define QMI_INDICATION 4
+
+#define QMI_COMMON_TLV_TYPE 0
+
+enum qmi_elem_type {
+ QMI_EOTI,
+ QMI_OPT_FLAG,
+ QMI_DATA_LEN,
+ QMI_UNSIGNED_1_BYTE,
+ QMI_UNSIGNED_2_BYTE,
+ QMI_UNSIGNED_4_BYTE,
+ QMI_UNSIGNED_8_BYTE,
+ QMI_SIGNED_2_BYTE_ENUM,
+ QMI_SIGNED_4_BYTE_ENUM,
+ QMI_STRUCT,
+ QMI_STRING,
+};
+
+enum qmi_array_type {
+ NO_ARRAY,
+ STATIC_ARRAY,
+ VAR_LEN_ARRAY,
+};
+
+/**
+ * struct qmi_elem_info - describes how to encode a single QMI element
+ * @data_type: Data type of this element.
+ * @elem_len: Array length of this element, if an array.
+ * @elem_size: Size of a single instance of this data type.
+ * @array_type: Array type of this element.
+ * @tlv_type: QMI message specific type to identify which element
+ * is present in an incoming message.
+ * @offset: Specifies the offset of the first instance of this
+ * element in the data structure.
+ * @ei_array: Null-terminated array of @qmi_elem_info to describe nested
+ * structures.
+ */
+struct qmi_elem_info {
+ enum qmi_elem_type data_type;
+ u32 elem_len;
+ u32 elem_size;
+ enum qmi_array_type array_type;
+ u8 tlv_type;
+ u32 offset;
+ struct qmi_elem_info *ei_array;
+};
+
+#define QMI_RESULT_SUCCESS_V01 0
+#define QMI_RESULT_FAILURE_V01 1
+
+#define QMI_ERR_NONE_V01 0
+#define QMI_ERR_MALFORMED_MSG_V01 1
+#define QMI_ERR_NO_MEMORY_V01 2
+#define QMI_ERR_INTERNAL_V01 3
+#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5
+#define QMI_ERR_INVALID_ID_V01 41
+#define QMI_ERR_ENCODING_V01 58
+#define QMI_ERR_INCOMPATIBLE_STATE_V01 90
+#define QMI_ERR_NOT_SUPPORTED_V01 94
+
+/**
+ * qmi_response_type_v01 - common response header (decoded)
+ * @result: result of the transaction
+ * @error: error value, when @result is QMI_RESULT_FAILURE_V01
+ */
+struct qmi_response_type_v01 {
+ u16 result;
+ u16 error;
+};
+
+extern struct qmi_elem_info qmi_response_type_v01_ei[];
+
+/**
+ * struct qmi_service - context to track lookup-results
+ * @service: service type
+ * @version: version of the @service
+ * @instance: instance id of the @service
+ * @node: node of the service
+ * @port: port of the service
+ * @priv: handle for client's use
+ * @list_node: list_head for housekeeping
+ */
+struct qmi_service {
+ unsigned int service;
+ unsigned int version;
+ unsigned int instance;
+
+ unsigned int node;
+ unsigned int port;
+
+ void *priv;
+ struct list_head list_node;
+};
+
+struct qmi_handle;
+
+/**
+ * struct qmi_ops - callbacks for qmi_handle
+ * @new_server: inform client of a new_server lookup-result; returning
+ * success from this call causes the library to call
+ * @del_server when the service is removed from the
+ * lookup-result. @priv of the qmi_service can be used by
+ * the client
+ * @del_server: inform client of a del_server lookup-result
+ * @net_reset: inform client that the name service was restarted and
+ * that any state needs to be released
+ * @msg_handler: invoked for incoming messages, allows a client to
+ * override the usual QMI message handler
+ * @bye: inform a client that all clients from a node are gone
+ * @del_client: inform a client that a particular client is gone
+ */
+struct qmi_ops {
+ int (*new_server)(struct qmi_handle *qmi, struct qmi_service *svc);
+ void (*del_server)(struct qmi_handle *qmi, struct qmi_service *svc);
+ void (*net_reset)(struct qmi_handle *qmi);
+ void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ const void *data, size_t count);
+ void (*bye)(struct qmi_handle *qmi, unsigned int node);
+ void (*del_client)(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port);
+};
+
+/**
+ * struct qmi_txn - transaction context
+ * @qmi: QMI handle this transaction is associated with
+ * @id: transaction id
+ * @lock: for synchronization between handler and waiter of messages
+ * @completion: completion object, signaled as the transaction receives a response
+ * @result: result code for the completed transaction
+ * @ei: description of the QMI encoded response (optional)
+ * @dest: destination buffer to decode message into (optional)
+ */
+struct qmi_txn {
+ struct qmi_handle *qmi;
+
+ int id;
+
+ struct mutex lock;
+ struct completion completion;
+ int result;
+
+ struct qmi_elem_info *ei;
+ void *dest;
+};
+
+/**
+ * struct qmi_msg_handler - description of QMI message handler
+ * @type: type of message
+ * @msg_id: message id
+ * @ei: description of the QMI encoded message
+ * @decoded_size: size of the decoded object
+ * @fn: function to invoke as the message is decoded
+ */
+struct qmi_msg_handler {
+ unsigned int type;
+ unsigned int msg_id;
+
+ struct qmi_elem_info *ei;
+
+ size_t decoded_size;
+ void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *decoded);
+};
+
+/**
+ * struct qmi_handle - QMI context
+ * @sock: socket handle
+ * @sock_lock: synchronization of @sock modifications
+ * @sq: sockaddr of @sock
+ * @work: work for handling incoming messages
+ * @wq: workqueue to post @work on
+ * @recv_buf: scratch buffer for handling incoming messages
+ * @recv_buf_size: size of @recv_buf
+ * @lookups: list of registered lookup requests
+ * @lookup_results: list of lookup-results advertised to the client
+ * @services: list of registered services (by this client)
+ * @ops: reference to callbacks
+ * @txns: outstanding transactions
+ * @txn_lock: lock for modifications of @txns
+ * @handlers: list of handlers for incoming messages
+ */
+struct qmi_handle {
+ struct socket *sock;
+ struct mutex sock_lock;
+
+ struct sockaddr_qrtr sq;
+
+ struct work_struct work;
+ struct workqueue_struct *wq;
+
+ void *recv_buf;
+ size_t recv_buf_size;
+
+ struct list_head lookups;
+ struct list_head lookup_results;
+ struct list_head services;
+
+ struct qmi_ops ops;
+
+ struct idr txns;
+ struct mutex txn_lock;
+
+ const struct qmi_msg_handler *handlers;
+};
+
+int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance);
+int qmi_add_server(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance);
+
+int qmi_handle_init(struct qmi_handle *qmi, size_t max_msg_len,
+ const struct qmi_ops *ops,
+ const struct qmi_msg_handler *handlers);
+void qmi_handle_release(struct qmi_handle *qmi);
+
+ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ struct qmi_elem_info *ei, const void *c_struct);
+ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ struct qmi_elem_info *ei, const void *c_struct);
+ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ int msg_id, size_t len, struct qmi_elem_info *ei,
+ const void *c_struct);
+
+void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
+ unsigned int txn_id, struct qmi_elem_info *ei,
+ const void *c_struct);
+
+int qmi_decode_message(const void *buf, size_t len,
+ struct qmi_elem_info *ei, void *c_struct);
+
+int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
+ struct qmi_elem_info *ei, void *c_struct);
+int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout);
+void qmi_txn_cancel(struct qmi_txn *txn);
+
+#endif
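A minimal client-side sketch of the helpers above, assuming a made-up "ping" message: the struct layout, TLV type, message id, maximum encoded length and timeout are invented for illustration and are not defined by this header.

#include <linux/jiffies.h>
#include <linux/soc/qcom/qmi.h>
#include <linux/stddef.h>
#include <linux/types.h>

#define EXAMPLE_PING_REQ_MAX_LEN	7	/* worst-case encoded size */

struct example_ping_req {
	u8 ping;
};

/* Encode description: a single 1-byte element at TLV type 0x01 */
static struct qmi_elem_info example_ping_req_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct example_ping_req, ping),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};

static int example_send_ping(struct qmi_handle *qmi, struct sockaddr_qrtr *sq)
{
	struct example_ping_req req = { .ping = 1 };
	struct qmi_txn txn;
	int ret;

	/* No decoded response is expected in this sketch, hence NULL ei/dest */
	ret = qmi_txn_init(qmi, &txn, NULL, NULL);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(qmi, sq, &txn, 0x20 /* msg_id */,
			       EXAMPLE_PING_REQ_MAX_LEN,
			       example_ping_req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	return qmi_txn_wait(&txn, 5 * HZ);
}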
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 9f5c6e53f3a5..9e4fdd861a51 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -10,6 +10,7 @@ struct qcom_smd_rpm;
/*
* Constants used for addressing resources in the RPM.
*/
+#define QCOM_SMD_RPM_BOBB 0x62626f62
#define QCOM_SMD_RPM_BOOST 0x61747362
#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
#define QCOM_SMD_RPM_BUS_MASTER 0x73616d62
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index e57eb4b6cc5a..fc0b445bb36b 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Header for EXYNOS PMU Driver support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SOC_EXYNOS_PMU_H
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index bebdde5dccd6..66dcb9ec273a 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2010-2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - Power management unit definition
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
* Notice:
* This is not a list of all Exynos Power Management Unit SFRs.
* There are too many of them, not mentioning subtle differences
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 9286a5a8c60c..ea50f4a65816 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -287,6 +287,7 @@ struct ucred {
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
+#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
@@ -346,11 +347,40 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
struct timespec;
-/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
-extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
-extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
+ * forbid_cmsg_compat==false
+ */
+extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg,
+ unsigned int flags, bool forbid_cmsg_compat);
+extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg,
+ unsigned int flags, bool forbid_cmsg_compat);
extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
unsigned int flags, struct timespec *timeout);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
- unsigned int vlen, unsigned int flags);
+ unsigned int vlen, unsigned int flags,
+ bool forbid_cmsg_compat);
+
+/* helpers which do the actual work for syscalls */
+extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
+ unsigned int flags, struct sockaddr __user *addr,
+ int __user *addr_len);
+extern int __sys_sendto(int fd, void __user *buff, size_t len,
+ unsigned int flags, struct sockaddr __user *addr,
+ int addr_len);
+extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags);
+extern int __sys_socket(int family, int type, int protocol);
+extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
+extern int __sys_connect(int fd, struct sockaddr __user *uservaddr,
+ int addrlen);
+extern int __sys_listen(int fd, int backlog);
+extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
+ int __user *usockaddr_len);
+extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
+ int __user *usockaddr_len);
+extern int __sys_socketpair(int family, int type, int protocol,
+ int __user *usockvec);
+extern int __sys_shutdown(int fd, int how);
+
+extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
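A brief, hedged illustration of how a syscall definition can forward to one of the in-kernel helpers declared above instead of calling the sys_*() entry point; this mirrors the usual pattern in net/socket.c, but is written here only as a sketch.

#include <linux/syscalls.h>

/* The syscall wrapper stays thin; the helper does the real work */
SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	return __sys_listen(fd, backlog);
}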
diff --git a/include/linux/sound.h b/include/linux/sound.h
index 3c6d393c7f29..ec85b7a1f8d1 100644
--- a/include/linux/sound.h
+++ b/include/linux/sound.h
@@ -12,11 +12,9 @@ struct device;
extern int register_sound_special(const struct file_operations *fops, int unit);
extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev);
extern int register_sound_mixer(const struct file_operations *fops, int dev);
-extern int register_sound_midi(const struct file_operations *fops, int dev);
extern int register_sound_dsp(const struct file_operations *fops, int dev);
extern void unregister_sound_special(int unit);
extern void unregister_sound_mixer(int unit);
-extern void unregister_sound_midi(int unit);
extern void unregister_sound_dsp(int unit);
#endif /* _LINUX_SOUND_H */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
new file mode 100644
index 000000000000..e91fdcf41049
--- /dev/null
+++ b/include/linux/soundwire/sdw.h
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SOUNDWIRE_H
+#define __SOUNDWIRE_H
+
+struct sdw_bus;
+struct sdw_slave;
+
+/* SDW spec defines and enums, as defined by the MIPI 1.1 Spec */
+
+/* SDW Broadcast Device Number */
+#define SDW_BROADCAST_DEV_NUM 15
+
+/* SDW Enumeration Device Number */
+#define SDW_ENUM_DEV_NUM 0
+
+/* SDW Group Device Numbers */
+#define SDW_GROUP12_DEV_NUM 12
+#define SDW_GROUP13_DEV_NUM 13
+
+/* SDW Master Device Number, not supported yet */
+#define SDW_MASTER_DEV_NUM 14
+
+#define SDW_NUM_DEV_ID_REGISTERS 6
+
+#define SDW_MAX_DEVICES 11
+
+/**
+ * enum sdw_slave_status - Slave status
+ * @SDW_SLAVE_UNATTACHED: Slave is not attached to the bus.
+ * @SDW_SLAVE_ATTACHED: Slave is attached to the bus.
+ * @SDW_SLAVE_ALERT: Some alert condition on the Slave
+ * @SDW_SLAVE_RESERVED: Reserved for future use
+ */
+enum sdw_slave_status {
+ SDW_SLAVE_UNATTACHED = 0,
+ SDW_SLAVE_ATTACHED = 1,
+ SDW_SLAVE_ALERT = 2,
+ SDW_SLAVE_RESERVED = 3,
+};
+
+/**
+ * enum sdw_command_response - Command response as defined by SDW spec
+ * @SDW_CMD_OK: cmd was successful
+ * @SDW_CMD_IGNORED: cmd was ignored
+ * @SDW_CMD_FAIL: cmd was NACKed
+ * @SDW_CMD_TIMEOUT: cmd timed out
+ * @SDW_CMD_FAIL_OTHER: cmd failed due to other reason than above
+ *
+ * NOTE: The enum differs from the actual Spec, as the response in the Spec
+ * is a combination of ACK/NAK bits
+ *
+ * SDW_CMD_TIMEOUT/FAIL_OTHER are defined for SW use, not in the spec
+ */
+enum sdw_command_response {
+ SDW_CMD_OK = 0,
+ SDW_CMD_IGNORED = 1,
+ SDW_CMD_FAIL = 2,
+ SDW_CMD_TIMEOUT = 3,
+ SDW_CMD_FAIL_OTHER = 4,
+};
+
+/*
+ * SDW properties, defined in MIPI DisCo spec v1.0
+ */
+enum sdw_clk_stop_reset_behave {
+ SDW_CLK_STOP_KEEP_STATUS = 1,
+};
+
+/**
+ * enum sdw_p15_behave - Slave Port 15 behaviour when the Master attempts a
+ * read
+ * @SDW_P15_READ_IGNORED: Read is ignored
+ * @SDW_P15_CMD_OK: Command is ok
+ */
+enum sdw_p15_behave {
+ SDW_P15_READ_IGNORED = 0,
+ SDW_P15_CMD_OK = 1,
+};
+
+/**
+ * enum sdw_dpn_type - Data port types
+ * @SDW_DPN_FULL: Full Data Port is supported
+ * @SDW_DPN_SIMPLE: Simplified Data Port as defined in spec.
+ * DPN_SampleCtrl2, DPN_OffsetCtrl2, DPN_HCtrl and DPN_BlockCtrl3
+ * are not implemented.
+ * @SDW_DPN_REDUCED: Reduced Data Port as defined in spec.
+ * DPN_SampleCtrl2, DPN_HCtrl are not implemented.
+ */
+enum sdw_dpn_type {
+ SDW_DPN_FULL = 0,
+ SDW_DPN_SIMPLE = 1,
+ SDW_DPN_REDUCED = 2,
+};
+
+/**
+ * enum sdw_clk_stop_mode - Clock Stop modes
+ * @SDW_CLK_STOP_MODE0: Slave can continue operation seamlessly on clock
+ * restart
+ * @SDW_CLK_STOP_MODE1: Slave may have entered a deeper power-saving mode,
+ * not capable of continuing operation seamlessly when the clock restarts
+ */
+enum sdw_clk_stop_mode {
+ SDW_CLK_STOP_MODE0 = 0,
+ SDW_CLK_STOP_MODE1 = 1,
+};
+
+/**
+ * struct sdw_dp0_prop - DP0 properties
+ * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64
+ * (inclusive)
+ * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64
+ * (inclusive)
+ * @num_words: number of wordlengths supported
+ * @words: wordlengths supported
+ * @flow_controlled: Slave implementation results in an OK_NotReady
+ * response
+ * @simple_ch_prep_sm: If channel prepare sequence is required
+ * @device_interrupts: If implementation-defined interrupts are supported
+ *
+ * The wordlengths are specified by the Spec as max, min AND a number of
+ * discrete values; an implementation can define them based on the
+ * wordlengths it supports
+ */
+struct sdw_dp0_prop {
+ u32 max_word;
+ u32 min_word;
+ u32 num_words;
+ u32 *words;
+ bool flow_controlled;
+ bool simple_ch_prep_sm;
+ bool device_interrupts;
+};
+
+/**
+ * struct sdw_dpn_audio_mode - Audio mode properties for DPn
+ * @bus_min_freq: Minimum bus frequency, in Hz
+ * @bus_max_freq: Maximum bus frequency, in Hz
+ * @bus_num_freq: Number of discrete frequencies supported
+ * @bus_freq: Discrete bus frequencies, in Hz
+ * @min_freq: Minimum sampling frequency, in Hz
+ * @max_freq: Maximum sampling frequency, in Hz
+ * @num_freq: Number of discrete sampling frequencies supported
+ * @freq: Discrete sampling frequencies, in Hz
+ * @prep_ch_behave: Specifies the dependencies between Channel Prepare
+ * sequence and bus clock configuration
+ * If 0, Channel Prepare can happen at any Bus clock rate
+ * If 1, Channel Prepare sequence shall happen only after Bus clock is
+ * changed to a frequency supported by this mode or compatible modes
+ * described by the next field
+ * @glitchless: Bitmap describing possible glitchless transitions from this
+ * Audio Mode to other Audio Modes
+ */
+struct sdw_dpn_audio_mode {
+ u32 bus_min_freq;
+ u32 bus_max_freq;
+ u32 bus_num_freq;
+ u32 *bus_freq;
+ u32 max_freq;
+ u32 min_freq;
+ u32 num_freq;
+ u32 *freq;
+ u32 prep_ch_behave;
+ u32 glitchless;
+};
+
+/**
+ * struct sdw_dpn_prop - Data Port DPn properties
+ * @num: port number
+ * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64
+ * (inclusive)
+ * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64
+ * (inclusive)
+ * @num_words: Number of discrete supported wordlengths
+ * @words: Discrete supported wordlengths
+ * @type: Data port type. Full, Simplified or Reduced
+ * @max_grouping: Maximum number of samples that can be grouped together for
+ * a full data port
+ * @simple_ch_prep_sm: If the port supports simplified channel prepare state
+ * machine
+ * @ch_prep_timeout: Port-specific timeout value, in milliseconds
+ * @device_interrupts: If set, each bit corresponds to support for
+ * implementation-defined interrupts
+ * @max_ch: Maximum channels supported
+ * @min_ch: Minimum channels supported
+ * @num_ch: Number of discrete channels supported
+ * @ch: Discrete channels supported
+ * @num_ch_combinations: Number of channel combinations supported
+ * @ch_combinations: Channel combinations supported
+ * @modes: SDW modes supported
+ * @max_async_buffer: Number of samples that this port can buffer in
+ * asynchronous modes
+ * @block_pack_mode: Type of block port mode supported
+ * @port_encoding: Payload Channel Sample encoding schemes supported
+ * @audio_modes: Audio modes supported
+ */
+struct sdw_dpn_prop {
+ u32 num;
+ u32 max_word;
+ u32 min_word;
+ u32 num_words;
+ u32 *words;
+ enum sdw_dpn_type type;
+ u32 max_grouping;
+ bool simple_ch_prep_sm;
+ u32 ch_prep_timeout;
+ u32 device_interrupts;
+ u32 max_ch;
+ u32 min_ch;
+ u32 num_ch;
+ u32 *ch;
+ u32 num_ch_combinations;
+ u32 *ch_combinations;
+ u32 modes;
+ u32 max_async_buffer;
+ bool block_pack_mode;
+ u32 port_encoding;
+ struct sdw_dpn_audio_mode *audio_modes;
+};
+
+/**
+ * struct sdw_slave_prop - SoundWire Slave properties
+ * @mipi_revision: Spec version of the implementation
+ * @wake_capable: Wake-up events are supported
+ * @test_mode_capable: If test mode is supported
+ * @clk_stop_mode1: Clock-Stop Mode 1 is supported
+ * @simple_clk_stop_capable: Simple clock mode is supported
+ * @clk_stop_timeout: Worst-case latency of the Clock Stop Prepare State
+ * Machine transitions, in milliseconds
+ * @ch_prep_timeout: Worst-case latency of the Channel Prepare State Machine
+ * transitions, in milliseconds
+ * @reset_behave: Slave keeps the status of the SlaveStopClockPrepare
+ * state machine (P=1 SCSP_SM) after exit from clock-stop mode1
+ * @high_PHY_capable: Slave is HighPHY capable
+ * @paging_support: Slave implements paging registers SCP_AddrPage1 and
+ * SCP_AddrPage2
+ * @bank_delay_support: Slave implements bank delay/bridge support registers
+ * SCP_BankDelay and SCP_NextFrame
+ * @p15_behave: Slave behavior when the Master attempts a read to the Port15
+ * alias
+ * @lane_control_support: Slave supports lane control
+ * @master_count: Number of Masters present on this Slave
+ * @source_ports: Bitmap identifying source ports
+ * @sink_ports: Bitmap identifying sink ports
+ * @dp0_prop: Data Port 0 properties
+ * @src_dpn_prop: Source Data Port N properties
+ * @sink_dpn_prop: Sink Data Port N properties
+ */
+struct sdw_slave_prop {
+ u32 mipi_revision;
+ bool wake_capable;
+ bool test_mode_capable;
+ bool clk_stop_mode1;
+ bool simple_clk_stop_capable;
+ u32 clk_stop_timeout;
+ u32 ch_prep_timeout;
+ enum sdw_clk_stop_reset_behave reset_behave;
+ bool high_PHY_capable;
+ bool paging_support;
+ bool bank_delay_support;
+ enum sdw_p15_behave p15_behave;
+ bool lane_control_support;
+ u32 master_count;
+ u32 source_ports;
+ u32 sink_ports;
+ struct sdw_dp0_prop *dp0_prop;
+ struct sdw_dpn_prop *src_dpn_prop;
+ struct sdw_dpn_prop *sink_dpn_prop;
+};
+
+/**
+ * struct sdw_master_prop - Master properties
+ * @revision: MIPI spec version of the implementation
+ * @master_count: Number of masters
+ * @clk_stop_mode: Bitmap for Clock Stop modes supported
+ * @max_freq: Maximum Bus clock frequency, in Hz
+ * @num_clk_gears: Number of clock gears supported
+ * @clk_gears: Clock gears supported
+ * @num_freq: Number of clock frequencies supported
+ * @freq: Clock frequencies supported, in Hz
+ * @default_frame_rate: Controller default Frame rate, in Hz
+ * @default_row: Number of rows
+ * @default_col: Number of columns
+ * @dynamic_frame: Dynamic frame supported
+ * @err_threshold: Number of times that software may retry sending a single
+ * command
+ * @dpn_prop: Data Port N properties
+ */
+struct sdw_master_prop {
+ u32 revision;
+ u32 master_count;
+ enum sdw_clk_stop_mode clk_stop_mode;
+ u32 max_freq;
+ u32 num_clk_gears;
+ u32 *clk_gears;
+ u32 num_freq;
+ u32 *freq;
+ u32 default_frame_rate;
+ u32 default_row;
+ u32 default_col;
+ bool dynamic_frame;
+ u32 err_threshold;
+ struct sdw_dpn_prop *dpn_prop;
+};
+
+int sdw_master_read_prop(struct sdw_bus *bus);
+int sdw_slave_read_prop(struct sdw_slave *slave);
+
+/*
+ * SDW Slave Structures and APIs
+ */
+
+/**
+ * struct sdw_slave_id - Slave ID
+ * @mfg_id: MIPI Manufacturer ID
+ * @part_id: Device Part ID
+ * @class_id: MIPI Class ID, unused now.
+ * Currently a placeholder in MIPI SoundWire Spec
+ * @unique_id: Device unique ID
+ * @sdw_version: SDW version implemented
+ *
+ * The order of the IDs here does not follow the DisCo spec definitions
+ */
+struct sdw_slave_id {
+ __u16 mfg_id;
+ __u16 part_id;
+ __u8 class_id;
+ __u8 unique_id:4;
+ __u8 sdw_version:4;
+};
+
+/**
+ * struct sdw_slave_intr_status - Slave interrupt status
+ * @control_port: control port status
+ * @port: data port status
+ */
+struct sdw_slave_intr_status {
+ u8 control_port;
+ u8 port[15];
+};
+
+/**
+ * struct sdw_slave_ops - Slave driver callback ops
+ * @read_prop: Read Slave properties
+ * @interrupt_callback: Device interrupt notification (invoked in thread
+ * context)
+ * @update_status: Update Slave status
+ */
+struct sdw_slave_ops {
+ int (*read_prop)(struct sdw_slave *sdw);
+ int (*interrupt_callback)(struct sdw_slave *slave,
+ struct sdw_slave_intr_status *status);
+ int (*update_status)(struct sdw_slave *slave,
+ enum sdw_slave_status status);
+};
+
+/**
+ * struct sdw_slave - SoundWire Slave
+ * @id: MIPI device ID
+ * @dev: Linux device
+ * @status: Status reported by the Slave
+ * @bus: Bus handle
+ * @ops: Slave callback ops
+ * @prop: Slave properties
+ * @node: node for bus list
+ * @port_ready: Port ready completion flag for each Slave port
+ * @dev_num: Device Number assigned by Bus
+ */
+struct sdw_slave {
+ struct sdw_slave_id id;
+ struct device dev;
+ enum sdw_slave_status status;
+ struct sdw_bus *bus;
+ const struct sdw_slave_ops *ops;
+ struct sdw_slave_prop prop;
+ struct list_head node;
+ struct completion *port_ready;
+ u16 dev_num;
+};
+
+#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
+
+struct sdw_driver {
+ const char *name;
+
+ int (*probe)(struct sdw_slave *sdw,
+ const struct sdw_device_id *id);
+ int (*remove)(struct sdw_slave *sdw);
+ void (*shutdown)(struct sdw_slave *sdw);
+
+ const struct sdw_device_id *id_table;
+ const struct sdw_slave_ops *ops;
+
+ struct device_driver driver;
+};
+
+#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \
+ { .mfg_id = (_mfg_id), .part_id = (_part_id), \
+ .driver_data = (unsigned long)(_drv_data) }
+
+int sdw_handle_slave_status(struct sdw_bus *bus,
+ enum sdw_slave_status status[]);
+
+/*
+ * SDW master structures and APIs
+ */
+
+struct sdw_msg;
+
+/**
+ * struct sdw_defer - SDW deferred message
+ * @length: message length
+ * @complete: message completion
+ * @msg: SDW message
+ */
+struct sdw_defer {
+ int length;
+ struct completion complete;
+ struct sdw_msg *msg;
+};
+
+/**
+ * struct sdw_master_ops - Master driver ops
+ * @read_prop: Read Master properties
+ * @xfer_msg: Transfer message callback
+ * @xfer_msg_defer: Defer version of transfer message callback
+ * @reset_page_addr: Reset the SCP page address registers
+ */
+struct sdw_master_ops {
+ int (*read_prop)(struct sdw_bus *bus);
+
+ enum sdw_command_response (*xfer_msg)
+ (struct sdw_bus *bus, struct sdw_msg *msg);
+ enum sdw_command_response (*xfer_msg_defer)
+ (struct sdw_bus *bus, struct sdw_msg *msg,
+ struct sdw_defer *defer);
+ enum sdw_command_response (*reset_page_addr)
+ (struct sdw_bus *bus, unsigned int dev_num);
+};
+
+/**
+ * struct sdw_bus - SoundWire bus
+ * @dev: Master linux device
+ * @link_id: Link id number, can be 0 to N, unique for each Master
+ * @slaves: list of Slaves on this bus
+ * @assigned: Bitmap for Slave device numbers.
+ * Bit set implies used number, bit clear implies unused number.
+ * @bus_lock: bus lock
+ * @msg_lock: message lock
+ * @ops: Master callback ops
+ * @prop: Master properties
+ * @defer_msg: Defer message
+ * @clk_stop_timeout: Clock stop timeout computed
+ */
+struct sdw_bus {
+ struct device *dev;
+ unsigned int link_id;
+ struct list_head slaves;
+ DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
+ struct mutex bus_lock;
+ struct mutex msg_lock;
+ const struct sdw_master_ops *ops;
+ struct sdw_master_prop prop;
+ struct sdw_defer defer_msg;
+ unsigned int clk_stop_timeout;
+};
+
+int sdw_add_bus_master(struct sdw_bus *bus);
+void sdw_delete_bus_master(struct sdw_bus *bus);
+
+/* messaging and data APIs */
+
+int sdw_read(struct sdw_slave *slave, u32 addr);
+int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
+int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+
+#endif /* __SOUNDWIRE_H */
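An illustrative Slave-side sketch of the ops above: an update_status() callback that pokes an implementation-defined register once the Slave reports attached. The register offset 0x2000 and the function names are assumptions for this example only.

#include <linux/bitops.h>
#include <linux/soundwire/sdw.h>

static int example_update_status(struct sdw_slave *slave,
				 enum sdw_slave_status status)
{
	int val;

	if (status != SDW_SLAVE_ATTACHED)
		return 0;

	/* Single-byte register accesses over the SoundWire bus */
	val = sdw_read(slave, 0x2000);
	if (val < 0)
		return val;

	return sdw_write(slave, 0x2000, val | BIT(0));
}

static const struct sdw_slave_ops example_slave_ops = {
	.update_status = example_update_status,
};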
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
new file mode 100644
index 000000000000..4b37528f592d
--- /dev/null
+++ b/include/linux/soundwire/sdw_intel.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SDW_INTEL_H
+#define __SDW_INTEL_H
+
+/**
+ * struct sdw_intel_res - SoundWire Intel resource structure
+ * @mmio_base: mmio base of SoundWire registers
+ * @irq: interrupt number
+ * @handle: ACPI parent handle
+ * @parent: parent device
+ */
+struct sdw_intel_res {
+ void __iomem *mmio_base;
+ int irq;
+ acpi_handle handle;
+ struct device *parent;
+};
+
+void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res);
+void sdw_intel_exit(void *arg);
+
+#endif
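A hedged sketch of wiring sdw_intel_init()/sdw_intel_exit() from a parent driver; the resource values are placeholders supplied by that (hypothetical) parent, not defined in this header.

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/soundwire/sdw_intel.h>

static void *example_sdw_ctx;

static int example_link_init(struct device *parent, void __iomem *mmio,
			     int irq)
{
	acpi_handle handle = ACPI_HANDLE(parent);
	struct sdw_intel_res res = {
		.mmio_base = mmio,
		.irq = irq,
		.handle = handle,
		.parent = parent,
	};

	example_sdw_ctx = sdw_intel_init(&handle, &res);
	return example_sdw_ctx ? 0 : -ENODEV;
}

static void example_link_exit(void)
{
	sdw_intel_exit(example_sdw_ctx);
}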
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
new file mode 100644
index 000000000000..df472b1ab410
--- /dev/null
+++ b/include/linux/soundwire/sdw_registers.h
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SDW_REGISTERS_H
+#define __SDW_REGISTERS_H
+
+/*
+ * Typically we define registers and shifts, but the shift can be generated
+ * from the MASK using bit primitives like ffs(), so we use that and avoid
+ * defining shifts
+ */
+#define SDW_REG_SHIFT(n) (ffs(n) - 1)
+
+/*
+ * SDW registers as defined by MIPI 1.1 Spec
+ */
+#define SDW_REGADDR GENMASK(14, 0)
+#define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15)
+#define SDW_SCP_ADDRPAGE1_MASK GENMASK(30, 23)
+
+#define SDW_REG_NO_PAGE 0x00008000
+#define SDW_REG_OPTIONAL_PAGE 0x00010000
+#define SDW_REG_MAX 0x80000000
+
+#define SDW_DPN_SIZE 0x100
+#define SDW_BANK1_OFFSET 0x10
+
+/*
+ * DP0 Interrupt register & bits
+ *
+ * The Spec treats Status (RO) and Clear (WC) as separate, but they share the
+ * same address, so treat them as the same register with WC.
+ */
+
+/* Both the INT and STATUS registers share the same address */
+#define SDW_DP0_INT 0x0
+#define SDW_DP0_INTMASK 0x1
+#define SDW_DP0_PORTCTRL 0x2
+#define SDW_DP0_BLOCKCTRL1 0x3
+#define SDW_DP0_PREPARESTATUS 0x4
+#define SDW_DP0_PREPARECTRL 0x5
+
+#define SDW_DP0_INT_TEST_FAIL BIT(0)
+#define SDW_DP0_INT_PORT_READY BIT(1)
+#define SDW_DP0_INT_BRA_FAILURE BIT(2)
+#define SDW_DP0_INT_IMPDEF1 BIT(5)
+#define SDW_DP0_INT_IMPDEF2 BIT(6)
+#define SDW_DP0_INT_IMPDEF3 BIT(7)
+
+#define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2)
+#define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4)
+#define SDW_DP0_PORTCTRL_BPT_PAYLD GENMASK(7, 6)
+
+#define SDW_DP0_CHANNELEN 0x20
+#define SDW_DP0_SAMPLECTRL1 0x22
+#define SDW_DP0_SAMPLECTRL2 0x23
+#define SDW_DP0_OFFSETCTRL1 0x24
+#define SDW_DP0_OFFSETCTRL2 0x25
+#define SDW_DP0_HCTRL 0x26
+#define SDW_DP0_LANECTRL 0x28
+
+/* Both the INT and STATUS registers share the same address */
+#define SDW_SCP_INT1 0x40
+#define SDW_SCP_INTMASK1 0x41
+
+#define SDW_SCP_INT1_PARITY BIT(0)
+#define SDW_SCP_INT1_BUS_CLASH BIT(1)
+#define SDW_SCP_INT1_IMPL_DEF BIT(2)
+#define SDW_SCP_INT1_SCP2_CASCADE BIT(7)
+#define SDW_SCP_INT1_PORT0_3 GENMASK(6, 3)
+
+#define SDW_SCP_INTSTAT2 0x42
+#define SDW_SCP_INTSTAT2_SCP3_CASCADE BIT(7)
+#define SDW_SCP_INTSTAT2_PORT4_10 GENMASK(6, 0)
+
+
+#define SDW_SCP_INTSTAT3 0x43
+#define SDW_SCP_INTSTAT3_PORT11_14 GENMASK(3, 0)
+
+/* Number of interrupt status registers */
+#define SDW_NUM_INT_STAT_REGISTERS 3
+
+/* Number of interrupt clear registers */
+#define SDW_NUM_INT_CLEAR_REGISTERS 1
+
+#define SDW_SCP_CTRL 0x44
+#define SDW_SCP_CTRL_CLK_STP_NOW BIT(1)
+#define SDW_SCP_CTRL_FORCE_RESET BIT(7)
+
+#define SDW_SCP_STAT 0x44
+#define SDW_SCP_STAT_CLK_STP_NF BIT(0)
+#define SDW_SCP_STAT_HPHY_NOK BIT(5)
+#define SDW_SCP_STAT_CURR_BANK BIT(6)
+
+#define SDW_SCP_SYSTEMCTRL 0x45
+#define SDW_SCP_SYSTEMCTRL_CLK_STP_PREP BIT(0)
+#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE BIT(2)
+#define SDW_SCP_SYSTEMCTRL_WAKE_UP_EN BIT(3)
+#define SDW_SCP_SYSTEMCTRL_HIGH_PHY BIT(4)
+
+#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE0 0
+#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1 BIT(2)
+
+#define SDW_SCP_DEVNUMBER 0x46
+#define SDW_SCP_HIGH_PHY_CHECK 0x47
+#define SDW_SCP_ADDRPAGE1 0x48
+#define SDW_SCP_ADDRPAGE2 0x49
+#define SDW_SCP_KEEPEREN 0x4A
+#define SDW_SCP_BANKDELAY 0x4B
+#define SDW_SCP_TESTMODE 0x4F
+#define SDW_SCP_DEVID_0 0x50
+#define SDW_SCP_DEVID_1 0x51
+#define SDW_SCP_DEVID_2 0x52
+#define SDW_SCP_DEVID_3 0x53
+#define SDW_SCP_DEVID_4 0x54
+#define SDW_SCP_DEVID_5 0x55
+
+/* Banked Registers */
+#define SDW_SCP_FRAMECTRL_B0 0x60
+#define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET)
+#define SDW_SCP_NEXTFRAME_B0 0x61
+#define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET)
+
+/* Both the INT and STATUS registers share the same address */
+#define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_PORTCTRL(n) (0x2 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_BLOCKCTRL1(n) (0x3 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_PREPARESTATUS(n) (0x4 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_PREPARECTRL(n) (0x5 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_INT_TEST_FAIL BIT(0)
+#define SDW_DPN_INT_PORT_READY BIT(1)
+#define SDW_DPN_INT_IMPDEF1 BIT(5)
+#define SDW_DPN_INT_IMPDEF2 BIT(6)
+#define SDW_DPN_INT_IMPDEF3 BIT(7)
+
+#define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0)
+#define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2)
+#define SDW_DPN_PORTCTRL_NXTINVBANK BIT(4)
+
+#define SDW_DPN_BLOCKCTRL1_WDLEN GENMASK(5, 0)
+
+#define SDW_DPN_PREPARECTRL_CH_PREP GENMASK(7, 0)
+
+#define SDW_DPN_CHANNELEN_B0(n) (0x20 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_CHANNELEN_B1(n) (0x30 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_BLOCKCTRL2_B0(n) (0x21 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_BLOCKCTRL2_B1(n) (0x31 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_SAMPLECTRL1_B0(n) (0x22 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_SAMPLECTRL1_B1(n) (0x32 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_SAMPLECTRL2_B0(n) (0x23 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_SAMPLECTRL2_B1(n) (0x33 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_OFFSETCTRL1_B0(n) (0x24 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_OFFSETCTRL1_B1(n) (0x34 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_OFFSETCTRL2_B0(n) (0x25 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_OFFSETCTRL2_B1(n) (0x35 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_HCTRL_B0(n) (0x26 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_HCTRL_B1(n) (0x36 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_BLOCKCTRL3_B0(n) (0x27 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_BLOCKCTRL3_B1(n) (0x37 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_LANECTRL_B0(n) (0x28 + SDW_DPN_SIZE * (n))
+#define SDW_DPN_LANECTRL_B1(n) (0x38 + SDW_DPN_SIZE * (n))
+
+#define SDW_DPN_SAMPLECTRL_LOW GENMASK(7, 0)
+#define SDW_DPN_SAMPLECTRL_HIGH GENMASK(15, 8)
+
+#define SDW_DPN_HCTRL_HSTART GENMASK(7, 4)
+#define SDW_DPN_HCTRL_HSTOP GENMASK(3, 0)
+
+#define SDW_NUM_CASC_PORT_INTSTAT1 4
+#define SDW_CASC_PORT_START_INTSTAT1 0
+#define SDW_CASC_PORT_MASK_INTSTAT1 0x8
+#define SDW_CASC_PORT_REG_OFFSET_INTSTAT1 0x0
+
+#define SDW_NUM_CASC_PORT_INTSTAT2 7
+#define SDW_CASC_PORT_START_INTSTAT2 4
+#define SDW_CASC_PORT_MASK_INTSTAT2 1
+#define SDW_CASC_PORT_REG_OFFSET_INTSTAT2 1
+
+#define SDW_NUM_CASC_PORT_INTSTAT3 4
+#define SDW_CASC_PORT_START_INTSTAT3 11
+#define SDW_CASC_PORT_MASK_INTSTAT3 1
+#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2
+
+#endif /* __SDW_REGISTERS_H */
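A small illustration (not part of the header) of the SDW_REG_SHIFT() idiom: extracting a masked field without a separately defined shift.

#include <linux/soundwire/sdw_registers.h>

static inline u8 example_get_hstart(u8 hctrl)
{
	/* HSTART occupies bits 7:4; ffs(mask) - 1 recovers the shift (4) */
	return (hctrl & SDW_DPN_HCTRL_HSTART) >>
	       SDW_REG_SHIFT(SDW_DPN_HCTRL_HSTART);
}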
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
new file mode 100644
index 000000000000..9fd553e553e9
--- /dev/null
+++ b/include/linux/soundwire/sdw_type.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#ifndef __SOUNDWIRE_TYPES_H
+#define __SOUNDWIRE_TYPES_H
+
+extern struct bus_type sdw_bus_type;
+
+#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
+
+#define sdw_register_driver(drv) \
+ __sdw_register_driver(drv, THIS_MODULE)
+
+int __sdw_register_driver(struct sdw_driver *drv, struct module *);
+void sdw_unregister_driver(struct sdw_driver *drv);
+
+int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
+
+#endif /* __SOUNDWIRE_TYPES_H */
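A minimal, hedged registration sketch built around the declarations above; the manufacturer/part IDs, names and empty callbacks are placeholders for illustration, not real devices.

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>

static int example_read_prop(struct sdw_slave *slave)
{
	/* A real driver would fill slave->prop here */
	return 0;
}

static const struct sdw_slave_ops example_ops = {
	.read_prop = example_read_prop,
};

static const struct sdw_device_id example_id[] = {
	SDW_SLAVE_ENTRY(0x1234, 0x5678, 0),	/* hypothetical mfg/part id */
	{},
};
MODULE_DEVICE_TABLE(sdw, example_id);

static int example_probe(struct sdw_slave *slave,
			 const struct sdw_device_id *id)
{
	return 0;
}

static struct sdw_driver example_sdw_driver = {
	.driver = {
		.name = "example-sdw",
	},
	.probe = example_probe,
	.ops = &example_ops,
	.id_table = example_id,
};

static int __init example_init(void)
{
	return sdw_register_driver(&example_sdw_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	sdw_unregister_driver(&example_sdw_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");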
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h
index e7bd89a59cd1..9e7e83d8645b 100644
--- a/include/linux/spi/spi_gpio.h
+++ b/include/linux/spi/spi_gpio.h
@@ -8,64 +8,17 @@
* - id the same as the SPI bus number it implements
* - dev.platform data pointing to a struct spi_gpio_platform_data
*
- * Or, see the driver code for information about speedups that are
- * possible on platforms that support inlined access for GPIOs (no
- * spi_gpio_platform_data is used).
- *
- * Use spi_board_info with these busses in the usual way, being sure
- * that the controller_data being the GPIO used for each device's
- * chipselect:
- *
- * static struct spi_board_info ... [] = {
- * ...
- * // this slave uses GPIO 42 for its chipselect
- * .controller_data = (void *) 42,
- * ...
- * // this one uses GPIO 86 for its chipselect
- * .controller_data = (void *) 86,
- * ...
- * };
- *
- * If chipselect is not used (there's only one device on the bus), assign
- * SPI_GPIO_NO_CHIPSELECT to the controller_data:
- * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT;
- *
- * If the MISO or MOSI pin is not available then it should be set to
- * SPI_GPIO_NO_MISO or SPI_GPIO_NO_MOSI.
+ * Use spi_board_info with these busses in the usual way.
*
* If the bitbanged bus is later switched to a "native" controller,
* that platform_device and controller_data should be removed.
*/
-#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l)
-#define SPI_GPIO_NO_MISO ((unsigned long)-1l)
-#define SPI_GPIO_NO_MOSI ((unsigned long)-1l)
-
/**
* struct spi_gpio_platform_data - parameter for bitbanged SPI master
- * @sck: number of the GPIO used for clock output
- * @mosi: number of the GPIO used for Master Output, Slave In (MOSI) data
- * @miso: number of the GPIO used for Master Input, Slave Output (MISO) data
* @num_chipselect: how many slaves to allow
- *
- * All GPIO signals used with the SPI bus managed through this driver
- * (chipselects, MOSI, MISO, SCK) must be configured as GPIOs, instead
- * of some alternate function.
- *
- * It can be convenient to use this driver with pins that have alternate
- * functions associated with a "native" SPI controller if a driver for that
- * controller is not available, or is missing important functionality.
- *
- * On platforms which can do so, configure MISO with a weak pullup unless
- * there's an external pullup on that signal. That saves power by avoiding
- * floating signals. (A weak pulldown would save power too, but many
- * drivers expect to see all-ones data as the no slave "response".)
*/
struct spi_gpio_platform_data {
- unsigned sck;
- unsigned long mosi;
- unsigned long miso;
-
u16 num_chipselect;
};
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3bf273538840..4894d322d258 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -409,4 +409,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+ size_t max_size, unsigned int cpu_mult,
+ gfp_t gfp);
+
+void free_bucket_spinlocks(spinlock_t *locks);
+
#endif /* __LINUX_SPINLOCK_H */
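A hedged sketch of the bucket-lock helpers declared above, as a hash-table user might employ them; the table size, multiplier and hash are placeholders.

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/spinlock.h>

static spinlock_t *example_locks;
static unsigned int example_lock_mask;

static int example_table_init(void)
{
	/* At most 1024 locks, scaled by 4 per possible CPU */
	return alloc_bucket_spinlocks(&example_locks, &example_lock_mask,
				      1024, 4, GFP_KERNEL);
}

static void example_bucket_op(u32 key)
{
	spinlock_t *lock = &example_locks[jhash_1word(key, 0) &
					  example_lock_mask];

	spin_lock(lock);
	/* ... operate on the bucket protected by this lock ... */
	spin_unlock(lock);
}

static void example_table_exit(void)
{
	free_bucket_spinlocks(example_locks);
}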
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 62be8966e837..33c1c698df09 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
* relies on normal RCU, it can be called from the CPU which
* is in the idle loop from an RCU point of view or offline.
*/
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
{
return 1;
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index a949f4f9e4d7..4eda108abee0 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
/* Update-side state. */
- raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
+ spinlock_t __private lock ____cacheline_internodealigned_in_smp;
struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
* Node in SRCU combining tree, similar in function to rcu_data.
*/
struct srcu_node {
- raw_spinlock_t __private lock;
+ spinlock_t __private lock;
unsigned long srcu_have_cbs[4]; /* GP seq for children */
/* having CBs, but only */
/* is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- raw_spinlock_t __private lock; /* Protect counters */
+ spinlock_t __private lock; /* Protect counters */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
@@ -107,7 +107,7 @@ struct srcu_struct {
#define __SRCU_STRUCT_INIT(name) \
{ \
.sda = &name##_srcu_data, \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq_needed = 0 - 1, \
__SRCU_DEP_MAP_INIT(name) \
}
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 2181719fd907..998a4ba28eba 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -20,12 +20,20 @@ enum {
#endif
/**
+ * sizeof_field(TYPE, MEMBER)
+ *
+ * @TYPE: The structure containing the field of interest
+ * @MEMBER: The field to return the size of
+ */
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+
+/**
* offsetofend(TYPE, MEMBER)
*
* @TYPE: The type of the structure
* @MEMBER: The member within the structure to get the end offset of
*/
#define offsetofend(TYPE, MEMBER) \
- (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
#endif
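A quick illustration (the sample struct is invented) of sizeof_field() and the rewritten offsetofend():

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct example {
	u16 a;
	u64 b;
};

static inline void example_checks(void)
{
	/* size of a single member, without needing an instance */
	BUILD_BUG_ON(sizeof_field(struct example, b) != sizeof(u64));
	/* offset one byte past the end of member b */
	BUILD_BUG_ON(offsetofend(struct example, b) !=
		     offsetof(struct example, b) + sizeof(u64));
}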
diff --git a/include/linux/stm.h b/include/linux/stm.h
index 210ff2292361..c6f577ab6f21 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* System Trace Module (STM) infrastructure apis
* Copyright (C) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef _STM_H_
diff --git a/include/linux/string.h b/include/linux/string.h
index cfd83eb2f926..dd39a690c841 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -11,6 +11,7 @@
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t);
+extern void *vmemdup_user(const void __user *, size_t);
extern void *memdup_user_nul(const void __user *, size_t);
/*
@@ -28,7 +29,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
-ssize_t __must_check strscpy(char *, const char *, size_t);
+ssize_t strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
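A hedged sketch of the new vmemdup_user() helper: like memdup_user(), but it may fall back to vmalloc for large copies, so the result is released with kvfree(). The wrapper below is illustrative only.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/string.h>

static int example_copy_blob(const void __user *ubuf, size_t len)
{
	void *buf = vmemdup_user(ubuf, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... consume buf ... */

	kvfree(buf);
	return 0;
}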
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 71c237e8240e..ed761f751ecb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -179,7 +179,6 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int rpc_restart_call_prepare(struct rpc_task *);
int rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
-int rpc_protocol(struct rpc_clnt *);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
size_t rpc_max_bc_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d96e74e114c0..592653becd91 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -229,6 +229,9 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *,
struct rpc_task *,
rpc_action action,
int priority);
+void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
+ struct rpc_wait_queue *queue,
+ struct rpc_task *task);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up(struct rpc_wait_queue *);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 786ae2255f05..574368e8a16f 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -272,6 +272,7 @@ struct svc_rqst {
#define RQ_BUSY (6) /* request is busy */
#define RQ_DATA (7) /* request has data */
unsigned long rq_flags; /* flags field */
+ ktime_t rq_qtime; /* enqueue time */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
@@ -283,6 +284,7 @@ struct svc_rqst {
int rq_reserved; /* space on socket outq
* reserved for this request
*/
+ ktime_t rq_stime; /* start time */
struct cache_req rq_chandle; /* handle passed to caches for
* request delaying
@@ -493,6 +495,10 @@ void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
+unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
+ struct kvec *first, size_t total);
+char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
+ struct kvec *first, size_t total);
#define RPC_MAX_ADDRBUFLEN (63U)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 995c6fe9ee90..7337e1221590 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -132,9 +132,6 @@ struct svcxprt_rdma {
#define RDMAXPRT_CONN_PENDING 3
#define RPCRDMA_LISTEN_BACKLOG 10
-/* The default ORD value is based on two outstanding full-size writes with a
- * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
-#define RPCRDMA_ORD (64/4)
#define RPCRDMA_MAX_REQUESTS 32
/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
@@ -185,8 +182,6 @@ extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
-extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 1caf7bc83306..c3d72066d4b1 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -25,7 +25,7 @@ struct svc_xprt_ops {
void (*xpo_release_rqst)(struct svc_rqst *);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
- int (*xpo_secure_port)(struct svc_rqst *);
+ void (*xpo_secure_port)(struct svc_rqst *rqstp);
void (*xpo_kill_temp_xprt)(struct svc_xprt *);
};
@@ -83,6 +83,7 @@ struct svc_xprt {
size_t xpt_locallen; /* length of address */
struct sockaddr_storage xpt_remote; /* remote peer's address */
size_t xpt_remotelen; /* length of address */
+ char xpt_remotebuf[INET6_ADDRSTRLEN + 10];
struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
struct list_head xpt_users; /* callbacks on free */
@@ -152,7 +153,10 @@ static inline void svc_xprt_set_remote(struct svc_xprt *xprt,
{
memcpy(&xprt->xpt_remote, sa, salen);
xprt->xpt_remotelen = salen;
+ snprintf(xprt->xpt_remotebuf, sizeof(xprt->xpt_remotebuf) - 1,
+ "%pISpc", sa);
}
+
static inline unsigned short svc_addr_port(const struct sockaddr *sa)
{
const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 221b7a2e5406..5859563e3c1f 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -64,7 +64,7 @@ enum rpcrdma_memreg {
RPCRDMA_MEMWINDOWS,
RPCRDMA_MEMWINDOWS_ASYNC,
RPCRDMA_MTHCAFMR,
- RPCRDMA_FRMR,
+ RPCRDMA_FRWR,
RPCRDMA_ALLPHYSICAL,
RPCRDMA_LAST
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d60b0f5c38d5..440b62f7502e 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -384,6 +384,8 @@ extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);
+extern asmlinkage int swsusp_arch_suspend(void);
+extern asmlinkage int swsusp_arch_resume(void);
extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
@@ -443,32 +445,8 @@ extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
-static inline void lock_system_sleep(void)
-{
- current->flags |= PF_FREEZER_SKIP;
- mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
- /*
- * Don't use freezer_count() because we don't want the call to
- * try_to_freeze() here.
- *
- * Reason:
- * Fundamentally, we just don't need it, because freezing condition
- * doesn't come into effect until we release the pm_mutex lock,
- * since the freezer always works with pm_mutex held.
- *
- * More importantly, in the case of hibernation,
- * unlock_system_sleep() gets called in snapshot_read() and
- * snapshot_write() when the freezing condition is still in effect.
- * Which means, if we use try_to_freeze() here, it would make them
- * enter the refrigerator, thus causing hibernation to lockup.
- */
- current->flags &= ~PF_FREEZER_SKIP;
- mutex_unlock(&pm_mutex);
-}
+extern void lock_system_sleep(void);
+extern void unlock_system_sleep(void);
#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c2b8128799c1..2417d288e016 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -332,20 +332,16 @@ extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
-extern void lru_add_drain_all_cpuslocked(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
-extern void add_page_to_unevictable_list(struct page *page);
-
extern void lru_cache_add_active_or_unevictable(struct page *page,
struct vm_area_struct *vma);
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
-extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
@@ -404,7 +400,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
#define SWAP_ADDRESS_SPACE_SHIFT 14
#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
-extern bool swap_vma_readahead;
#define swap_address_space(entry) \
(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
>> SWAP_ADDRESS_SPACE_SHIFT])
@@ -426,14 +421,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr,
bool *new_page_allocated);
-extern struct page *swapin_readahead(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr);
-
-extern struct page *swap_readahead_detect(struct vm_fault *vmf,
- struct vma_swap_readahead *swap_ra);
-extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
- struct vm_fault *vmf,
- struct vma_swap_readahead *swap_ra);
+extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+ struct vm_fault *vmf);
+extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
+ struct vm_fault *vmf);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
@@ -441,11 +432,6 @@ extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
-static inline bool swap_use_vma_readahead(void)
-{
- return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
-}
-
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
@@ -541,26 +527,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
- struct vm_area_struct *vma, unsigned long addr)
-{
- return NULL;
-}
-
-static inline bool swap_use_vma_readahead(void)
-{
- return false;
-}
-
-static inline struct page *swap_readahead_detect(
- struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+ gfp_t gfp_mask, struct vm_fault *vmf)
{
return NULL;
}
-static inline struct page *do_swap_page_readahead(
- swp_entry_t fentry, gfp_t gfp_mask,
- struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+ struct vm_fault *vmf)
{
return NULL;
}
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 24ed817082ee..965be92c33b5 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -66,13 +66,11 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
enum dma_sync_target target);
/* Accessory functions. */
-extern void
-*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags);
-extern void
-swiotlb_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flags, unsigned long attrs);
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs);
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
@@ -115,10 +113,10 @@ extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
-extern void __init swiotlb_free(void);
+extern void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
#else
-static inline void swiotlb_free(void) { }
+static inline void swiotlb_exit(void) { }
static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
@@ -126,4 +124,6 @@ extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);
+extern const struct dma_map_ops swiotlb_dma_ops;
+
#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 09d73d0d1aa8..ec93e93371fa 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -100,6 +100,9 @@ struct sw_event_regs {
u32 gpio_interrupt_hdr;
u32 gpio_interrupt_data;
u32 reserved16[4];
+ u32 gfms_event_hdr;
+ u32 gfms_event_data;
+ u32 reserved17[4];
} __packed;
enum {
@@ -168,6 +171,14 @@ struct ntb_info_regs {
u16 reserved1;
u64 ep_map;
u16 requester_id;
+ u16 reserved2;
+ u32 reserved3[4];
+ struct nt_partition_info {
+ u32 xlink_enabled;
+ u32 target_part_low;
+ u32 target_part_high;
+ u32 reserved;
+ } ntp_info[48];
} __packed;
struct part_cfg_regs {
@@ -284,7 +295,20 @@ enum {
struct pff_csr_regs {
u16 vendor_id;
u16 device_id;
- u32 pci_cfg_header[15];
+ u16 pcicmd;
+ u16 pcists;
+ u32 pci_class;
+ u32 pci_opts;
+ union {
+ u32 pci_bar[6];
+ u64 pci_bar64[3];
+ };
+ u32 pci_cardbus;
+ u32 pci_subsystem_id;
+ u32 pci_expansion_rom;
+ u32 pci_cap_ptr;
+ u32 reserved1;
+ u32 pci_irq;
u32 pci_cap_region[48];
u32 pcie_cap_region[448];
u32 indirect_gas_window[128];
diff --git a/include/linux/sync_core.h b/include/linux/sync_core.h
new file mode 100644
index 000000000000..013da4b8b327
--- /dev/null
+++ b/include/linux/sync_core.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SYNC_CORE_H
+#define _LINUX_SYNC_CORE_H
+
+#ifdef CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
+#include <asm/sync_core.h>
+#else
+/*
+ * This is a dummy sync_core_before_usermode() implementation that can be used
+ * on all architectures which return to user-space through core serializing
+ * instructions.
+ * If your architecture returns to user-space through non-core-serializing
+ * instructions, you need to write your own functions.
+ */
+static inline void sync_core_before_usermode(void)
+{
+}
+#endif
+
+#endif /* _LINUX_SYNC_CORE_H */
+
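
Usage of the new header is intentionally trivial. A hedged sketch of the call-site pattern (the function name below is illustrative):

#include <linux/sync_core.h>

/* Illustrative sketch: code that must guarantee a core-serializing event
 * before the current task returns to user space (a membarrier-style path,
 * for instance) calls this unconditionally; on architectures whose return
 * to user space is already core serializing, the header above makes it a
 * no-op. */
static void example_before_return_to_user(void)
{
	sync_core_before_usermode();
}
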
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a78186d826d7..b961184f597a 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -191,6 +191,8 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \
asmlinkage long sys_##sname(void)
#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
@@ -210,6 +212,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(SyS##name)))); \
+ ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
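
The net effect of the two hunks above: every syscall defined through the SYSCALL_DEFINEx() family now carries an explicit prototype and an ALLOW_ERROR_INJECTION(..., ERRNO) marking. A hedged sketch of what that enables (the syscall name is hypothetical, purely for illustration): such a definition becomes a valid target for the kernel's error-injection machinery, e.g. a bpf_override_return()-based fault-injection program, with no per-syscall annotation.

#include <linux/syscalls.h>

/* Hypothetical syscall, for illustration only: with this patch the
 * SYSCALL_DEFINE2() expansion already contains ALLOW_ERROR_INJECTION(),
 * so tooling may inject an errno return here without the body running. */
SYSCALL_DEFINE2(example_nop, unsigned int, fd, unsigned int, flags)
{
	return 0;
}
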
@@ -241,63 +244,292 @@ static inline void addr_limit_user_check(void)
#endif
}
-asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
- qid_t id, void __user *addr);
-asmlinkage long sys_time(time_t __user *tloc);
-asmlinkage long sys_stime(time_t __user *tptr);
-asmlinkage long sys_gettimeofday(struct timeval __user *tv,
- struct timezone __user *tz);
-asmlinkage long sys_settimeofday(struct timeval __user *tv,
- struct timezone __user *tz);
-asmlinkage long sys_adjtimex(struct timex __user *txc_p);
+/*
+ * These syscall function prototypes are kept in the same order as
+ * include/uapi/asm-generic/unistd.h. Architecture specific entries go below,
+ * followed by deprecated or obsolete system calls.
+ *
+ * Please note that these prototypes here are only provided for information
+ * purposes, for static analysis, and for linking from the syscall table.
+ * These functions should not be called elsewhere from kernel code.
+ */
+asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx);
+asmlinkage long sys_io_destroy(aio_context_t ctx);
+asmlinkage long sys_io_submit(aio_context_t, long,
+ struct iocb __user * __user *);
+asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
+ struct io_event __user *result);
+asmlinkage long sys_io_getevents(aio_context_t ctx_id,
+ long min_nr,
+ long nr,
+ struct io_event __user *events,
+ struct timespec __user *timeout);
-asmlinkage long sys_times(struct tms __user *tbuf);
+/* fs/xattr.c */
+asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
+ const void __user *value, size_t size, int flags);
+asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
+ const void __user *value, size_t size, int flags);
+asmlinkage long sys_fsetxattr(int fd, const char __user *name,
+ const void __user *value, size_t size, int flags);
+asmlinkage long sys_getxattr(const char __user *path, const char __user *name,
+ void __user *value, size_t size);
+asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name,
+ void __user *value, size_t size);
+asmlinkage long sys_fgetxattr(int fd, const char __user *name,
+ void __user *value, size_t size);
+asmlinkage long sys_listxattr(const char __user *path, char __user *list,
+ size_t size);
+asmlinkage long sys_llistxattr(const char __user *path, char __user *list,
+ size_t size);
+asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size);
+asmlinkage long sys_removexattr(const char __user *path,
+ const char __user *name);
+asmlinkage long sys_lremovexattr(const char __user *path,
+ const char __user *name);
+asmlinkage long sys_fremovexattr(int fd, const char __user *name);
-asmlinkage long sys_gettid(void);
-asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp);
-asmlinkage long sys_alarm(unsigned int seconds);
-asmlinkage long sys_getpid(void);
-asmlinkage long sys_getppid(void);
-asmlinkage long sys_getuid(void);
-asmlinkage long sys_geteuid(void);
-asmlinkage long sys_getgid(void);
-asmlinkage long sys_getegid(void);
-asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid);
-asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid);
-asmlinkage long sys_getpgid(pid_t pid);
-asmlinkage long sys_getpgrp(void);
-asmlinkage long sys_getsid(pid_t pid);
-asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist);
+/* fs/dcache.c */
+asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
-asmlinkage long sys_setregid(gid_t rgid, gid_t egid);
-asmlinkage long sys_setgid(gid_t gid);
-asmlinkage long sys_setreuid(uid_t ruid, uid_t euid);
-asmlinkage long sys_setuid(uid_t uid);
-asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);
-asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid);
-asmlinkage long sys_setfsuid(uid_t uid);
-asmlinkage long sys_setfsgid(gid_t gid);
-asmlinkage long sys_setpgid(pid_t pid, pid_t pgid);
-asmlinkage long sys_setsid(void);
-asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist);
+/* fs/cookies.c */
+asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len);
+
+/* fs/eventfd.c */
+asmlinkage long sys_eventfd2(unsigned int count, int flags);
+
+/* fs/eventpoll.c */
+asmlinkage long sys_epoll_create1(int flags);
+asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
+ struct epoll_event __user *event);
+asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
+ int maxevents, int timeout,
+ const sigset_t __user *sigmask,
+ size_t sigsetsize);
+
+/* fs/fcntl.c */
+asmlinkage long sys_dup(unsigned int fildes);
+asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
+asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
+#if BITS_PER_LONG == 32
+asmlinkage long sys_fcntl64(unsigned int fd,
+ unsigned int cmd, unsigned long arg);
+#endif
+
+/* fs/inotify_user.c */
+asmlinkage long sys_inotify_init1(int flags);
+asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
+ u32 mask);
+asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd);
+
+/* fs/ioctl.c */
+asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd,
+ unsigned long arg);
+
+/* fs/ioprio.c */
+asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
+asmlinkage long sys_ioprio_get(int which, int who);
+
+/* fs/locks.c */
+asmlinkage long sys_flock(unsigned int fd, unsigned int cmd);
+
+/* fs/namei.c */
+asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode,
+ unsigned dev);
+asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode);
+asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag);
+asmlinkage long sys_symlinkat(const char __user * oldname,
+ int newdfd, const char __user * newname);
+asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
+ int newdfd, const char __user *newname, int flags);
+asmlinkage long sys_renameat(int olddfd, const char __user * oldname,
+ int newdfd, const char __user * newname);
+
+/* fs/namespace.c */
+asmlinkage long sys_umount(char __user *name, int flags);
+asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
+ char __user *type, unsigned long flags,
+ void __user *data);
+asmlinkage long sys_pivot_root(const char __user *new_root,
+ const char __user *put_old);
+
+/* fs/nfsctl.c */
+/* fs/open.c */
+asmlinkage long sys_statfs(const char __user * path,
+ struct statfs __user *buf);
+asmlinkage long sys_statfs64(const char __user *path, size_t sz,
+ struct statfs64 __user *buf);
+asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
+asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
+ struct statfs64 __user *buf);
+asmlinkage long sys_truncate(const char __user *path, long length);
+asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+#if BITS_PER_LONG == 32
+asmlinkage long sys_truncate64(const char __user *path, loff_t length);
+asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
+#endif
+asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
+asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode);
+asmlinkage long sys_chdir(const char __user *filename);
+asmlinkage long sys_fchdir(unsigned int fd);
+asmlinkage long sys_chroot(const char __user *filename);
+asmlinkage long sys_fchmod(unsigned int fd, umode_t mode);
+asmlinkage long sys_fchmodat(int dfd, const char __user * filename,
+ umode_t mode);
+asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
+ gid_t group, int flag);
+asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
+asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
+ umode_t mode);
+asmlinkage long sys_close(unsigned int fd);
+asmlinkage long sys_vhangup(void);
+
+/* fs/pipe.c */
+asmlinkage long sys_pipe2(int __user *fildes, int flags);
+
+/* fs/quota.c */
+asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special,
+ qid_t id, void __user *addr);
+
+/* fs/readdir.c */
+asmlinkage long sys_getdents64(unsigned int fd,
+ struct linux_dirent64 __user *dirent,
+ unsigned int count);
+
+/* fs/read_write.c */
+asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
+ unsigned long offset_low, loff_t __user *result,
+ unsigned int whence);
+asmlinkage long sys_lseek(unsigned int fd, off_t offset,
+ unsigned int whence);
+asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count);
+asmlinkage long sys_write(unsigned int fd, const char __user *buf,
+ size_t count);
+asmlinkage long sys_readv(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen);
+asmlinkage long sys_writev(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen);
+asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
+ size_t count, loff_t pos);
+asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
+ size_t count, loff_t pos);
+asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+
+/* fs/sendfile.c */
+asmlinkage long sys_sendfile64(int out_fd, int in_fd,
+ loff_t __user *offset, size_t count);
+
+/* fs/select.c */
+asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
+ fd_set __user *, struct timespec __user *,
+ void __user *);
+asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
+ struct timespec __user *, const sigset_t __user *,
+ size_t);
+
+/* fs/signalfd.c */
+asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags);
+
+/* fs/splice.c */
+asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
+ unsigned long nr_segs, unsigned int flags);
+asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
+ int fd_out, loff_t __user *off_out,
+ size_t len, unsigned int flags);
+asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags);
+
+/* fs/stat.c */
+asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
+ int bufsiz);
+asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
+ struct stat __user *statbuf, int flag);
+asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf);
+#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
+asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
+asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
+ struct stat64 __user *statbuf, int flag);
+#endif
+
+/* fs/sync.c */
+asmlinkage long sys_sync(void);
+asmlinkage long sys_fsync(unsigned int fd);
+asmlinkage long sys_fdatasync(unsigned int fd);
+asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
+ loff_t offset, loff_t nbytes);
+asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
+ unsigned int flags);
+
+/* fs/timerfd.c */
+asmlinkage long sys_timerfd_create(int clockid, int flags);
+asmlinkage long sys_timerfd_settime(int ufd, int flags,
+ const struct itimerspec __user *utmr,
+ struct itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
+
+/* fs/utimes.c */
+asmlinkage long sys_utimensat(int dfd, const char __user *filename,
+ struct timespec __user *utimes, int flags);
+
+/* kernel/acct.c */
asmlinkage long sys_acct(const char __user *name);
+
+/* kernel/capability.c */
asmlinkage long sys_capget(cap_user_header_t header,
cap_user_data_t dataptr);
asmlinkage long sys_capset(cap_user_header_t header,
const cap_user_data_t data);
+
+/* kernel/exec_domain.c */
asmlinkage long sys_personality(unsigned int personality);
-asmlinkage long sys_sigpending(old_sigset_t __user *set);
-asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set,
- old_sigset_t __user *oset);
-asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss,
- struct sigaltstack __user *uoss);
+/* kernel/exit.c */
+asmlinkage long sys_exit(int error_code);
+asmlinkage long sys_exit_group(int error_code);
+asmlinkage long sys_waitid(int which, pid_t pid,
+ struct siginfo __user *infop,
+ int options, struct rusage __user *ru);
+/* kernel/fork.c */
+asmlinkage long sys_set_tid_address(int __user *tidptr);
+asmlinkage long sys_unshare(unsigned long unshare_flags);
+
+/* kernel/futex.c */
+asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
+ struct timespec __user *utime, u32 __user *uaddr2,
+ u32 val3);
+asmlinkage long sys_get_robust_list(int pid,
+ struct robust_list_head __user * __user *head_ptr,
+ size_t __user *len_ptr);
+asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
+ size_t len);
+
+/* kernel/hrtimer.c */
+asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp);
+
+/* kernel/itimer.c */
asmlinkage long sys_getitimer(int which, struct itimerval __user *value);
asmlinkage long sys_setitimer(int which,
struct itimerval __user *value,
struct itimerval __user *ovalue);
+
+/* kernel/kexec.c */
+asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
+ struct kexec_segment __user *segments,
+ unsigned long flags);
+
+/* kernel/module.c */
+asmlinkage long sys_init_module(void __user *umod, unsigned long len,
+ const char __user *uargs);
+asmlinkage long sys_delete_module(const char __user *name_user,
+ unsigned int flags);
+
+/* kernel/posix-timers.c */
asmlinkage long sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id);
@@ -312,29 +544,27 @@ asmlinkage long sys_clock_settime(clockid_t which_clock,
const struct timespec __user *tp);
asmlinkage long sys_clock_gettime(clockid_t which_clock,
struct timespec __user *tp);
-asmlinkage long sys_clock_adjtime(clockid_t which_clock,
- struct timex __user *tx);
asmlinkage long sys_clock_getres(clockid_t which_clock,
struct timespec __user *tp);
asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags,
const struct timespec __user *rqtp,
struct timespec __user *rmtp);
-asmlinkage long sys_nice(int increment);
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
- struct sched_param __user *param);
+/* kernel/printk.c */
+asmlinkage long sys_syslog(int type, char __user *buf, int len);
+
+/* kernel/ptrace.c */
+asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
+ unsigned long data);
+/* kernel/sched/core.c */
+
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
-asmlinkage long sys_sched_setattr(pid_t pid,
- struct sched_attr __user *attr,
- unsigned int flags);
+asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+ struct sched_param __user *param);
asmlinkage long sys_sched_getscheduler(pid_t pid);
asmlinkage long sys_sched_getparam(pid_t pid,
struct sched_param __user *param);
-asmlinkage long sys_sched_getattr(pid_t pid,
- struct sched_attr __user *attr,
- unsigned int size,
- unsigned int flags);
asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr);
asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
@@ -344,54 +574,15 @@ asmlinkage long sys_sched_get_priority_max(int policy);
asmlinkage long sys_sched_get_priority_min(int policy);
asmlinkage long sys_sched_rr_get_interval(pid_t pid,
struct timespec __user *interval);
-asmlinkage long sys_setpriority(int which, int who, int niceval);
-asmlinkage long sys_getpriority(int which, int who);
-asmlinkage long sys_shutdown(int, int);
-asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
- void __user *arg);
+/* kernel/signal.c */
asmlinkage long sys_restart_syscall(void);
-asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
- struct kexec_segment __user *segments,
- unsigned long flags);
-asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd,
- unsigned long cmdline_len,
- const char __user *cmdline_ptr,
- unsigned long flags);
-
-asmlinkage long sys_exit(int error_code);
-asmlinkage long sys_exit_group(int error_code);
-asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
- int options, struct rusage __user *ru);
-asmlinkage long sys_waitid(int which, pid_t pid,
- struct siginfo __user *infop,
- int options, struct rusage __user *ru);
-asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
-asmlinkage long sys_set_tid_address(int __user *tidptr);
-asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
- struct timespec __user *utime, u32 __user *uaddr2,
- u32 val3);
-
-asmlinkage long sys_init_module(void __user *umod, unsigned long len,
- const char __user *uargs);
-asmlinkage long sys_delete_module(const char __user *name_user,
- unsigned int flags);
-
-#ifdef CONFIG_OLD_SIGSUSPEND
-asmlinkage long sys_sigsuspend(old_sigset_t mask);
-#endif
-
-#ifdef CONFIG_OLD_SIGSUSPEND3
-asmlinkage long sys_sigsuspend(int unused1, int unused2, old_sigset_t mask);
-#endif
-
+asmlinkage long sys_kill(pid_t pid, int sig);
+asmlinkage long sys_tkill(pid_t pid, int sig);
+asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig);
+asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss,
+ struct sigaltstack __user *uoss);
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
-
-#ifdef CONFIG_OLD_SIGACTION
-asmlinkage long sys_sigaction(int, const struct old_sigaction __user *,
- struct old_sigaction __user *);
-#endif
-
#ifndef CONFIG_ODD_RT_SIGACTION
asmlinkage long sys_rt_sigaction(int,
const struct sigaction __user *,
@@ -405,470 +596,136 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
const struct timespec __user *uts,
size_t sigsetsize);
-asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
- siginfo_t __user *uinfo);
-asmlinkage long sys_kill(pid_t pid, int sig);
-asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig);
-asmlinkage long sys_tkill(pid_t pid, int sig);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
-asmlinkage long sys_sgetmask(void);
-asmlinkage long sys_ssetmask(int newmask);
-asmlinkage long sys_signal(int sig, __sighandler_t handler);
-asmlinkage long sys_pause(void);
-
-asmlinkage long sys_sync(void);
-asmlinkage long sys_fsync(unsigned int fd);
-asmlinkage long sys_fdatasync(unsigned int fd);
-asmlinkage long sys_bdflush(int func, long data);
-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
- char __user *type, unsigned long flags,
- void __user *data);
-asmlinkage long sys_umount(char __user *name, int flags);
-asmlinkage long sys_oldumount(char __user *name);
-asmlinkage long sys_truncate(const char __user *path, long length);
-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
-asmlinkage long sys_stat(const char __user *filename,
- struct __old_kernel_stat __user *statbuf);
-asmlinkage long sys_statfs(const char __user * path,
- struct statfs __user *buf);
-asmlinkage long sys_statfs64(const char __user *path, size_t sz,
- struct statfs64 __user *buf);
-asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
-asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
- struct statfs64 __user *buf);
-asmlinkage long sys_lstat(const char __user *filename,
- struct __old_kernel_stat __user *statbuf);
-asmlinkage long sys_fstat(unsigned int fd,
- struct __old_kernel_stat __user *statbuf);
-asmlinkage long sys_newstat(const char __user *filename,
- struct stat __user *statbuf);
-asmlinkage long sys_newlstat(const char __user *filename,
- struct stat __user *statbuf);
-asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf);
-asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf);
-#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
-asmlinkage long sys_stat64(const char __user *filename,
- struct stat64 __user *statbuf);
-asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
-asmlinkage long sys_lstat64(const char __user *filename,
- struct stat64 __user *statbuf);
-asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
- struct stat64 __user *statbuf, int flag);
-#endif
-#if BITS_PER_LONG == 32
-asmlinkage long sys_truncate64(const char __user *path, loff_t length);
-asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
-#endif
-
-asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
- const void __user *value, size_t size, int flags);
-asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
- const void __user *value, size_t size, int flags);
-asmlinkage long sys_fsetxattr(int fd, const char __user *name,
- const void __user *value, size_t size, int flags);
-asmlinkage long sys_getxattr(const char __user *path, const char __user *name,
- void __user *value, size_t size);
-asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name,
- void __user *value, size_t size);
-asmlinkage long sys_fgetxattr(int fd, const char __user *name,
- void __user *value, size_t size);
-asmlinkage long sys_listxattr(const char __user *path, char __user *list,
- size_t size);
-asmlinkage long sys_llistxattr(const char __user *path, char __user *list,
- size_t size);
-asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size);
-asmlinkage long sys_removexattr(const char __user *path,
- const char __user *name);
-asmlinkage long sys_lremovexattr(const char __user *path,
- const char __user *name);
-asmlinkage long sys_fremovexattr(int fd, const char __user *name);
-
-asmlinkage long sys_brk(unsigned long brk);
-asmlinkage long sys_mprotect(unsigned long start, size_t len,
- unsigned long prot);
-asmlinkage long sys_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr);
-asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
- unsigned long prot, unsigned long pgoff,
- unsigned long flags);
-asmlinkage long sys_msync(unsigned long start, size_t len, int flags);
-asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice);
-asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
-asmlinkage long sys_munmap(unsigned long addr, size_t len);
-asmlinkage long sys_mlock(unsigned long start, size_t len);
-asmlinkage long sys_munlock(unsigned long start, size_t len);
-asmlinkage long sys_mlockall(int flags);
-asmlinkage long sys_munlockall(void);
-asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior);
-asmlinkage long sys_mincore(unsigned long start, size_t len,
- unsigned char __user * vec);
-
-asmlinkage long sys_pivot_root(const char __user *new_root,
- const char __user *put_old);
-asmlinkage long sys_chroot(const char __user *filename);
-asmlinkage long sys_mknod(const char __user *filename, umode_t mode,
- unsigned dev);
-asmlinkage long sys_link(const char __user *oldname,
- const char __user *newname);
-asmlinkage long sys_symlink(const char __user *old, const char __user *new);
-asmlinkage long sys_unlink(const char __user *pathname);
-asmlinkage long sys_rename(const char __user *oldname,
- const char __user *newname);
-asmlinkage long sys_chmod(const char __user *filename, umode_t mode);
-asmlinkage long sys_fchmod(unsigned int fd, umode_t mode);
-
-asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
-#if BITS_PER_LONG == 32
-asmlinkage long sys_fcntl64(unsigned int fd,
- unsigned int cmd, unsigned long arg);
-#endif
-asmlinkage long sys_pipe(int __user *fildes);
-asmlinkage long sys_pipe2(int __user *fildes, int flags);
-asmlinkage long sys_dup(unsigned int fildes);
-asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
-asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
-asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd,
- unsigned long arg);
-asmlinkage long sys_flock(unsigned int fd, unsigned int cmd);
-asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx);
-asmlinkage long sys_io_destroy(aio_context_t ctx);
-asmlinkage long sys_io_getevents(aio_context_t ctx_id,
- long min_nr,
- long nr,
- struct io_event __user *events,
- struct timespec __user *timeout);
-asmlinkage long sys_io_submit(aio_context_t, long,
- struct iocb __user * __user *);
-asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
- struct io_event __user *result);
-asmlinkage long sys_sendfile(int out_fd, int in_fd,
- off_t __user *offset, size_t count);
-asmlinkage long sys_sendfile64(int out_fd, int in_fd,
- loff_t __user *offset, size_t count);
-asmlinkage long sys_readlink(const char __user *path,
- char __user *buf, int bufsiz);
-asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
-asmlinkage long sys_open(const char __user *filename,
- int flags, umode_t mode);
-asmlinkage long sys_close(unsigned int fd);
-asmlinkage long sys_access(const char __user *filename, int mode);
-asmlinkage long sys_vhangup(void);
-asmlinkage long sys_chown(const char __user *filename,
- uid_t user, gid_t group);
-asmlinkage long sys_lchown(const char __user *filename,
- uid_t user, gid_t group);
-asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_HAVE_UID16
-asmlinkage long sys_chown16(const char __user *filename,
- old_uid_t user, old_gid_t group);
-asmlinkage long sys_lchown16(const char __user *filename,
- old_uid_t user, old_gid_t group);
-asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group);
-asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid);
-asmlinkage long sys_setgid16(old_gid_t gid);
-asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid);
-asmlinkage long sys_setuid16(old_uid_t uid);
-asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid);
-asmlinkage long sys_getresuid16(old_uid_t __user *ruid,
- old_uid_t __user *euid, old_uid_t __user *suid);
-asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid);
-asmlinkage long sys_getresgid16(old_gid_t __user *rgid,
- old_gid_t __user *egid, old_gid_t __user *sgid);
-asmlinkage long sys_setfsuid16(old_uid_t uid);
-asmlinkage long sys_setfsgid16(old_gid_t gid);
-asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist);
-asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist);
-asmlinkage long sys_getuid16(void);
-asmlinkage long sys_geteuid16(void);
-asmlinkage long sys_getgid16(void);
-asmlinkage long sys_getegid16(void);
-#endif
-asmlinkage long sys_utime(char __user *filename,
- struct utimbuf __user *times);
-asmlinkage long sys_utimes(char __user *filename,
- struct timeval __user *utimes);
-asmlinkage long sys_lseek(unsigned int fd, off_t offset,
- unsigned int whence);
-asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
- unsigned long offset_low, loff_t __user *result,
- unsigned int whence);
-asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count);
-asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
-asmlinkage long sys_readv(unsigned long fd,
- const struct iovec __user *vec,
- unsigned long vlen);
-asmlinkage long sys_write(unsigned int fd, const char __user *buf,
- size_t count);
-asmlinkage long sys_writev(unsigned long fd,
- const struct iovec __user *vec,
- unsigned long vlen);
-asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
- size_t count, loff_t pos);
-asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
- size_t count, loff_t pos);
-asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
- unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
-asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
- unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
- rwf_t flags);
-asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
- unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
-asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
- unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
- rwf_t flags);
-asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
-asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
-asmlinkage long sys_chdir(const char __user *filename);
-asmlinkage long sys_fchdir(unsigned int fd);
-asmlinkage long sys_rmdir(const char __user *pathname);
-asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len);
-asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special,
- qid_t id, void __user *addr);
-asmlinkage long sys_getdents(unsigned int fd,
- struct linux_dirent __user *dirent,
- unsigned int count);
-asmlinkage long sys_getdents64(unsigned int fd,
- struct linux_dirent64 __user *dirent,
- unsigned int count);
-
-asmlinkage long sys_setsockopt(int fd, int level, int optname,
- char __user *optval, int optlen);
-asmlinkage long sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen);
-asmlinkage long sys_bind(int, struct sockaddr __user *, int);
-asmlinkage long sys_connect(int, struct sockaddr __user *, int);
-asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *);
-asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
-asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
-asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
-asmlinkage long sys_send(int, void __user *, size_t, unsigned);
-asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
- struct sockaddr __user *, int);
-asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
-asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
- unsigned int vlen, unsigned flags);
-asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
-asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned,
- struct sockaddr __user *, int __user *);
-asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
-asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
- unsigned int vlen, unsigned flags,
- struct timespec __user *timeout);
-asmlinkage long sys_socket(int, int, int);
-asmlinkage long sys_socketpair(int, int, int, int __user *);
-asmlinkage long sys_socketcall(int call, unsigned long __user *args);
-asmlinkage long sys_listen(int, int);
-asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
- int timeout);
-asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timeval __user *tvp);
-asmlinkage long sys_old_select(struct sel_arg_struct __user *arg);
-asmlinkage long sys_epoll_create(int size);
-asmlinkage long sys_epoll_create1(int flags);
-asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
- struct epoll_event __user *event);
-asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
- int maxevents, int timeout);
-asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
- int maxevents, int timeout,
- const sigset_t __user *sigmask,
- size_t sigsetsize);
-asmlinkage long sys_gethostname(char __user *name, int len);
+/* kernel/sys.c */
+asmlinkage long sys_setpriority(int which, int who, int niceval);
+asmlinkage long sys_getpriority(int which, int who);
+asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
+ void __user *arg);
+asmlinkage long sys_setregid(gid_t rgid, gid_t egid);
+asmlinkage long sys_setgid(gid_t gid);
+asmlinkage long sys_setreuid(uid_t ruid, uid_t euid);
+asmlinkage long sys_setuid(uid_t uid);
+asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);
+asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid);
+asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid);
+asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid);
+asmlinkage long sys_setfsuid(uid_t uid);
+asmlinkage long sys_setfsgid(gid_t gid);
+asmlinkage long sys_times(struct tms __user *tbuf);
+asmlinkage long sys_setpgid(pid_t pid, pid_t pgid);
+asmlinkage long sys_getpgid(pid_t pid);
+asmlinkage long sys_getsid(pid_t pid);
+asmlinkage long sys_setsid(void);
+asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist);
+asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist);
+asmlinkage long sys_newuname(struct new_utsname __user *name);
asmlinkage long sys_sethostname(char __user *name, int len);
asmlinkage long sys_setdomainname(char __user *name, int len);
-asmlinkage long sys_newuname(struct new_utsname __user *name);
-asmlinkage long sys_uname(struct old_utsname __user *);
-asmlinkage long sys_olduname(struct oldold_utsname __user *);
-
asmlinkage long sys_getrlimit(unsigned int resource,
struct rlimit __user *rlim);
-#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
-asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
-#endif
asmlinkage long sys_setrlimit(unsigned int resource,
struct rlimit __user *rlim);
-asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
- const struct rlimit64 __user *new_rlim,
- struct rlimit64 __user *old_rlim);
asmlinkage long sys_getrusage(int who, struct rusage __user *ru);
asmlinkage long sys_umask(int mask);
+asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
+/* kernel/time.c */
+asmlinkage long sys_gettimeofday(struct timeval __user *tv,
+ struct timezone __user *tz);
+asmlinkage long sys_settimeofday(struct timeval __user *tv,
+ struct timezone __user *tz);
+asmlinkage long sys_adjtimex(struct timex __user *txc_p);
+
+/* kernel/timer.c */
+asmlinkage long sys_getpid(void);
+asmlinkage long sys_getppid(void);
+asmlinkage long sys_getuid(void);
+asmlinkage long sys_geteuid(void);
+asmlinkage long sys_getgid(void);
+asmlinkage long sys_getegid(void);
+asmlinkage long sys_gettid(void);
+asmlinkage long sys_sysinfo(struct sysinfo __user *info);
+
+/* ipc/mqueue.c */
+asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
+asmlinkage long sys_mq_unlink(const char __user *name);
+asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout);
+asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
+asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification);
+asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat);
+
+/* ipc/msg.c */
asmlinkage long sys_msgget(key_t key, int msgflg);
-asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
- size_t msgsz, int msgflg);
+asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp,
size_t msgsz, long msgtyp, int msgflg);
-asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
+ size_t msgsz, int msgflg);
+/* ipc/sem.c */
asmlinkage long sys_semget(key_t key, int nsems, int semflg);
-asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
- unsigned nsops);
asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
unsigned nsops,
const struct timespec __user *timeout);
-asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
+asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
+ unsigned nsops);
+
+/* ipc/shm.c */
asmlinkage long sys_shmget(key_t key, size_t size, int flag);
-asmlinkage long sys_shmdt(char __user *shmaddr);
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
-asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
- unsigned long third, void __user *ptr, long fifth);
+asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
+asmlinkage long sys_shmdt(char __user *shmaddr);
-asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
-asmlinkage long sys_mq_unlink(const char __user *name);
-asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout);
-asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
-asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification);
-asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat);
+/* net/socket.c */
+asmlinkage long sys_socket(int, int, int);
+asmlinkage long sys_socketpair(int, int, int, int __user *);
+asmlinkage long sys_bind(int, struct sockaddr __user *, int);
+asmlinkage long sys_listen(int, int);
+asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *);
+asmlinkage long sys_connect(int, struct sockaddr __user *, int);
+asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
+asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
+asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
+ struct sockaddr __user *, int);
+asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned,
+ struct sockaddr __user *, int __user *);
+asmlinkage long sys_setsockopt(int fd, int level, int optname,
+ char __user *optval, int optlen);
+asmlinkage long sys_getsockopt(int fd, int level, int optname,
+ char __user *optval, int __user *optlen);
+asmlinkage long sys_shutdown(int, int);
+asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
-asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn);
-asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn,
- unsigned long off, unsigned long len,
- void __user *buf);
-asmlinkage long sys_pciconfig_write(unsigned long bus, unsigned long dfn,
- unsigned long off, unsigned long len,
- void __user *buf);
+/* mm/filemap.c */
+asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
-asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
-asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags);
-asmlinkage long sys_swapoff(const char __user *specialfile);
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args);
-asmlinkage long sys_sysinfo(struct sysinfo __user *info);
-asmlinkage long sys_sysfs(int option,
- unsigned long arg1, unsigned long arg2);
-asmlinkage long sys_syslog(int type, char __user *buf, int len);
-asmlinkage long sys_uselib(const char __user *library);
-asmlinkage long sys_ni_syscall(void);
-asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
- unsigned long data);
+/* mm/nommu.c, also with MMU */
+asmlinkage long sys_brk(unsigned long brk);
+asmlinkage long sys_munmap(unsigned long addr, size_t len);
+asmlinkage long sys_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr);
+/* security/keys/keyctl.c */
asmlinkage long sys_add_key(const char __user *_type,
const char __user *_description,
const void __user *_payload,
size_t plen,
key_serial_t destringid);
-
asmlinkage long sys_request_key(const char __user *_type,
const char __user *_description,
const char __user *_callout_info,
key_serial_t destringid);
-
asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
-asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
-asmlinkage long sys_ioprio_get(int which, int who);
-asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask,
- unsigned long maxnode);
-asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
- const unsigned long __user *from,
- const unsigned long __user *to);
-asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
- const void __user * __user *pages,
- const int __user *nodes,
- int __user *status,
- int flags);
-asmlinkage long sys_mbind(unsigned long start, unsigned long len,
- unsigned long mode,
- const unsigned long __user *nmask,
- unsigned long maxnode,
- unsigned flags);
-asmlinkage long sys_get_mempolicy(int __user *policy,
- unsigned long __user *nmask,
- unsigned long maxnode,
- unsigned long addr, unsigned long flags);
-
-asmlinkage long sys_inotify_init(void);
-asmlinkage long sys_inotify_init1(int flags);
-asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
- u32 mask);
-asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd);
-
-asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
- __u32 __user *ustatus);
-asmlinkage long sys_spu_create(const char __user *name,
- unsigned int flags, umode_t mode, int fd);
-
-asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode,
- unsigned dev);
-asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode);
-asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag);
-asmlinkage long sys_symlinkat(const char __user * oldname,
- int newdfd, const char __user * newname);
-asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
- int newdfd, const char __user *newname, int flags);
-asmlinkage long sys_renameat(int olddfd, const char __user * oldname,
- int newdfd, const char __user * newname);
-asmlinkage long sys_renameat2(int olddfd, const char __user *oldname,
- int newdfd, const char __user *newname,
- unsigned int flags);
-asmlinkage long sys_futimesat(int dfd, const char __user *filename,
- struct timeval __user *utimes);
-asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode);
-asmlinkage long sys_fchmodat(int dfd, const char __user * filename,
- umode_t mode);
-asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
- gid_t group, int flag);
-asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
- umode_t mode);
-asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
- struct stat __user *statbuf, int flag);
-asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
- int bufsiz);
-asmlinkage long sys_utimensat(int dfd, const char __user *filename,
- struct timespec __user *utimes, int flags);
-asmlinkage long sys_unshare(unsigned long unshare_flags);
-
-asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
- int fd_out, loff_t __user *off_out,
- size_t len, unsigned int flags);
-
-asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
- unsigned long nr_segs, unsigned int flags);
-
-asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags);
-
-asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
- unsigned int flags);
-asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
- loff_t offset, loff_t nbytes);
-asmlinkage long sys_get_robust_list(int pid,
- struct robust_list_head __user * __user *head_ptr,
- size_t __user *len_ptr);
-asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
- size_t len);
-asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
-asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask);
-asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags);
-asmlinkage long sys_timerfd_create(int clockid, int flags);
-asmlinkage long sys_timerfd_settime(int ufd, int flags,
- const struct itimerspec __user *utmr,
- struct itimerspec __user *otmr);
-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
-asmlinkage long sys_eventfd(unsigned int count);
-asmlinkage long sys_eventfd2(unsigned int count, int flags);
-asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
-asmlinkage long sys_userfaultfd(int flags);
-asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
-asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
-asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
- fd_set __user *, struct timespec __user *,
- void __user *);
-asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
- struct timespec __user *, const sigset_t __user *,
- size_t);
-asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags);
-asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
- u64 mask, int fd,
- const char __user *pathname);
-asmlinkage long sys_syncfs(int fd);
-
-asmlinkage long sys_fork(void);
-asmlinkage long sys_vfork(void);
+/* arch/example/kernel/sys_example.c */
#ifdef CONFIG_CLONE_BACKWARDS
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long,
int __user *);
@@ -881,26 +738,80 @@ asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
int __user *, unsigned long);
#endif
#endif
-
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp);
+/* mm/fadvise.c */
+asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
+
+/* mm/, CONFIG_MMU only */
+asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags);
+asmlinkage long sys_swapoff(const char __user *specialfile);
+asmlinkage long sys_mprotect(unsigned long start, size_t len,
+ unsigned long prot);
+asmlinkage long sys_msync(unsigned long start, size_t len, int flags);
+asmlinkage long sys_mlock(unsigned long start, size_t len);
+asmlinkage long sys_munlock(unsigned long start, size_t len);
+asmlinkage long sys_mlockall(int flags);
+asmlinkage long sys_munlockall(void);
+asmlinkage long sys_mincore(unsigned long start, size_t len,
+ unsigned char __user * vec);
+asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior);
+asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
+ unsigned long prot, unsigned long pgoff,
+ unsigned long flags);
+asmlinkage long sys_mbind(unsigned long start, unsigned long len,
+ unsigned long mode,
+ const unsigned long __user *nmask,
+ unsigned long maxnode,
+ unsigned flags);
+asmlinkage long sys_get_mempolicy(int __user *policy,
+ unsigned long __user *nmask,
+ unsigned long maxnode,
+ unsigned long addr, unsigned long flags);
+asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask,
+ unsigned long maxnode);
+asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
+ const unsigned long __user *from,
+ const unsigned long __user *to);
+asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
+ const void __user * __user *pages,
+ const int __user *nodes,
+ int __user *status,
+ int flags);
+
+asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
+ siginfo_t __user *uinfo);
asmlinkage long sys_perf_event_open(
struct perf_event_attr __user *attr_uptr,
pid_t pid, int cpu, int group_fd, unsigned long flags);
+asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
+asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
+ unsigned int vlen, unsigned flags,
+ struct timespec __user *timeout);
-asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg);
+asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
+ int options, struct rusage __user *ru);
+asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
+ const struct rlimit64 __user *new_rlim,
+ struct rlimit64 __user *old_rlim);
+asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags);
+asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ u64 mask, int fd,
+ const char __user *pathname);
asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
struct file_handle __user *handle,
int __user *mnt_id, int flag);
asmlinkage long sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
+asmlinkage long sys_clock_adjtime(clockid_t which_clock,
+ struct timex __user *tx);
+asmlinkage long sys_syncfs(int fd);
asmlinkage long sys_setns(int fd, int nstype);
+asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
+ unsigned int vlen, unsigned flags);
asmlinkage long sys_process_vm_readv(pid_t pid,
const struct iovec __user *lvec,
unsigned long liovcnt,
@@ -913,27 +824,40 @@ asmlinkage long sys_process_vm_writev(pid_t pid,
const struct iovec __user *rvec,
unsigned long riovcnt,
unsigned long flags);
-
asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
unsigned long idx1, unsigned long idx2);
asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags);
+asmlinkage long sys_sched_setattr(pid_t pid,
+ struct sched_attr __user *attr,
+ unsigned int flags);
+asmlinkage long sys_sched_getattr(pid_t pid,
+ struct sched_attr __user *attr,
+ unsigned int size,
+ unsigned int flags);
+asmlinkage long sys_renameat2(int olddfd, const char __user *oldname,
+ int newdfd, const char __user *newname,
+ unsigned int flags);
asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
const char __user *uargs);
asmlinkage long sys_getrandom(char __user *buf, size_t count,
unsigned int flags);
+asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
-
asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
-
+asmlinkage long sys_userfaultfd(int flags);
asmlinkage long sys_membarrier(int cmd, int flags);
+asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags);
asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in,
int fd_out, loff_t __user *off_out,
size_t len, unsigned int flags);
-
-asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags);
-
+asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
+ rwf_t flags);
+asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
+ rwf_t flags);
asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
unsigned long prot, int pkey);
asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
@@ -941,4 +865,379 @@ asmlinkage long sys_pkey_free(int pkey);
asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
unsigned mask, struct statx __user *buffer);
+
+/*
+ * Architecture-specific system calls
+ */
+
+/* arch/x86/kernel/ioport.c */
+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
+
+/* pciconfig: alpha, arm, arm64, ia64, sparc */
+asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn,
+ unsigned long off, unsigned long len,
+ void __user *buf);
+asmlinkage long sys_pciconfig_write(unsigned long bus, unsigned long dfn,
+ unsigned long off, unsigned long len,
+ void __user *buf);
+asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn);
+
+/* powerpc */
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
+ __u32 __user *ustatus);
+asmlinkage long sys_spu_create(const char __user *name,
+ unsigned int flags, umode_t mode, int fd);
+
+
+/*
+ * Deprecated system calls which are still defined in
+ * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch
+ */
+
+/* __ARCH_WANT_SYSCALL_NO_AT */
+asmlinkage long sys_open(const char __user *filename,
+ int flags, umode_t mode);
+asmlinkage long sys_link(const char __user *oldname,
+ const char __user *newname);
+asmlinkage long sys_unlink(const char __user *pathname);
+asmlinkage long sys_mknod(const char __user *filename, umode_t mode,
+ unsigned dev);
+asmlinkage long sys_chmod(const char __user *filename, umode_t mode);
+asmlinkage long sys_chown(const char __user *filename,
+ uid_t user, gid_t group);
+asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
+asmlinkage long sys_rmdir(const char __user *pathname);
+asmlinkage long sys_lchown(const char __user *filename,
+ uid_t user, gid_t group);
+asmlinkage long sys_access(const char __user *filename, int mode);
+asmlinkage long sys_rename(const char __user *oldname,
+ const char __user *newname);
+asmlinkage long sys_symlink(const char __user *old, const char __user *new);
+asmlinkage long sys_utimes(char __user *filename,
+ struct timeval __user *utimes);
+#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
+asmlinkage long sys_stat64(const char __user *filename,
+ struct stat64 __user *statbuf);
+asmlinkage long sys_lstat64(const char __user *filename,
+ struct stat64 __user *statbuf);
+#endif
+
+/* __ARCH_WANT_SYSCALL_NO_FLAGS */
+asmlinkage long sys_pipe(int __user *fildes);
+asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
+asmlinkage long sys_epoll_create(int size);
+asmlinkage long sys_inotify_init(void);
+asmlinkage long sys_eventfd(unsigned int count);
+asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask);
+
+/* __ARCH_WANT_SYSCALL_OFF_T */
+asmlinkage long sys_sendfile(int out_fd, int in_fd,
+ off_t __user *offset, size_t count);
+asmlinkage long sys_newstat(const char __user *filename,
+ struct stat __user *statbuf);
+asmlinkage long sys_newlstat(const char __user *filename,
+ struct stat __user *statbuf);
+asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice);
+
+/* __ARCH_WANT_SYSCALL_DEPRECATED */
+asmlinkage long sys_alarm(unsigned int seconds);
+asmlinkage long sys_getpgrp(void);
+asmlinkage long sys_pause(void);
+asmlinkage long sys_time(time_t __user *tloc);
+asmlinkage long sys_utime(char __user *filename,
+ struct utimbuf __user *times);
+asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
+asmlinkage long sys_getdents(unsigned int fd,
+ struct linux_dirent __user *dirent,
+ unsigned int count);
+asmlinkage long sys_futimesat(int dfd, const char __user *filename,
+ struct timeval __user *utimes);
+asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ fd_set __user *exp, struct timeval __user *tvp);
+asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
+ int timeout);
+asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
+ int maxevents, int timeout);
+asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf);
+asmlinkage long sys_vfork(void);
+asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
+asmlinkage long sys_send(int, void __user *, size_t, unsigned);
+asmlinkage long sys_bdflush(int func, long data);
+asmlinkage long sys_oldumount(char __user *name);
+asmlinkage long sys_uselib(const char __user *library);
+asmlinkage long sys_sysctl(struct __sysctl_args __user *args);
+asmlinkage long sys_sysfs(int option,
+ unsigned long arg1, unsigned long arg2);
+asmlinkage long sys_fork(void);
+
+/* obsolete: kernel/time/time.c */
+asmlinkage long sys_stime(time_t __user *tptr);
+
+/* obsolete: kernel/signal.c */
+asmlinkage long sys_sigpending(old_sigset_t __user *uset);
+asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set,
+ old_sigset_t __user *oset);
+#ifdef CONFIG_OLD_SIGSUSPEND
+asmlinkage long sys_sigsuspend(old_sigset_t mask);
+#endif
+
+#ifdef CONFIG_OLD_SIGSUSPEND3
+asmlinkage long sys_sigsuspend(int unused1, int unused2, old_sigset_t mask);
+#endif
+
+#ifdef CONFIG_OLD_SIGACTION
+asmlinkage long sys_sigaction(int, const struct old_sigaction __user *,
+ struct old_sigaction __user *);
+#endif
+asmlinkage long sys_sgetmask(void);
+asmlinkage long sys_ssetmask(int newmask);
+asmlinkage long sys_signal(int sig, __sighandler_t handler);
+
+/* obsolete: kernel/sched/core.c */
+asmlinkage long sys_nice(int increment);
+
+/* obsolete: kernel/kexec_file.c */
+asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd,
+ unsigned long cmdline_len,
+ const char __user *cmdline_ptr,
+ unsigned long flags);
+
+/* obsolete: kernel/exit.c */
+asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
+
+/* obsolete: kernel/uid16.c */
+#ifdef CONFIG_HAVE_UID16
+asmlinkage long sys_chown16(const char __user *filename,
+ old_uid_t user, old_gid_t group);
+asmlinkage long sys_lchown16(const char __user *filename,
+ old_uid_t user, old_gid_t group);
+asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group);
+asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid);
+asmlinkage long sys_setgid16(old_gid_t gid);
+asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid);
+asmlinkage long sys_setuid16(old_uid_t uid);
+asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid);
+asmlinkage long sys_getresuid16(old_uid_t __user *ruid,
+ old_uid_t __user *euid, old_uid_t __user *suid);
+asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid);
+asmlinkage long sys_getresgid16(old_gid_t __user *rgid,
+ old_gid_t __user *egid, old_gid_t __user *sgid);
+asmlinkage long sys_setfsuid16(old_uid_t uid);
+asmlinkage long sys_setfsgid16(old_gid_t gid);
+asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist);
+asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist);
+asmlinkage long sys_getuid16(void);
+asmlinkage long sys_geteuid16(void);
+asmlinkage long sys_getgid16(void);
+asmlinkage long sys_getegid16(void);
+#endif
+
+/* obsolete: net/socket.c */
+asmlinkage long sys_socketcall(int call, unsigned long __user *args);
+
+/* obsolete: fs/stat.c */
+asmlinkage long sys_stat(const char __user *filename,
+ struct __old_kernel_stat __user *statbuf);
+asmlinkage long sys_lstat(const char __user *filename,
+ struct __old_kernel_stat __user *statbuf);
+asmlinkage long sys_fstat(unsigned int fd,
+ struct __old_kernel_stat __user *statbuf);
+asmlinkage long sys_readlink(const char __user *path,
+ char __user *buf, int bufsiz);
+
+/* obsolete: fs/select.c */
+asmlinkage long sys_old_select(struct sel_arg_struct __user *arg);
+
+/* obsolete: fs/readdir.c */
+asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
+
+/* obsolete: kernel/sys.c */
+asmlinkage long sys_gethostname(char __user *name, int len);
+asmlinkage long sys_uname(struct old_utsname __user *);
+asmlinkage long sys_olduname(struct oldold_utsname __user *);
+#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
+asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
+#endif
+
+/* obsolete: ipc */
+asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
+ unsigned long third, void __user *ptr, long fifth);
+
+/* obsolete: mm/ */
+asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg);
+
+
+/*
+ * Not a real system call, but a placeholder for syscalls which are
+ * not implemented -- see kernel/sys_ni.c
+ */
+asmlinkage long sys_ni_syscall(void);
+
+
+/*
+ * Kernel code should not call syscalls (i.e., sys_xyzyyz()) directly.
+ * Instead, use one of the functions which work equivalently, such as
+ * the ksys_xyzyyz() functions prototyped below.
+ */
+
+int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type,
+ unsigned long flags, void __user *data);
+int ksys_umount(char __user *name, int flags);
+int ksys_dup(unsigned int fildes);
+int ksys_chroot(const char __user *filename);
+ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count);
+int ksys_chdir(const char __user *filename);
+int ksys_fchmod(unsigned int fd, umode_t mode);
+int ksys_fchown(unsigned int fd, uid_t user, gid_t group);
+int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
+ unsigned int count);
+int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
+off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence);
+ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count);
+void ksys_sync(void);
+int ksys_unshare(unsigned long unshare_flags);
+int ksys_setsid(void);
+int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
+ unsigned int flags);
+ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count,
+ loff_t pos);
+ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
+ size_t count, loff_t pos);
+int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len);
+#ifdef CONFIG_ADVISE_SYSCALLS
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
+#else
+static inline int ksys_fadvise64_64(int fd, loff_t offset, loff_t len,
+ int advice)
+{
+ return -EINVAL;
+}
+#endif
+unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+ssize_t ksys_readahead(int fd, loff_t offset, size_t count);
+
+/*
+ * The following kernel syscall equivalents are just wrappers to fs-internal
+ * functions. Therefore, provide stubs to be inlined at the callsites.
+ */
+extern long do_unlinkat(int dfd, struct filename *name);
+
+static inline long ksys_unlink(const char __user *pathname)
+{
+ return do_unlinkat(AT_FDCWD, getname(pathname));
+}
+
+extern long do_rmdir(int dfd, const char __user *pathname);
+
+static inline long ksys_rmdir(const char __user *pathname)
+{
+ return do_rmdir(AT_FDCWD, pathname);
+}
+
+extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode);
+
+static inline long ksys_mkdir(const char __user *pathname, umode_t mode)
+{
+ return do_mkdirat(AT_FDCWD, pathname, mode);
+}
+
+extern long do_symlinkat(const char __user *oldname, int newdfd,
+ const char __user *newname);
+
+static inline long ksys_symlink(const char __user *oldname,
+ const char __user *newname)
+{
+ return do_symlinkat(oldname, AT_FDCWD, newname);
+}
+
+extern long do_mknodat(int dfd, const char __user *filename, umode_t mode,
+ unsigned int dev);
+
+static inline long ksys_mknod(const char __user *filename, umode_t mode,
+ unsigned int dev)
+{
+ return do_mknodat(AT_FDCWD, filename, mode, dev);
+}
+
+extern int do_linkat(int olddfd, const char __user *oldname, int newdfd,
+ const char __user *newname, int flags);
+
+static inline long ksys_link(const char __user *oldname,
+ const char __user *newname)
+{
+ return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
+}
+
+extern int do_fchmodat(int dfd, const char __user *filename, umode_t mode);
+
+static inline int ksys_chmod(const char __user *filename, umode_t mode)
+{
+ return do_fchmodat(AT_FDCWD, filename, mode);
+}
+
+extern long do_faccessat(int dfd, const char __user *filename, int mode);
+
+static inline long ksys_access(const char __user *filename, int mode)
+{
+ return do_faccessat(AT_FDCWD, filename, mode);
+}
+
+extern int do_fchownat(int dfd, const char __user *filename, uid_t user,
+ gid_t group, int flag);
+
+static inline long ksys_chown(const char __user *filename, uid_t user,
+ gid_t group)
+{
+ return do_fchownat(AT_FDCWD, filename, user, group, 0);
+}
+
+static inline long ksys_lchown(const char __user *filename, uid_t user,
+ gid_t group)
+{
+ return do_fchownat(AT_FDCWD, filename, user, group,
+ AT_SYMLINK_NOFOLLOW);
+}
+
+extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+
+static inline long ksys_ftruncate(unsigned int fd, unsigned long length)
+{
+ return do_sys_ftruncate(fd, length, 1);
+}
+
+extern int __close_fd(struct files_struct *files, unsigned int fd);
+
+/*
+ * In contrast to sys_close(), this stub does not check whether the syscall
+ * should or should not be restarted, but returns the raw error codes from
+ * __close_fd().
+ */
+static inline int ksys_close(unsigned int fd)
+{
+ return __close_fd(current->files, fd);
+}
+
+extern long do_sys_open(int dfd, const char __user *filename, int flags,
+ umode_t mode);
+
+static inline long ksys_open(const char __user *filename, int flags,
+ umode_t mode)
+{
+ if (force_o_largefile())
+ flags |= O_LARGEFILE;
+ return do_sys_open(AT_FDCWD, filename, flags, mode);
+}
+
+extern long do_sys_truncate(const char __user *pathname, loff_t length);
+
+static inline long ksys_truncate(const char __user *pathname, loff_t length)
+{
+ return do_sys_truncate(pathname, length);
+}
+
#endif
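
The new ksys_*() helpers exist so that in-kernel users (initramfs unpacking, devtmpfs and friends) no longer invoke the sys_*() entry points directly; most of them simply forward to the fs-internal do_*() helpers with AT_FDCWD. A loose sketch of a caller, purely illustrative and not part of this patch (the __user annotations are glossed over here, just as they are for the early-boot callers this series targets):

/* Hypothetical early-boot helper: create a directory and a device node
 * through the ksys_*() wrappers declared above instead of sys_*().
 */
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/stat.h>
#include <linux/syscalls.h>

static int __init example_populate(void)
{
	int err;

	err = ksys_mkdir("/dev", 0755);		/* do_mkdirat(AT_FDCWD, ...) */
	if (err && err != -EEXIST)
		return err;

	/* character device 5:1, i.e. /dev/console */
	err = ksys_mknod("/dev/console", S_IFCHR | 0600, MKDEV(5, 1));
	if (err && err != -EEXIST)
		return err;

	return 0;
}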
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 992bc9948232..b769ecfcc3bd 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -51,9 +51,6 @@ extern int proc_dointvec_minmax(struct ctl_table *, int,
extern int proc_douintvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
-extern int proc_dopipe_max_size(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 40839c02d28c..b8bfdc173ec0 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -113,7 +113,7 @@ struct attribute_group {
}
#define __ATTR_RO(_name) { \
- .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
.show = _name##_show, \
}
@@ -124,12 +124,11 @@ struct attribute_group {
}
#define __ATTR_WO(_name) { \
- .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
+ .attr = { .name = __stringify(_name), .mode = 0200 }, \
.store = _name##_store, \
}
-#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \
- _name##_show, _name##_store)
+#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)
#define __ATTR_NULL { .attr = { .name = NULL } }
@@ -192,14 +191,13 @@ struct bin_attribute {
}
#define __BIN_ATTR_RO(_name, _size) { \
- .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
.read = _name##_read, \
.size = _size, \
}
-#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \
- (S_IWUSR | S_IRUGO), _name##_read, \
- _name##_write, _size)
+#define __BIN_ATTR_RW(_name, _size) \
+ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size)
#define __BIN_ATTR_NULL __ATTR_NULL
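
With the attribute macros now spelled with octal modes, driver-side usage is unchanged: DEVICE_ATTR_RW() expands to __ATTR_RW(), i.e. mode 0644, and wires up the show/store callbacks by name. A minimal sketch (hypothetical attribute, not from this patch):

/* Hypothetical read-write "threshold" device attribute. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static unsigned int threshold;

static ssize_t threshold_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", threshold);
}

static ssize_t threshold_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int err = kstrtouint(buf, 0, &threshold);

	return err ? err : count;
}
static DEVICE_ATTR_RW(threshold);	/* .attr.mode == 0644 */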
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca4a6361389b..8f4c54986f97 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -335,6 +335,17 @@ struct tcp_sock {
int linger2;
+
+/* Sock_ops bpf program related variables */
+#ifdef CONFIG_BPF
+ u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs
+ * values defined in uapi/linux/tcp.h
+ */
+#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
+#else
+#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
+#endif
+
/* Receiver side RTT estimation */
struct {
u32 rtt_us;
@@ -344,7 +355,7 @@ struct tcp_sock {
/* Receiver queue space */
struct {
- int space;
+ u32 space;
u32 seq;
u64 time;
} rcvq_space;
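
BPF_SOCK_OPS_TEST_FLAG() lets TCP call sites skip a sock_ops BPF callback unless the attached program opted in to that callback class (the flag values live in uapi/linux/tcp.h); with CONFIG_BPF disabled the test is constant 0 and the branch compiles away. A minimal sketch of the call pattern, with a hypothetical reporting helper standing in for the real tcp_call_bpf() users:

#include <linux/tcp.h>

/* hypothetical hook standing in for a tcp_call_bpf()-style call */
static void ex_report_rto_to_bpf(struct sock *sk)
{
}

static void ex_on_rto(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Only pay for the callback if the sock_ops program enabled it
	 * via bpf_sock_ops_cb_flags_set(BPF_SOCK_OPS_RTO_CB_FLAG).
	 */
	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		ex_report_rto_to_bpf(sk);
}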
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index cb889afe576b..a2b3dfcee0b5 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/idr.h>
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/tee.h>
@@ -25,8 +26,12 @@
* specific TEE driver.
*/
-#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */
-#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */
+#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */
+#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */
+#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */
+#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */
+#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */
+#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */
struct device;
struct tee_device;
@@ -38,11 +43,17 @@ struct tee_shm_pool;
* @teedev: pointer to this drivers struct tee_device
* @list_shm: List of shared memory object owned by this context
* @data: driver specific context data, managed by the driver
+ * @refcount: reference counter for this structure
+ * @releasing: flag that indicates if the context is being released right
+ * now. It is needed to break the circular dependency on the context
+ * during shared memory release.
*/
struct tee_context {
struct tee_device *teedev;
struct list_head list_shm;
void *data;
+ struct kref refcount;
+ bool releasing;
};
struct tee_param_memref {
@@ -76,6 +87,8 @@ struct tee_param {
* @cancel_req: request cancel of an ongoing invoke or open
* @supp_revc: called for supplicant to get a command
* @supp_send: called for supplicant to send a response
+ * @shm_register: register shared memory buffer in TEE
+ * @shm_unregister: unregister shared memory buffer in TEE
*/
struct tee_driver_ops {
void (*get_version)(struct tee_device *teedev,
@@ -94,6 +107,10 @@ struct tee_driver_ops {
struct tee_param *param);
int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
struct tee_param *param);
+ int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start);
+ int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
};
/**
@@ -150,6 +167,97 @@ int tee_device_register(struct tee_device *teedev);
void tee_device_unregister(struct tee_device *teedev);
/**
+ * struct tee_shm - shared memory object
+ * @teedev: device used to allocate the object
+ * @ctx: context using the object, if NULL the context is gone
+ * @link: link element
+ * @paddr: physical address of the shared memory
+ * @kaddr: virtual address of the shared memory
+ * @size: size of shared memory
+ * @offset: offset of buffer in user space
+ * @pages: locked pages from userspace
+ * @num_pages: number of locked pages
+ * @dmabuf: dmabuf used for exporting to user space
+ * @flags: defined by TEE_SHM_* in tee_drv.h
+ * @id: unique id of a shared memory object on this device
+ *
+ * This structure is only supposed to be accessed directly from the TEE
+ * subsystem and from drivers that implement their own shm pool manager.
+ */
+struct tee_shm {
+ struct tee_device *teedev;
+ struct tee_context *ctx;
+ struct list_head link;
+ phys_addr_t paddr;
+ void *kaddr;
+ size_t size;
+ unsigned int offset;
+ struct page **pages;
+ size_t num_pages;
+ struct dma_buf *dmabuf;
+ u32 flags;
+ int id;
+};
+
+/**
+ * struct tee_shm_pool_mgr - shared memory manager
+ * @ops: operations
+ * @private_data: private data for the shared memory manager
+ */
+struct tee_shm_pool_mgr {
+ const struct tee_shm_pool_mgr_ops *ops;
+ void *private_data;
+};
+
+/**
+ * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * @alloc: called when allocating shared memory
+ * @free: called when freeing shared memory
+ * @destroy_poolmgr: called when destroying the pool manager
+ */
+struct tee_shm_pool_mgr_ops {
+ int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
+ size_t size);
+ void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
+ void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
+};
+
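
A driver-private pool manager only needs to provide these three hooks. A minimal sketch backed by genalloc, assuming the driver stashes a struct gen_pool * in @private_data (the TEE core's reserved-memory manager works along these lines, but this is not its code):

#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>

static int ex_pool_op_alloc(struct tee_shm_pool_mgr *poolmgr,
			    struct tee_shm *shm, size_t size)
{
	struct gen_pool *genpool = poolmgr->private_data;
	unsigned long va = gen_pool_alloc(genpool, size);

	if (!va)
		return -ENOMEM;

	shm->kaddr = (void *)va;
	shm->paddr = gen_pool_virt_to_phys(genpool, va);
	shm->size = size;
	return 0;
}

static void ex_pool_op_free(struct tee_shm_pool_mgr *poolmgr,
			    struct tee_shm *shm)
{
	gen_pool_free(poolmgr->private_data, (unsigned long)shm->kaddr,
		      shm->size);
	shm->kaddr = NULL;
}

static void ex_pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolmgr)
{
	gen_pool_destroy(poolmgr->private_data);
	kfree(poolmgr);
}

static const struct tee_shm_pool_mgr_ops ex_pool_ops = {
	.alloc = ex_pool_op_alloc,
	.free = ex_pool_op_free,
	.destroy_poolmgr = ex_pool_op_destroy_poolmgr,
};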
+/**
+ * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
+ * @priv_mgr: manager for driver private shared memory allocations
+ * @dmabuf_mgr: manager for dma-buf shared memory allocations
+ *
+ * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf_mgr, others will use the range provided by @priv_mgr.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+ struct tee_shm_pool_mgr *dmabuf_mgr);
+
+/**
+ * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
+ * memory
+ * @vaddr: Virtual address of start of pool
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ * @min_alloc_order: Log2 of the minimum allocation size
+ *
+ * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr,
+ size_t size,
+ int min_alloc_order);
+
+/**
+ * tee_shm_pool_mgr_destroy() - Free a shared memory manager
+ * @poolm: Pool manager to free
+ */
+static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
+{
+ poolm->ops->destroy_poolmgr(poolm);
+}
+
+/**
* struct tee_shm_pool_mem_info - holds information needed to create a shared
* memory pool
* @vaddr: Virtual address of start of pool
@@ -211,6 +319,40 @@ void *tee_get_drvdata(struct tee_device *teedev);
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
/**
+ * tee_shm_priv_alloc() - Allocate shared memory privately
+ * @teedev: Device that allocates the shared memory
+ * @size: Requested size of shared memory
+ *
+ * Allocates shared memory buffer that is not associated with any client
+ * context. Such buffers are owned by the TEE driver and used for internal calls.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);
+
+/**
+ * tee_shm_register() - Register shared memory buffer
+ * @ctx: Context that registers the shared memory
+ * @addr: Address in userspace of the shared buffer
+ * @length: Length of the shared buffer
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+ size_t length, u32 flags);
+
+/**
+ * tee_shm_is_registered() - Check if shared memory object is registered in TEE
+ * @shm: Shared memory handle
+ * @returns true if object is registered in TEE
+ */
+static inline bool tee_shm_is_registered(struct tee_shm *shm)
+{
+ return shm && (shm->flags & TEE_SHM_REGISTER);
+}
+
+/**
* tee_shm_free() - Free shared memory
* @shm: Handle to shared memory to free
*/
@@ -260,11 +402,47 @@ void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
/**
+ * tee_shm_get_size() - Get size of shared memory buffer
+ * @shm: Shared memory handle
+ * @returns size of shared memory
+ */
+static inline size_t tee_shm_get_size(struct tee_shm *shm)
+{
+ return shm->size;
+}
+
+/**
+ * tee_shm_get_pages() - Get list of pages that hold shared buffer
+ * @shm: Shared memory handle
+ * @num_pages: Location where the number of pages is stored
+ * @returns pointer to pages array
+ */
+static inline struct page **tee_shm_get_pages(struct tee_shm *shm,
+ size_t *num_pages)
+{
+ *num_pages = shm->num_pages;
+ return shm->pages;
+}
+
+/**
+ * tee_shm_get_page_offset() - Get shared buffer offset from page start
+ * @shm: Shared memory handle
+ * @returns page offset of shared buffer
+ */
+static inline size_t tee_shm_get_page_offset(struct tee_shm *shm)
+{
+ return shm->offset;
+}
+
+/**
* tee_shm_get_id() - Get id of a shared memory object
* @shm: Shared memory handle
* @returns id
*/
-int tee_shm_get_id(struct tee_shm *shm);
+static inline int tee_shm_get_id(struct tee_shm *shm)
+{
+ return shm->id;
+}
/**
* tee_shm_get_from_id() - Find shared memory object and increase reference
@@ -275,4 +453,16 @@ int tee_shm_get_id(struct tee_shm *shm);
*/
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
+static inline bool tee_param_is_memref(struct tee_param *param)
+{
+ switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ return true;
+ default:
+ return false;
+ }
+}
+
#endif /*__TEE_DRV_H*/
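
Taken together, the registration call and the new accessors let a driver hand a userspace buffer to the secure world. A rough sketch (hypothetical function; the flag combination and the minimal error handling are assumptions, not taken from this patch):

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/tee_drv.h>

static int ex_share_user_buf(struct tee_context *ctx, unsigned long uaddr,
			     size_t len)
{
	struct tee_shm *shm;
	struct page **pages;
	size_t num_pages;

	shm = tee_shm_register(ctx, uaddr, len,
			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	if (!tee_shm_is_registered(shm)) {
		/* driver did not implement shm_register() */
		tee_shm_free(shm);
		return -EINVAL;
	}

	pages = tee_shm_get_pages(shm, &num_pages);
	pr_debug("shm id %d: %zu pages at %p, page offset %zu\n",
		 tee_shm_get_id(shm), num_pages, pages,
		 tee_shm_get_page_offset(shm));

	/* ... pass pages/num_pages to the secure world here ... */

	tee_shm_free(shm);
	return 0;
}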
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 7b69853188b1..a3ed26082bc1 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -45,12 +45,16 @@ enum tb_cfg_pkg_type {
* @TB_SECURITY_USER: User approval required at minimum
* @TB_SECURITY_SECURE: One time saved key required at minimum
* @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
+ * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
+ * Thunderbolt dock (and Display Port). All PCIe
+ * links downstream of the dock are removed.
*/
enum tb_security_level {
TB_SECURITY_NONE,
TB_SECURITY_USER,
TB_SECURITY_SECURE,
TB_SECURITY_DPONLY,
+ TB_SECURITY_USBONLY,
};
/**
@@ -65,6 +69,7 @@ enum tb_security_level {
* @cm_ops: Connection manager specific operations vector
* @index: Linux assigned domain number
* @security_level: Current security level
+ * @nboot_acl: Number of boot ACLs the domain supports
* @privdata: Private connection manager specific data
*/
struct tb {
@@ -77,6 +82,7 @@ struct tb {
const struct tb_cm_ops *cm_ops;
int index;
enum tb_security_level security_level;
+ size_t nboot_acl;
unsigned long privdata[0];
};
@@ -237,6 +243,7 @@ int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
+struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
@@ -250,6 +257,18 @@ tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
return xd;
}
+static inline struct tb_xdomain *
+tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
+{
+ struct tb_xdomain *xd;
+
+ mutex_lock(&tb->lock);
+ xd = tb_xdomain_find_by_route(tb, route);
+ mutex_unlock(&tb->lock);
+
+ return xd;
+}
+
static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
if (xd)
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h
new file mode 100644
index 000000000000..45bc6b376492
--- /dev/null
+++ b/include/linux/ti-emif-sram.h
@@ -0,0 +1,69 @@
+/*
+ * TI AM33XX EMIF Routines
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Inc.
+ * Dave Gerlach
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LINUX_TI_EMIF_H
+#define __LINUX_TI_EMIF_H
+
+#include <linux/kbuild.h>
+#include <linux/types.h>
+#ifndef __ASSEMBLY__
+
+struct emif_regs_amx3 {
+ u32 emif_sdcfg_val;
+ u32 emif_timing1_val;
+ u32 emif_timing2_val;
+ u32 emif_timing3_val;
+ u32 emif_ref_ctrl_val;
+ u32 emif_zqcfg_val;
+ u32 emif_pmcr_val;
+ u32 emif_pmcr_shdw_val;
+ u32 emif_rd_wr_level_ramp_ctrl;
+ u32 emif_rd_wr_exec_thresh;
+ u32 emif_cos_config;
+ u32 emif_priority_to_cos_mapping;
+ u32 emif_connect_id_serv_1_map;
+ u32 emif_connect_id_serv_2_map;
+ u32 emif_ocp_config_val;
+ u32 emif_lpddr2_nvm_tim;
+ u32 emif_lpddr2_nvm_tim_shdw;
+ u32 emif_dll_calib_ctrl_val;
+ u32 emif_dll_calib_ctrl_val_shdw;
+ u32 emif_ddr_phy_ctlr_1;
+ u32 emif_ext_phy_ctrl_vals[120];
+};
+
+struct ti_emif_pm_data {
+ void __iomem *ti_emif_base_addr_virt;
+ phys_addr_t ti_emif_base_addr_phys;
+ unsigned long ti_emif_sram_config;
+ struct emif_regs_amx3 *regs_virt;
+ phys_addr_t regs_phys;
+} __packed __aligned(8);
+
+struct ti_emif_pm_functions {
+ u32 save_context;
+ u32 restore_context;
+ u32 enter_sr;
+ u32 exit_sr;
+ u32 abort_sr;
+} __packed __aligned(8);
+
+struct gen_pool;
+
+int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
+int ti_emif_get_mem_type(void);
+
+#endif
+#endif /* __LINUX_TI_EMIF_H */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7cc35921218e..55388ab45fd4 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -113,27 +113,48 @@ enum tick_dep_bits {
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
-extern int tick_nohz_tick_stopped(void);
+extern bool tick_nohz_tick_stopped(void);
+extern bool tick_nohz_tick_stopped_cpu(int cpu);
+extern void tick_nohz_idle_stop_tick(void);
+extern void tick_nohz_idle_retain_tick(void);
+extern void tick_nohz_idle_restart_tick(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
-extern ktime_t tick_nohz_get_sleep_length(void);
+extern bool tick_nohz_idle_got_tick(void);
+extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
+
+static inline void tick_nohz_idle_stop_tick_protected(void)
+{
+ local_irq_disable();
+ tick_nohz_idle_stop_tick();
+ local_irq_enable();
+}
+
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
+static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
+static inline void tick_nohz_idle_stop_tick(void) { }
+static inline void tick_nohz_idle_retain_tick(void) { }
+static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
+static inline bool tick_nohz_idle_got_tick(void) { return false; }
-static inline ktime_t tick_nohz_get_sleep_length(void)
+static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
- return NSEC_PER_SEC / HZ;
+ *delta_next = TICK_NSEC;
+ return *delta_next;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
+
+static inline void tick_nohz_idle_stop_tick_protected(void) { }
#endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
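
Splitting tick_nohz_idle_enter() from the actual tick-stop decision lets the idle loop choose per iteration whether the tick should really be stopped. A simplified sketch of the intended ordering (the real do_idle() in kernel/sched/idle.c adds cpuidle governor and polling logic; this only shows the shape of the API):

#include <linux/sched.h>
#include <linux/tick.h>

static void ex_idle_loop(bool stop_tick)
{
	tick_nohz_idle_enter();

	while (!need_resched()) {
		if (stop_tick)
			tick_nohz_idle_stop_tick_protected();
		else
			tick_nohz_idle_retain_tick();

		/* the CPU would enter a low-power state here */
	}

	tick_nohz_idle_exit();
}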
diff --git a/include/linux/time32.h b/include/linux/time32.h
index 65b1de25198d..d2bcd4377b56 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -217,5 +217,6 @@ static inline s64 timeval_to_ns(const struct timeval *tv)
* Returns the timeval representation of the nsec parameter.
*/
extern struct timeval ns_to_timeval(const s64 nsec);
+extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
#endif
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index d315c3d6725c..4b3dca173e89 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -52,6 +52,7 @@ struct tk_read_base {
* @offs_real: Offset clock monotonic -> clock realtime
* @offs_boot: Offset clock monotonic -> clock boottime
* @offs_tai: Offset clock monotonic -> clock tai
+ * @time_suspended: Accumulated suspend time
* @tai_offset: The current UTC to TAI offset in seconds
* @clock_was_set_seq: The sequence number of clock was set events
* @cs_was_changed_seq: The sequence number of clocksource change events
@@ -94,6 +95,7 @@ struct timekeeper {
ktime_t offs_real;
ktime_t offs_boot;
ktime_t offs_tai;
+ ktime_t time_suspended;
s32 tai_offset;
unsigned int clock_was_set_seq;
u8 cs_was_changed_seq;
@@ -117,6 +119,8 @@ struct timekeeper {
s64 ntp_error;
u32 ntp_error_shift;
u32 ntp_err_mult;
+ /* Flag used to avoid updating NTP twice with same second */
+ u32 skip_second_overflow;
#ifdef CONFIG_DEBUG_TIMEKEEPING
long last_warning;
/*
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 588a0e4b1ab9..9737fbec7019 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -33,20 +33,25 @@ extern void ktime_get_ts64(struct timespec64 *ts);
extern time64_t ktime_get_seconds(void);
extern time64_t __ktime_get_real_seconds(void);
extern time64_t ktime_get_real_seconds(void);
+extern void ktime_get_active_ts64(struct timespec64 *ts);
extern int __getnstimeofday64(struct timespec64 *tv);
extern void getnstimeofday64(struct timespec64 *tv);
extern void getboottime64(struct timespec64 *ts);
-#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
+#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
+
+/* Clock BOOTTIME compatibility wrappers */
+static inline void get_monotonic_boottime64(struct timespec64 *ts)
+{
+ ktime_get_ts64(ts);
+}
/*
* ktime_t based interfaces
*/
-
enum tk_offsets {
TK_OFFS_REAL,
- TK_OFFS_BOOT,
TK_OFFS_TAI,
TK_OFFS_MAX,
};
@@ -57,6 +62,10 @@ extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
extern u32 ktime_get_resolution_ns(void);
+/* Clock BOOTTIME compatibility wrappers */
+static inline ktime_t ktime_get_boottime(void) { return ktime_get(); }
+static inline u64 ktime_get_boot_ns(void) { return ktime_get(); }
+
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
*/
@@ -66,17 +75,6 @@ static inline ktime_t ktime_get_real(void)
}
/**
- * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
- *
- * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
- * time spent in suspend.
- */
-static inline ktime_t ktime_get_boottime(void)
-{
- return ktime_get_with_offset(TK_OFFS_BOOT);
-}
-
-/**
* ktime_get_clocktai - Returns the TAI time of day in ktime_t format
*/
static inline ktime_t ktime_get_clocktai(void)
@@ -102,11 +100,6 @@ static inline u64 ktime_get_real_ns(void)
return ktime_to_ns(ktime_get_real());
}
-static inline u64 ktime_get_boot_ns(void)
-{
- return ktime_to_ns(ktime_get_boottime());
-}
-
static inline u64 ktime_get_tai_ns(void)
{
return ktime_to_ns(ktime_get_clocktai());
@@ -119,17 +112,11 @@ static inline u64 ktime_get_raw_ns(void)
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
-extern u64 ktime_get_boot_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);
/*
* timespec64 interfaces utilizing the ktime based ones
*/
-static inline void get_monotonic_boottime64(struct timespec64 *ts)
-{
- *ts = ktime_to_timespec64(ktime_get_boottime());
-}
-
static inline void timekeeping_clocktai64(struct timespec64 *ts)
{
*ts = ktime_to_timespec64(ktime_get_clocktai());
diff --git a/include/linux/torture.h b/include/linux/torture.h
index a45702eb3e7b..66272862070b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -79,7 +79,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v, int *runnable);
+bool torture_init_begin(char *ttype, bool v);
void torture_init_end(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
@@ -96,4 +96,10 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
+#ifdef CONFIG_PREEMPT
+#define torture_preempt_schedule() preempt_schedule()
+#else
+#define torture_preempt_schedule()
+#endif
+
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 5a090f5ab335..06639fb6ab85 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -24,11 +24,6 @@
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
-/*
- * Chip num is this value or a valid tpm idx
- */
-#define TPM_ANY_NUM 0xFFFF
-
struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
@@ -49,47 +44,53 @@ struct tpm_class_ops {
bool (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
int (*request_locality)(struct tpm_chip *chip, int loc);
- void (*relinquish_locality)(struct tpm_chip *chip, int loc);
+ int (*relinquish_locality)(struct tpm_chip *chip, int loc);
+ void (*clk_enable)(struct tpm_chip *chip, bool value);
};
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
-extern int tpm_is_tpm2(u32 chip_num);
-extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
-extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
-extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
-extern int tpm_get_random(u32 chip_num, u8 *data, size_t max);
-extern int tpm_seal_trusted(u32 chip_num,
+extern int tpm_is_tpm2(struct tpm_chip *chip);
+extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
+extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
+extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
+extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
+extern int tpm_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
-extern int tpm_unseal_trusted(u32 chip_num,
+extern int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
#else
-static inline int tpm_is_tpm2(u32 chip_num)
+static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
return -ENODEV;
}
-static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
+static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
return -ENODEV;
}
-static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
+static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx,
+ const u8 *hash)
+{
return -ENODEV;
}
-static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
+static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
+{
return -ENODEV;
}
-static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) {
+static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
+{
return -ENODEV;
}
-static inline int tpm_seal_trusted(u32 chip_num,
+static inline int tpm_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
return -ENODEV;
}
-static inline int tpm_unseal_trusted(u32 chip_num,
+static inline int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
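
All of the in-kernel TPM helpers now take an explicit struct tpm_chip pointer instead of the old TPM_ANY_NUM chip number, so callers such as trusted keys and IMA pass along whatever chip reference they hold. A minimal sketch of a caller on the new signatures (hypothetical function; the chip reference is assumed to be obtained and managed elsewhere):

#include <linux/tpm.h>

static int ex_measure(struct tpm_chip *chip, const u8 *sha1_digest)
{
	u8 pcr[TPM_DIGEST_SIZE];
	int rc;

	rc = tpm_pcr_extend(chip, 10, sha1_digest);	/* PCR 10, as IMA uses */
	if (rc)
		return rc;

	return tpm_pcr_read(chip, 10, pcr);
}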
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
new file mode 100644
index 000000000000..20d9da77fc11
--- /dev/null
+++ b/include/linux/tpm_eventlog.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_TPM_EVENTLOG_H__
+#define __LINUX_TPM_EVENTLOG_H__
+
+#include <crypto/hash_info.h>
+
+#define TCG_EVENT_NAME_LEN_MAX 255
+#define MAX_TEXT_EVENT 1000 /* Max event string length */
+#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
+#define TPM2_ACTIVE_PCR_BANKS 3
+
+#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1
+#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2
+
+#ifdef CONFIG_PPC64
+#define do_endian_conversion(x) be32_to_cpu(x)
+#else
+#define do_endian_conversion(x) x
+#endif
+
+enum bios_platform_class {
+ BIOS_CLIENT = 0x00,
+ BIOS_SERVER = 0x01,
+};
+
+struct tcpa_event {
+ u32 pcr_index;
+ u32 event_type;
+ u8 pcr_value[20]; /* SHA1 */
+ u32 event_size;
+ u8 event_data[0];
+};
+
+enum tcpa_event_types {
+ PREBOOT = 0,
+ POST_CODE,
+ UNUSED,
+ NO_ACTION,
+ SEPARATOR,
+ ACTION,
+ EVENT_TAG,
+ SCRTM_CONTENTS,
+ SCRTM_VERSION,
+ CPU_MICROCODE,
+ PLATFORM_CONFIG_FLAGS,
+ TABLE_OF_DEVICES,
+ COMPACT_HASH,
+ IPL,
+ IPL_PARTITION_DATA,
+ NONHOST_CODE,
+ NONHOST_CONFIG,
+ NONHOST_INFO,
+};
+
+struct tcpa_pc_event {
+ u32 event_id;
+ u32 event_size;
+ u8 event_data[0];
+};
+
+enum tcpa_pc_event_ids {
+ SMBIOS = 1,
+ BIS_CERT,
+ POST_BIOS_ROM,
+ ESCD,
+ CMOS,
+ NVRAM,
+ OPTION_ROM_EXEC,
+ OPTION_ROM_CONFIG,
+ OPTION_ROM_MICROCODE = 10,
+ S_CRTM_VERSION,
+ S_CRTM_CONTENTS,
+ POST_CONTENTS,
+ HOST_TABLE_OF_DEVICES,
+};
+
+/* http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */
+
+struct tcg_efi_specid_event_algs {
+ u16 alg_id;
+ u16 digest_size;
+} __packed;
+
+struct tcg_efi_specid_event {
+ u8 signature[16];
+ u32 platform_class;
+ u8 spec_version_minor;
+ u8 spec_version_major;
+ u8 spec_errata;
+ u8 uintnsize;
+ u32 num_algs;
+ struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS];
+ u8 vendor_info_size;
+ u8 vendor_info[0];
+} __packed;
+
+struct tcg_pcr_event {
+ u32 pcr_idx;
+ u32 event_type;
+ u8 digest[20];
+ u32 event_size;
+ u8 event[0];
+} __packed;
+
+struct tcg_event_field {
+ u32 event_size;
+ u8 event[0];
+} __packed;
+
+struct tpm2_digest {
+ u16 alg_id;
+ u8 digest[SHA512_DIGEST_SIZE];
+} __packed;
+
+struct tcg_pcr_event2 {
+ u32 pcr_idx;
+ u32 event_type;
+ u32 count;
+ struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS];
+ struct tcg_event_field event;
+} __packed;
+
+#endif
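
A TCG 1.2 ("TCPA") log is a packed run of variable-length tcpa_event records, each followed by event_size bytes of event data. A minimal sketch of walking such a buffer (assumes an already validated log; the real parsers in drivers/char/tpm bounds-check every field before trusting it):

#include <linux/printk.h>
#include <linux/tpm_eventlog.h>
#include <linux/types.h>

static void ex_walk_tcpa_log(const u8 *log, size_t len)
{
	const u8 *p = log;

	while (p + sizeof(struct tcpa_event) <= log + len) {
		const struct tcpa_event *ev = (const void *)p;
		u32 esize = do_endian_conversion(ev->event_size);

		pr_info("PCR %u type %u: %u bytes of event data\n",
			do_endian_conversion(ev->pcr_index),
			do_endian_conversion(ev->event_type), esize);

		p += sizeof(*ev) + esize;
	}
}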
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index af44e7c2d577..2bde3eff564c 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -430,11 +430,13 @@ enum event_trigger_type {
extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
- void *rec);
-extern void event_triggers_post_call(struct trace_event_file *file,
- enum event_trigger_type tt,
- void *rec);
+extern enum event_trigger_type
+event_triggers_call(struct trace_event_file *file, void *rec,
+ struct ring_buffer_event *event);
+extern void
+event_triggers_post_call(struct trace_event_file *file,
+ enum event_trigger_type tt,
+ void *rec, struct ring_buffer_event *event);
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
@@ -454,7 +456,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
- event_triggers_call(file, NULL);
+ event_triggers_call(file, NULL, NULL);
if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
return true;
if (eflags & EVENT_FILE_FL_PID_FILTER)
@@ -467,6 +469,10 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
+int perf_event_query_prog_array(struct perf_event *event, void __user *info);
+int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
+int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
+struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
@@ -481,6 +487,23 @@ perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
+static inline int
+perf_event_query_prog_array(struct perf_event *event, void __user *info)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
+{
+ return -EOPNOTSUPP;
+}
+static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+{
+ return NULL;
+}
#endif
enum {
@@ -528,17 +551,53 @@ do { \
struct perf_event;
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+DECLARE_PER_CPU(int, bpf_kprobe_override);
extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
+#ifdef CONFIG_KPROBE_EVENTS
+extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
+extern void perf_kprobe_destroy(struct perf_event *event);
+#endif
+#ifdef CONFIG_UPROBE_EVENTS
+extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe);
+extern void perf_uprobe_destroy(struct perf_event *event);
+#endif
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
+void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
+void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
+void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3);
+void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4);
+void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5);
+void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6);
+void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8);
+void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8, u64 arg9);
+void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8, u64 arg9, u64 arg10);
+void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8, u64 arg9, u64 arg10, u64 arg11);
+void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
+ u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
+ u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
struct trace_event_call *call, u64 count,
struct pt_regs *regs, struct hlist_head *head,
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 64ed7064f1fa..22c5a46e9693 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -35,4 +35,10 @@ struct tracepoint {
struct tracepoint_func __rcu *funcs;
};
+struct bpf_raw_event_map {
+ struct tracepoint *tp;
+ void *bpf_func;
+ u32 num_args;
+} __aligned(32);
+
#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a26ffbe09e71..c94f466d57ef 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void);
\
if (!(cond)) \
return; \
- if (rcucheck) { \
- if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
- return; \
+ if (rcucheck) \
rcu_irq_enter_irqson(); \
- } \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7ac8ba208b1f..47f8af22f216 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -364,6 +364,7 @@ struct tty_file_private {
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
+#define TTY_HUPPING 19 /* Hangup in progress */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
/* Values for tty->flow_change */
@@ -405,6 +406,8 @@ extern const char *tty_name(const struct tty_struct *tty);
extern struct tty_struct *tty_kopen(dev_t device);
extern void tty_kclose(struct tty_struct *tty);
extern int tty_dev_name_to_number(const char *name, dev_t *number);
+extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
+extern void tty_ldisc_unlock(struct tty_struct *tty);
#else
static inline void tty_kref_put(struct tty_struct *tty)
{ }
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 3bc5144b1c7e..1ef64d4ad887 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -187,7 +187,7 @@ struct tty_ldisc_ops {
long (*compat_ioctl)(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
- unsigned int (*poll)(struct tty_struct *, struct file *,
+ __poll_t (*poll)(struct tty_struct *, struct file *,
struct poll_table_struct *);
int (*hangup)(struct tty_struct *tty);
diff --git a/include/linux/types.h b/include/linux/types.h
index c94d59ef96cc..ec13d02b3481 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -217,7 +217,7 @@ struct ustat {
*
* This guarantee is important for few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer;
- * - the structure shares storage spacer in struct page with @compound_head,
+ * - the structure shares storage space in struct page with @compound_head,
* which encode PageTail() in bit 0. The guarantee is needed to avoid
* false-positive PageTail().
*/
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 5bdbd9f49395..07ee0f84a46c 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -90,6 +90,28 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
#endif
}
+static inline unsigned long
+u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+ unsigned long flags = 0;
+
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ local_irq_save(flags);
+ write_seqcount_begin(&syncp->seq);
+#endif
+ return flags;
+}
+
+static inline void
+u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+ unsigned long flags)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_end(&syncp->seq);
+ local_irq_restore(flags);
+#endif
+}
+
static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
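
The _irqsave variants cover writers that can run in both task and IRQ context: on 32-bit SMP they disable interrupts around the seqcount update, everywhere else they compile down to nothing. A minimal sketch of the usage pattern (hypothetical counters):

#include <linux/u64_stats_sync.h>

struct ex_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void ex_stats_add(struct ex_stats *st, unsigned int len)
{
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&st->syncp);
	st->packets++;
	st->bytes += len;
	u64_stats_update_end_irqrestore(&st->syncp, flags);
}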
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 251e655d407f..efe79c1cdd47 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -273,4 +273,12 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+void usercopy_warn(const char *name, const char *detail, bool to_user,
+ unsigned long offset, unsigned long len);
+void __noreturn usercopy_abort(const char *name, const char *detail,
+ bool to_user, unsigned long offset,
+ unsigned long len);
+#endif
+
#endif /* __LINUX_UACCESS_H__ */
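
usercopy_warn() and usercopy_abort() are the reporting back ends for CONFIG_HARDENED_USERCOPY object checks. A minimal sketch of how a bounds check might report a violation (illustrative only; the real checks live in mm/usercopy.c):

#include <linux/uaccess.h>

#ifdef CONFIG_HARDENED_USERCOPY
static void ex_check_bounds(const void *ptr, unsigned long n,
			    const void *obj_start, unsigned long obj_len,
			    bool to_user)
{
	unsigned long off = (unsigned long)ptr - (unsigned long)obj_start;

	if (off + n > obj_len)
		usercopy_abort("example object", NULL, to_user, off, n);
}
#endif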
diff --git a/include/linux/usb.h b/include/linux/usb.h
index fbbe974661f2..0173597e59aa 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -609,6 +609,10 @@ struct usb3_lpm_parameters {
* to keep track of the number of functions that require USB 3.0 Link Power
* Management to be disabled for this usb_device. This count should only
* be manipulated by those functions, with the bandwidth_mutex is held.
+ * @hub_delay: cached value consisting of:
+ * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns)
+ *
+ * Will be used as wValue for SetIsochDelay requests.
*
* Notes:
* Usbcore drivers should not set usbdev->state directly. Instead use
@@ -689,6 +693,8 @@ struct usb_device {
struct usb3_lpm_parameters u1_params;
struct usb3_lpm_parameters u2_params;
unsigned lpm_disable_count;
+
+ u16 hub_delay;
};
#define to_usb_device(d) container_of(d, struct usb_device, dev)
@@ -1293,7 +1299,6 @@ extern int usb_disabled(void);
#define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired
* slot in the schedule */
#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */
-#define URB_NO_FSBR 0x0020 /* UHCI-specific */
#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */
#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt
* needed */
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index 3119d0ace7aa..aaafecf073ff 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -34,14 +34,14 @@
*
*/
-static inline bool uac2_control_is_readable(u32 bmControls, u8 control)
+static inline bool uac_v2v3_control_is_readable(u32 bmControls, u8 control)
{
- return (bmControls >> (control * 2)) & 0x1;
+ return (bmControls >> ((control - 1) * 2)) & 0x1;
}
-static inline bool uac2_control_is_writeable(u32 bmControls, u8 control)
+static inline bool uac_v2v3_control_is_writeable(u32 bmControls, u8 control)
{
- return (bmControls >> (control * 2)) & 0x2;
+ return (bmControls >> ((control - 1) * 2)) & 0x2;
}
/* 4.7.2 Class-Specific AC Interface Descriptor */
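
The renamed helpers now shift bmControls by (control - 1) * 2, matching the 1-based control selectors shared by UAC2 and UAC3. A minimal sketch checking a clock source's sampling-frequency control (UAC2_CS_CONTROL_SAM_FREQ is the existing clock-source selector defined elsewhere in audio-v2.h):

#include <linux/usb/audio-v2.h>

static bool ex_sam_freq_is_rw(u32 bmControls)
{
	return uac_v2v3_control_is_readable(bmControls,
					    UAC2_CS_CONTROL_SAM_FREQ) &&
	       uac_v2v3_control_is_writeable(bmControls,
					     UAC2_CS_CONTROL_SAM_FREQ);
}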
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
new file mode 100644
index 000000000000..a8959aaba0ae
--- /dev/null
+++ b/include/linux/usb/audio-v3.h
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Ruslan Bilovol <ruslan.bilovol@gmail.com>
+ *
+ * This file holds USB constants and structures defined
+ * by the USB DEVICE CLASS DEFINITION FOR AUDIO DEVICES Release 3.0.
+ */
+
+#ifndef __LINUX_USB_AUDIO_V3_H
+#define __LINUX_USB_AUDIO_V3_H
+
+#include <linux/types.h>
+
+/*
+ * v1.0, v2.0 and v3.0 of this standard have many things in common. For the rest
+ * of the definitions, please refer to audio.h and audio-v2.h
+ */
+
+/* All High Capability descriptors have these 2 fields at the beginning */
+struct uac3_hc_descriptor_header {
+ __le16 wLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __le16 wDescriptorID;
+} __attribute__ ((packed));
+
+/* 4.3.1 CLUSTER DESCRIPTOR HEADER */
+struct uac3_cluster_header_descriptor {
+ __le16 wLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __le16 wDescriptorID;
+ __u8 bNrChannels;
+} __attribute__ ((packed));
+
+/* 4.3.2.1 SEGMENTS */
+struct uac3_cluster_segment_descriptor {
+ __le16 wLength;
+ __u8 bSegmentType;
+ /* __u8[0]; segment-specific data */
+} __attribute__ ((packed));
+
+/* 4.3.2.1.1 END SEGMENT */
+struct uac3_cluster_end_segment_descriptor {
+ __le16 wLength;
+ __u8 bSegmentType; /* Constant END_SEGMENT */
+} __attribute__ ((packed));
+
+/* 4.3.2.1.3.1 INFORMATION SEGMENT */
+struct uac3_cluster_information_segment_descriptor {
+ __le16 wLength;
+ __u8 bSegmentType;
+ __u8 bChPurpose;
+ __u8 bChRelationship;
+ __u8 bChGroupID;
+} __attribute__ ((packed));
+
+/* 4.5.2 CLASS-SPECIFIC AC INTERFACE DESCRIPTOR */
+struct uac3_ac_header_descriptor {
+ __u8 bLength; /* 10 */
+ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */
+ __u8 bDescriptorSubtype; /* HEADER descriptor subtype */
+ __u8 bCategory;
+
+ /* includes Clock Source, Unit, Terminal, and Power Domain desc. */
+ __le16 wTotalLength;
+
+ __le32 bmControls;
+} __attribute__ ((packed));
+
+/* 4.5.2.1 INPUT TERMINAL DESCRIPTOR */
+struct uac3_input_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bCSourceID;
+ __le32 bmControls;
+ __le16 wClusterDescrID;
+ __le16 wExTerminalDescrID;
+ __le16 wConnectorsDescrID;
+ __le16 wTerminalDescrStr;
+} __attribute__((packed));
+
+/* 4.5.2.2 OUTPUT TERMINAL DESCRIPTOR */
+struct uac3_output_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 bCSourceID;
+ __le32 bmControls;
+ __le16 wExTerminalDescrID;
+ __le16 wConnectorsDescrID;
+ __le16 wTerminalDescrStr;
+} __attribute__((packed));
+
+/* 4.5.2.7 FEATURE UNIT DESCRIPTOR */
+struct uac3_feature_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ /* bmaControls is actually u32,
+ * but u8 is needed for the hybrid parser */
+ __u8 bmaControls[0]; /* variable length */
+ /* wFeatureDescrStr omitted */
+} __attribute__((packed));
+
+#define UAC3_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 4)
+
+/* As above, but more useful for defining your own descriptors */
+#define DECLARE_UAC3_FEATURE_UNIT_DESCRIPTOR(ch) \
+struct uac3_feature_unit_descriptor_##ch { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bUnitID; \
+ __u8 bSourceID; \
+ __le32 bmaControls[ch + 1]; \
+ __le16 wFeatureDescrStr; \
+} __attribute__ ((packed))
+
+/* 4.5.2.12 CLOCK SOURCE DESCRIPTOR */
+struct uac3_clock_source_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bmAttributes;
+ __le32 bmControls;
+ __u8 bReferenceTerminal;
+ __le16 wClockSourceStr;
+} __attribute__((packed));
+
+/* bmAttribute fields */
+#define UAC3_CLOCK_SOURCE_TYPE_EXT 0x0
+#define UAC3_CLOCK_SOURCE_TYPE_INT 0x1
+#define UAC3_CLOCK_SOURCE_ASYNC (0 << 2)
+#define UAC3_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 1)
+
+/* 4.5.2.13 CLOCK SELECTOR DESCRIPTOR */
+struct uac3_clock_selector_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bNrInPins;
+ __u8 baCSourceID[];
+ /* bmControls and wCSelectorDescrStr omitted */
+} __attribute__((packed));
+
+/* 4.5.2.14 CLOCK MULTIPLIER DESCRIPTOR */
+struct uac3_clock_multiplier_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bCSourceID;
+ __le32 bmControls;
+ __le16 wCMultiplierDescrStr;
+} __attribute__((packed));
+
+/* 4.5.2.15 POWER DOMAIN DESCRIPTOR */
+struct uac3_power_domain_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bPowerDomainID;
+ __le16 waRecoveryTime1;
+ __le16 waRecoveryTime2;
+ __u8 bNrEntities;
+ __u8 baEntityID[];
+ /* wPDomainDescrStr omitted */
+} __attribute__((packed));
+
+/* As above, but more useful for defining your own descriptors */
+#define DECLARE_UAC3_POWER_DOMAIN_DESCRIPTOR(n) \
+struct uac3_power_domain_descriptor_##n { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bPowerDomainID; \
+ __le16 waRecoveryTime1; \
+ __le16 waRecoveryTime2; \
+ __u8 bNrEntities; \
+ __u8 baEntityID[n]; \
+ __le16 wPDomainDescrStr; \
+} __attribute__ ((packed))
+
+/* 4.7.2 CLASS-SPECIFIC AS INTERFACE DESCRIPTOR */
+struct uac3_as_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalLink;
+ __le32 bmControls;
+ __le16 wClusterDescrID;
+ __le64 bmFormats;
+ __u8 bSubslotSize;
+ __u8 bBitResolution;
+ __le16 bmAuxProtocols;
+ __u8 bControlSize;
+} __attribute__((packed));
+
+#define UAC3_FORMAT_TYPE_I_RAW_DATA (1 << 6)
+
+/* 4.8.1.2 CLASS-SPECIFIC AS ISOCHRONOUS AUDIO DATA ENDPOINT DESCRIPTOR */
+struct uac3_iso_endpoint_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __le32 bmControls;
+ __u8 bLockDelayUnits;
+ __le16 wLockDelay;
+} __attribute__((packed));
+
+/* 6.1 INTERRUPT DATA MESSAGE */
+struct uac3_interrupt_data_msg {
+ __u8 bInfo;
+ __u8 bSourceType;
+ __le16 wValue;
+ __le16 wIndex;
+} __attribute__((packed));
+
+/* A.2 AUDIO FUNCTION SUBCLASS CODES */
+#define UAC3_FUNCTION_SUBCLASS_UNDEFINED 0x00
+#define UAC3_FUNCTION_SUBCLASS_FULL_ADC_3_0 0x01
+/* BADD profiles */
+#define UAC3_FUNCTION_SUBCLASS_GENERIC_IO 0x20
+#define UAC3_FUNCTION_SUBCLASS_HEADPHONE 0x21
+#define UAC3_FUNCTION_SUBCLASS_SPEAKER 0x22
+#define UAC3_FUNCTION_SUBCLASS_MICROPHONE 0x23
+#define UAC3_FUNCTION_SUBCLASS_HEADSET 0x24
+#define UAC3_FUNCTION_SUBCLASS_HEADSET_ADAPTER 0x25
+#define UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE 0x26
+
+/* A.7 AUDIO FUNCTION CATEGORY CODES */
+#define UAC3_FUNCTION_SUBCLASS_UNDEFINED 0x00
+#define UAC3_FUNCTION_DESKTOP_SPEAKER 0x01
+#define UAC3_FUNCTION_HOME_THEATER 0x02
+#define UAC3_FUNCTION_MICROPHONE 0x03
+#define UAC3_FUNCTION_HEADSET 0x04
+#define UAC3_FUNCTION_TELEPHONE 0x05
+#define UAC3_FUNCTION_CONVERTER 0x06
+#define UAC3_FUNCTION_SOUND_RECORDER 0x07
+#define UAC3_FUNCTION_IO_BOX 0x08
+#define UAC3_FUNCTION_MUSICAL_INSTRUMENT 0x09
+#define UAC3_FUNCTION_PRO_AUDIO 0x0a
+#define UAC3_FUNCTION_AUDIO_VIDEO 0x0b
+#define UAC3_FUNCTION_CONTROL_PANEL 0x0c
+#define UAC3_FUNCTION_HEADPHONE 0x0d
+#define UAC3_FUNCTION_GENERIC_SPEAKER 0x0e
+#define UAC3_FUNCTION_HEADSET_ADAPTER 0x0f
+#define UAC3_FUNCTION_SPEAKERPHONE 0x10
+#define UAC3_FUNCTION_OTHER 0xff
+
+/* A.8 AUDIO CLASS-SPECIFIC DESCRIPTOR TYPES */
+#define UAC3_CS_UNDEFINED 0x20
+#define UAC3_CS_DEVICE 0x21
+#define UAC3_CS_CONFIGURATION 0x22
+#define UAC3_CS_STRING 0x23
+#define UAC3_CS_INTERFACE 0x24
+#define UAC3_CS_ENDPOINT 0x25
+#define UAC3_CS_CLUSTER 0x26
+
+/* A.10 CLUSTER DESCRIPTOR SEGMENT TYPES */
+#define UAC3_SEGMENT_UNDEFINED 0x00
+#define UAC3_CLUSTER_DESCRIPTION 0x01
+#define UAC3_CLUSTER_VENDOR_DEFINED 0x1F
+#define UAC3_CHANNEL_INFORMATION 0x20
+#define UAC3_CHANNEL_AMBISONIC 0x21
+#define UAC3_CHANNEL_DESCRIPTION 0x22
+#define UAC3_CHANNEL_VENDOR_DEFINED 0xFE
+#define UAC3_END_SEGMENT 0xFF
+
+/* A.11 CHANNEL PURPOSE DEFINITIONS */
+#define UAC3_PURPOSE_UNDEFINED 0x00
+#define UAC3_PURPOSE_GENERIC_AUDIO 0x01
+#define UAC3_PURPOSE_VOICE 0x02
+#define UAC3_PURPOSE_SPEECH 0x03
+#define UAC3_PURPOSE_AMBIENT 0x04
+#define UAC3_PURPOSE_REFERENCE 0x05
+#define UAC3_PURPOSE_ULTRASONIC 0x06
+#define UAC3_PURPOSE_VIBROKINETIC 0x07
+#define UAC3_PURPOSE_NON_AUDIO 0xFF
+
+/* A.12 CHANNEL RELATIONSHIP DEFINITIONS */
+#define UAC3_CH_RELATIONSHIP_UNDEFINED 0x00
+#define UAC3_CH_MONO 0x01
+#define UAC3_CH_LEFT 0x02
+#define UAC3_CH_RIGHT 0x03
+#define UAC3_CH_ARRAY 0x04
+#define UAC3_CH_PATTERN_X 0x20
+#define UAC3_CH_PATTERN_Y 0x21
+#define UAC3_CH_PATTERN_A 0x22
+#define UAC3_CH_PATTERN_B 0x23
+#define UAC3_CH_PATTERN_M 0x24
+#define UAC3_CH_PATTERN_S 0x25
+#define UAC3_CH_FRONT_LEFT 0x80
+#define UAC3_CH_FRONT_RIGHT 0x81
+#define UAC3_CH_FRONT_CENTER 0x82
+#define UAC3_CH_FRONT_LEFT_OF_CENTER 0x83
+#define UAC3_CH_FRONT_RIGHT_OF_CENTER 0x84
+#define UAC3_CH_FRONT_WIDE_LEFT 0x85
+#define UAC3_CH_FRONT_WIDE_RIGHT 0x86
+#define UAC3_CH_SIDE_LEFT 0x87
+#define UAC3_CH_SIDE_RIGHT 0x88
+#define UAC3_CH_SURROUND_ARRAY_LEFT 0x89
+#define UAC3_CH_SURROUND_ARRAY_RIGHT 0x8A
+#define UAC3_CH_BACK_LEFT 0x8B
+#define UAC3_CH_BACK_RIGHT 0x8C
+#define UAC3_CH_BACK_CENTER 0x8D
+#define UAC3_CH_BACK_LEFT_OF_CENTER 0x8E
+#define UAC3_CH_BACK_RIGHT_OF_CENTER 0x8F
+#define UAC3_CH_BACK_WIDE_LEFT 0x90
+#define UAC3_CH_BACK_WIDE_RIGHT 0x91
+#define UAC3_CH_TOP_CENTER 0x92
+#define UAC3_CH_TOP_FRONT_LEFT 0x93
+#define UAC3_CH_TOP_FRONT_RIGHT 0x94
+#define UAC3_CH_TOP_FRONT_CENTER 0x95
+#define UAC3_CH_TOP_FRONT_LOC 0x96
+#define UAC3_CH_TOP_FRONT_ROC 0x97
+#define UAC3_CH_TOP_FRONT_WIDE_LEFT 0x98
+#define UAC3_CH_TOP_FRONT_WIDE_RIGHT 0x99
+#define UAC3_CH_TOP_SIDE_LEFT 0x9A
+#define UAC3_CH_TOP_SIDE_RIGHT 0x9B
+#define UAC3_CH_TOP_SURR_ARRAY_LEFT 0x9C
+#define UAC3_CH_TOP_SURR_ARRAY_RIGHT 0x9D
+#define UAC3_CH_TOP_BACK_LEFT 0x9E
+#define UAC3_CH_TOP_BACK_RIGHT 0x9F
+#define UAC3_CH_TOP_BACK_CENTER 0xA0
+#define UAC3_CH_TOP_BACK_LOC 0xA1
+#define UAC3_CH_TOP_BACK_ROC 0xA2
+#define UAC3_CH_TOP_BACK_WIDE_LEFT 0xA3
+#define UAC3_CH_TOP_BACK_WIDE_RIGHT 0xA4
+#define UAC3_CH_BOTTOM_CENTER 0xA5
+#define UAC3_CH_BOTTOM_FRONT_LEFT 0xA6
+#define UAC3_CH_BOTTOM_FRONT_RIGHT 0xA7
+#define UAC3_CH_BOTTOM_FRONT_CENTER 0xA8
+#define UAC3_CH_BOTTOM_FRONT_LOC 0xA9
+#define UAC3_CH_BOTTOM_FRONT_ROC 0xAA
+#define UAC3_CH_BOTTOM_FRONT_WIDE_LEFT 0xAB
+#define UAC3_CH_BOTTOM_FRONT_WIDE_RIGHT 0xAC
+#define UAC3_CH_BOTTOM_SIDE_LEFT 0xAD
+#define UAC3_CH_BOTTOM_SIDE_RIGHT 0xAE
+#define UAC3_CH_BOTTOM_SURR_ARRAY_LEFT 0xAF
+#define UAC3_CH_BOTTOM_SURR_ARRAY_RIGHT 0xB0
+#define UAC3_CH_BOTTOM_BACK_LEFT 0xB1
+#define UAC3_CH_BOTTOM_BACK_RIGHT 0xB2
+#define UAC3_CH_BOTTOM_BACK_CENTER 0xB3
+#define UAC3_CH_BOTTOM_BACK_LOC 0xB4
+#define UAC3_CH_BOTTOM_BACK_ROC 0xB5
+#define UAC3_CH_BOTTOM_BACK_WIDE_LEFT 0xB6
+#define UAC3_CH_BOTTOM_BACK_WIDE_RIGHT 0xB7
+#define UAC3_CH_LOW_FREQUENCY_EFFECTS 0xB8
+#define UAC3_CH_LFE_LEFT 0xB9
+#define UAC3_CH_LFE_RIGHT 0xBA
+#define UAC3_CH_HEADPHONE_LEFT 0xBB
+#define UAC3_CH_HEADPHONE_RIGHT 0xBC
+
+/* A.15 AUDIO CLASS-SPECIFIC AC INTERFACE DESCRIPTOR SUBTYPES */
+/* see audio.h for the rest, which is identical to v1 */
+#define UAC3_EXTENDED_TERMINAL 0x04
+#define UAC3_MIXER_UNIT 0x05
+#define UAC3_SELECTOR_UNIT 0x06
+#define UAC3_FEATURE_UNIT 0x07
+#define UAC3_EFFECT_UNIT 0x08
+#define UAC3_PROCESSING_UNIT 0x09
+#define UAC3_EXTENSION_UNIT 0x0a
+#define UAC3_CLOCK_SOURCE 0x0b
+#define UAC3_CLOCK_SELECTOR 0x0c
+#define UAC3_CLOCK_MULTIPLIER 0x0d
+#define UAC3_SAMPLE_RATE_CONVERTER 0x0e
+#define UAC3_CONNECTORS 0x0f
+#define UAC3_POWER_DOMAIN 0x10
+
+/* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */
+/* see audio-v2.h for the rest, which is identical to v2 */
+#define UAC3_CS_REQ_INTEN 0x04
+#define UAC3_CS_REQ_STRING 0x05
+#define UAC3_CS_REQ_HIGH_CAPABILITY_DESCRIPTOR 0x06
+
+/* A.23.1 AUDIOCONTROL INTERFACE CONTROL SELECTORS */
+#define UAC3_AC_CONTROL_UNDEFINED 0x00
+#define UAC3_AC_ACTIVE_INTERFACE_CONTROL 0x01
+#define UAC3_AC_POWER_DOMAIN_CONTROL 0x02
+
+#endif /* __LINUX_USB_AUDIO_V3_H */
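Usage sketch (editor's addition, not part of the patch): one way a gadget-side audio function might instantiate the DECLARE_UAC3_FEATURE_UNIT_DESCRIPTOR() helper above for a stereo path. The unit/source IDs and the all-zero control bitmaps are illustrative placeholders.

#include <linux/kernel.h>
#include <linux/usb/audio-v3.h>

/* Two logical channels plus the master channel 0. */
DECLARE_UAC3_FEATURE_UNIT_DESCRIPTOR(2);

static struct uac3_feature_unit_descriptor_2 example_fu_desc = {
	.bLength		= UAC3_DT_FEATURE_UNIT_SIZE(2),
	.bDescriptorType	= UAC3_CS_INTERFACE,
	.bDescriptorSubtype	= UAC3_FEATURE_UNIT,
	.bUnitID		= 2,	/* placeholder entity ID */
	.bSourceID		= 1,	/* placeholder upstream entity */
	/* master + channels 1..2; zero means no controls implemented */
	.bmaControls		= { cpu_to_le32(0), cpu_to_le32(0), cpu_to_le32(0) },
	.wFeatureDescrStr	= cpu_to_le16(0),	/* no string descriptor */
};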
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index cef0e44601f8..4b6b9283fa7b 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -54,6 +54,9 @@
/* big enough to hold our biggest descriptor */
#define USB_COMP_EP0_BUFSIZ 1024
+/* OS feature descriptor length <= 4kB */
+#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096
+
#define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1)
struct usb_configuration;
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 0142f3af0da6..847f423ad9b3 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -129,6 +129,7 @@ struct usb_ep_ops {
int (*enable) (struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc);
int (*disable) (struct usb_ep *ep);
+ void (*dispose) (struct usb_ep *ep);
struct usb_request *(*alloc_request) (struct usb_ep *ep,
gfp_t gfp_flags);
@@ -330,6 +331,7 @@ struct usb_gadget_ops {
* @name: Identifies the controller hardware type. Used in diagnostics
* and sometimes configuration.
* @dev: Driver model state for this abstract device.
+ * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP
* @out_epnum: last used out ep number
* @in_epnum: last used in ep number
* @mA: last set mA value
@@ -394,6 +396,7 @@ struct usb_gadget {
enum usb_device_state state;
const char *name;
struct device dev;
+ unsigned isoch_delay;
unsigned out_epnum;
unsigned in_epnum;
unsigned mA;
@@ -803,6 +806,7 @@ int usb_otg_descriptor_init(struct usb_gadget *gadget,
/* utility to simplify map/unmap of usb_requests to/from DMA */
+#ifdef CONFIG_HAS_DMA
extern int usb_gadget_map_request_by_dev(struct device *dev,
struct usb_request *req, int is_in);
extern int usb_gadget_map_request(struct usb_gadget *gadget,
@@ -812,6 +816,17 @@ extern void usb_gadget_unmap_request_by_dev(struct device *dev,
struct usb_request *req, int is_in);
extern void usb_gadget_unmap_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in);
+#else /* !CONFIG_HAS_DMA */
+static inline int usb_gadget_map_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in) { return -ENOSYS; }
+static inline int usb_gadget_map_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in) { return -ENOSYS; }
+
+static inline void usb_gadget_unmap_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in) { }
+static inline void usb_gadget_unmap_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in) { }
+#endif /* !CONFIG_HAS_DMA */
/*-------------------------------------------------------------------------*/
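Editor's sketch (not part of the patch) of the queue-path pattern these helpers support in a UDC driver; with !CONFIG_HAS_DMA the map call now compiles to the -ENOSYS stub above instead of causing a link failure. The function name and the elided controller programming are illustrative.

#include <linux/usb/gadget.h>

static int example_queue(struct usb_gadget *gadget, struct usb_ep *ep,
			 struct usb_request *req)
{
	int ret;

	ret = usb_gadget_map_request(gadget, req,
				     usb_endpoint_dir_in(ep->desc));
	if (ret)	/* -ENOSYS when the kernel has no DMA support */
		return ret;

	/* ... hand req->dma to the controller, unmap on completion ... */
	return 0;
}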
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 176900528822..aef50cb2ed1b 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -103,7 +103,7 @@ struct usb_hcd {
* other external phys should be software-transparent
*/
struct usb_phy *usb_phy;
- struct phy *phy;
+ struct usb_phy_roothub *phy_roothub;
/* Flags that need to be manipulated atomically because they can
* change while the host controller is running. Always use
@@ -151,6 +151,12 @@ struct usb_hcd {
unsigned msix_enabled:1; /* driver has MSI-X enabled? */
unsigned msi_enabled:1; /* driver has MSI enabled? */
unsigned remove_phy:1; /* auto-remove USB phy */
+ /*
+ * do not manage the PHY state in the HCD core, instead let the driver
+ * handle this (for example if the PHY can only be turned on after a
+ * specific event)
+ */
+ unsigned skip_phy_initialization:1;
/* The next flag is a stopgap, to be removed when all the HCDs
* support the new root-hub polling mechanism. */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 5d19e6730475..9eb908a98033 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -89,13 +89,6 @@ struct musb_hdrc_config {
u8 ram_bits; /* ram address size */
struct musb_hdrc_eps_bits *eps_bits __deprecated;
-#ifdef CONFIG_BLACKFIN
- /* A GPIO controlling VRSEL in Blackfin */
- unsigned int gpio_vrsel;
- unsigned int gpio_vrsel_active;
- /* musb CLKIN in Blackfin in MHZ */
- unsigned char clkin;
-#endif
u32 maximum_speed;
};
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 6cbe7a5c2b57..dba55ccb9b53 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -12,13 +12,17 @@
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
+struct usb_device;
+
#if IS_ENABLED(CONFIG_OF)
enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
bool of_usb_host_tpl_support(struct device_node *np);
int of_usb_update_otg_caps(struct device_node *np,
struct usb_otg_caps *otg_caps);
-struct device_node *usb_of_get_child_node(struct device_node *parent,
- int portnum);
+struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1);
+bool usb_of_has_combined_node(struct usb_device *udev);
+struct device_node *usb_of_get_interface_node(struct usb_device *udev,
+ u8 config, u8 ifnum);
struct device *usb_of_get_companion_dev(struct device *dev);
#else
static inline enum usb_dr_mode
@@ -35,8 +39,17 @@ static inline int of_usb_update_otg_caps(struct device_node *np,
{
return 0;
}
-static inline struct device_node *usb_of_get_child_node
- (struct device_node *parent, int portnum)
+static inline struct device_node *
+usb_of_get_device_node(struct usb_device *hub, int port1)
+{
+ return NULL;
+}
+static inline bool usb_of_has_combined_node(struct usb_device *udev)
+{
+ return false;
+}
+static inline struct device_node *
+usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
{
return NULL;
}
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index e00051ced806..ff359bdfdc7b 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -35,6 +35,13 @@ enum pd_ctrl_msg_type {
PD_CTRL_WAIT = 12,
PD_CTRL_SOFT_RESET = 13,
/* 14-15 Reserved */
+ PD_CTRL_NOT_SUPP = 16,
+ PD_CTRL_GET_SOURCE_CAP_EXT = 17,
+ PD_CTRL_GET_STATUS = 18,
+ PD_CTRL_FR_SWAP = 19,
+ PD_CTRL_GET_PPS_STATUS = 20,
+ PD_CTRL_GET_COUNTRY_CODES = 21,
+ /* 22-31 Reserved */
};
enum pd_data_msg_type {
@@ -43,13 +50,39 @@ enum pd_data_msg_type {
PD_DATA_REQUEST = 2,
PD_DATA_BIST = 3,
PD_DATA_SINK_CAP = 4,
- /* 5-14 Reserved */
+ PD_DATA_BATT_STATUS = 5,
+ PD_DATA_ALERT = 6,
+ PD_DATA_GET_COUNTRY_INFO = 7,
+ /* 8-14 Reserved */
PD_DATA_VENDOR_DEF = 15,
+ /* 16-31 Reserved */
+};
+
+enum pd_ext_msg_type {
+ /* 0 Reserved */
+ PD_EXT_SOURCE_CAP_EXT = 1,
+ PD_EXT_STATUS = 2,
+ PD_EXT_GET_BATT_CAP = 3,
+ PD_EXT_GET_BATT_STATUS = 4,
+ PD_EXT_BATT_CAP = 5,
+ PD_EXT_GET_MANUFACTURER_INFO = 6,
+ PD_EXT_MANUFACTURER_INFO = 7,
+ PD_EXT_SECURITY_REQUEST = 8,
+ PD_EXT_SECURITY_RESPONSE = 9,
+ PD_EXT_FW_UPDATE_REQUEST = 10,
+ PD_EXT_FW_UPDATE_RESPONSE = 11,
+ PD_EXT_PPS_STATUS = 12,
+ PD_EXT_COUNTRY_INFO = 13,
+ PD_EXT_COUNTRY_CODES = 14,
+ /* 15-31 Reserved */
};
#define PD_REV10 0x0
#define PD_REV20 0x1
+#define PD_REV30 0x2
+#define PD_MAX_REV PD_REV30
+#define PD_HEADER_EXT_HDR BIT(15)
#define PD_HEADER_CNT_SHIFT 12
#define PD_HEADER_CNT_MASK 0x7
#define PD_HEADER_ID_SHIFT 9
@@ -59,18 +92,19 @@ enum pd_data_msg_type {
#define PD_HEADER_REV_MASK 0x3
#define PD_HEADER_DATA_ROLE BIT(5)
#define PD_HEADER_TYPE_SHIFT 0
-#define PD_HEADER_TYPE_MASK 0xf
+#define PD_HEADER_TYPE_MASK 0x1f
-#define PD_HEADER(type, pwr, data, id, cnt) \
+#define PD_HEADER(type, pwr, data, rev, id, cnt, ext_hdr) \
((((type) & PD_HEADER_TYPE_MASK) << PD_HEADER_TYPE_SHIFT) | \
((pwr) == TYPEC_SOURCE ? PD_HEADER_PWR_ROLE : 0) | \
((data) == TYPEC_HOST ? PD_HEADER_DATA_ROLE : 0) | \
- (PD_REV20 << PD_HEADER_REV_SHIFT) | \
+ (rev << PD_HEADER_REV_SHIFT) | \
(((id) & PD_HEADER_ID_MASK) << PD_HEADER_ID_SHIFT) | \
- (((cnt) & PD_HEADER_CNT_MASK) << PD_HEADER_CNT_SHIFT))
+ (((cnt) & PD_HEADER_CNT_MASK) << PD_HEADER_CNT_SHIFT) | \
+ ((ext_hdr) ? PD_HEADER_EXT_HDR : 0))
#define PD_HEADER_LE(type, pwr, data, id, cnt) \
- cpu_to_le16(PD_HEADER((type), (pwr), (data), (id), (cnt)))
+ cpu_to_le16(PD_HEADER((type), (pwr), (data), PD_REV20, (id), (cnt), (0)))
static inline unsigned int pd_header_cnt(u16 header)
{
@@ -102,16 +136,75 @@ static inline unsigned int pd_header_msgid_le(__le16 header)
return pd_header_msgid(le16_to_cpu(header));
}
+static inline unsigned int pd_header_rev(u16 header)
+{
+ return (header >> PD_HEADER_REV_SHIFT) & PD_HEADER_REV_MASK;
+}
+
+static inline unsigned int pd_header_rev_le(__le16 header)
+{
+ return pd_header_rev(le16_to_cpu(header));
+}
+
+#define PD_EXT_HDR_CHUNKED BIT(15)
+#define PD_EXT_HDR_CHUNK_NUM_SHIFT 11
+#define PD_EXT_HDR_CHUNK_NUM_MASK 0xf
+#define PD_EXT_HDR_REQ_CHUNK BIT(10)
+#define PD_EXT_HDR_DATA_SIZE_SHIFT 0
+#define PD_EXT_HDR_DATA_SIZE_MASK 0x1ff
+
+#define PD_EXT_HDR(data_size, req_chunk, chunk_num, chunked) \
+ ((((data_size) & PD_EXT_HDR_DATA_SIZE_MASK) << PD_EXT_HDR_DATA_SIZE_SHIFT) | \
+ ((req_chunk) ? PD_EXT_HDR_REQ_CHUNK : 0) | \
+ (((chunk_num) & PD_EXT_HDR_CHUNK_NUM_MASK) << PD_EXT_HDR_CHUNK_NUM_SHIFT) | \
+ ((chunked) ? PD_EXT_HDR_CHUNKED : 0))
+
+#define PD_EXT_HDR_LE(data_size, req_chunk, chunk_num, chunked) \
+ cpu_to_le16(PD_EXT_HDR((data_size), (req_chunk), (chunk_num), (chunked)))
+
+static inline unsigned int pd_ext_header_chunk_num(u16 ext_header)
+{
+ return (ext_header >> PD_EXT_HDR_CHUNK_NUM_SHIFT) &
+ PD_EXT_HDR_CHUNK_NUM_MASK;
+}
+
+static inline unsigned int pd_ext_header_data_size(u16 ext_header)
+{
+ return (ext_header >> PD_EXT_HDR_DATA_SIZE_SHIFT) &
+ PD_EXT_HDR_DATA_SIZE_MASK;
+}
+
+static inline unsigned int pd_ext_header_data_size_le(__le16 ext_header)
+{
+ return pd_ext_header_data_size(le16_to_cpu(ext_header));
+}
+
#define PD_MAX_PAYLOAD 7
+#define PD_EXT_MAX_CHUNK_DATA 26
/**
- * struct pd_message - PD message as seen on wire
- * @header: PD message header
- * @payload: PD message payload
- */
+ * struct pd_chunked_ext_message_data - PD chunked extended message data as
+ * seen on wire
+ * @header: PD extended message header
+ * @data: PD extended message data
+ */
+struct pd_chunked_ext_message_data {
+ __le16 header;
+ u8 data[PD_EXT_MAX_CHUNK_DATA];
+} __packed;
+
+/**
+ * struct pd_message - PD message as seen on wire
+ * @header: PD message header
+ * @payload: PD message payload
+ * @ext_msg: PD message chunked extended message data
+ */
struct pd_message {
__le16 header;
- __le32 payload[PD_MAX_PAYLOAD];
+ union {
+ __le32 payload[PD_MAX_PAYLOAD];
+ struct pd_chunked_ext_message_data ext_msg;
+ };
} __packed;
/* PDO: Power Data Object */
@@ -121,6 +214,7 @@ enum pd_pdo_type {
PDO_TYPE_FIXED = 0,
PDO_TYPE_BATT = 1,
PDO_TYPE_VAR = 2,
+ PDO_TYPE_APDO = 3,
};
#define PDO_TYPE_SHIFT 30
@@ -148,6 +242,8 @@ enum pd_pdo_type {
(PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \
PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma))
+#define VSAFE5V 5000 /* mv units */
+
#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */
#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */
#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */
@@ -172,6 +268,34 @@ enum pd_pdo_type {
(PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \
PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma))
+enum pd_apdo_type {
+ APDO_TYPE_PPS = 0,
+};
+
+#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */
+#define PDO_APDO_TYPE_MASK 0x3
+
+#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT)
+
+#define PDO_PPS_APDO_MAX_VOLT_SHIFT 17 /* 100mV units */
+#define PDO_PPS_APDO_MIN_VOLT_SHIFT 8 /* 100mV units */
+#define PDO_PPS_APDO_MAX_CURR_SHIFT 0 /* 50mA units */
+
+#define PDO_PPS_APDO_VOLT_MASK 0xff
+#define PDO_PPS_APDO_CURR_MASK 0x7f
+
+#define PDO_PPS_APDO_MIN_VOLT(mv) \
+ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MIN_VOLT_SHIFT)
+#define PDO_PPS_APDO_MAX_VOLT(mv) \
+ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MAX_VOLT_SHIFT)
+#define PDO_PPS_APDO_MAX_CURR(ma) \
+ ((((ma) / 50) & PDO_PPS_APDO_CURR_MASK) << PDO_PPS_APDO_MAX_CURR_SHIFT)
+
+#define PDO_PPS_APDO(min_mv, max_mv, max_ma) \
+ (PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) | \
+ PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
+ PDO_PPS_APDO_MAX_CURR(max_ma))
+
static inline enum pd_pdo_type pdo_type(u32 pdo)
{
return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK;
@@ -202,6 +326,29 @@ static inline unsigned int pdo_max_power(u32 pdo)
return ((pdo >> PDO_BATT_MAX_PWR_SHIFT) & PDO_PWR_MASK) * 250;
}
+static inline enum pd_apdo_type pdo_apdo_type(u32 pdo)
+{
+ return (pdo >> PDO_APDO_TYPE_SHIFT) & PDO_APDO_TYPE_MASK;
+}
+
+static inline unsigned int pdo_pps_apdo_min_voltage(u32 pdo)
+{
+ return ((pdo >> PDO_PPS_APDO_MIN_VOLT_SHIFT) &
+ PDO_PPS_APDO_VOLT_MASK) * 100;
+}
+
+static inline unsigned int pdo_pps_apdo_max_voltage(u32 pdo)
+{
+ return ((pdo >> PDO_PPS_APDO_MAX_VOLT_SHIFT) &
+ PDO_PPS_APDO_VOLT_MASK) * 100;
+}
+
+static inline unsigned int pdo_pps_apdo_max_current(u32 pdo)
+{
+ return ((pdo >> PDO_PPS_APDO_MAX_CURR_SHIFT) &
+ PDO_PPS_APDO_CURR_MASK) * 50;
+}
+
/* RDO: Request Data Object */
#define RDO_OBJ_POS_SHIFT 28
#define RDO_OBJ_POS_MASK 0x7
@@ -235,6 +382,24 @@ static inline unsigned int pdo_max_power(u32 pdo)
(RDO_OBJ(idx) | (flags) | \
RDO_BATT_OP_PWR(op_mw) | RDO_BATT_MAX_PWR(max_mw))
+#define RDO_PROG_VOLT_MASK 0x7ff
+#define RDO_PROG_CURR_MASK 0x7f
+
+#define RDO_PROG_VOLT_SHIFT 9
+#define RDO_PROG_CURR_SHIFT 0
+
+#define RDO_PROG_VOLT_MV_STEP 20
+#define RDO_PROG_CURR_MA_STEP 50
+
+#define PDO_PROG_OUT_VOLT(mv) \
+ ((((mv) / RDO_PROG_VOLT_MV_STEP) & RDO_PROG_VOLT_MASK) << RDO_PROG_VOLT_SHIFT)
+#define PDO_PROG_OP_CURR(ma) \
+ ((((ma) / RDO_PROG_CURR_MA_STEP) & RDO_PROG_CURR_MASK) << RDO_PROG_CURR_SHIFT)
+
+#define RDO_PROG(idx, out_mv, op_ma, flags) \
+ (RDO_OBJ(idx) | (flags) | \
+ PDO_PROG_OUT_VOLT(out_mv) | PDO_PROG_OP_CURR(op_ma))
+
static inline unsigned int rdo_index(u32 rdo)
{
return (rdo >> RDO_OBJ_POS_SHIFT) & RDO_OBJ_POS_MASK;
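Editor's sketch (not part of the patch) showing the new PD 3.0 helpers in use; the voltages, currents and message id are arbitrary example values.

#include <linux/usb/pd.h>

/* PD 3.0 Not_Supported control message header from a source/DFP,
 * no data objects, no extended header. */
static u16 example_not_supported_header(unsigned int msg_id)
{
	return PD_HEADER(PD_CTRL_NOT_SUPP, TYPEC_SOURCE, TYPEC_HOST,
			 PD_REV30, msg_id, 0, 0);
}

/* A Programmable Power Supply APDO advertising 3.3V-5.9V at up to 3A,
 * and an RDO_PROG request for 5.0V / 2A at object position 1. */
static const u32 example_pps_apdo = PDO_PPS_APDO(3300, 5900, 3000);
static const u32 example_pps_rdo = RDO_PROG(1, 5000, 2000, 0);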
diff --git a/include/linux/usb/pd_ado.h b/include/linux/usb/pd_ado.h
new file mode 100644
index 000000000000..9aa1cf31c93c
--- /dev/null
+++ b/include/linux/usb/pd_ado.h
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ */
+
+#ifndef __LINUX_USB_PD_ADO_H
+#define __LINUX_USB_PD_ADO_H
+
+/* ADO : Alert Data Object */
+#define USB_PD_ADO_TYPE_SHIFT 24
+#define USB_PD_ADO_TYPE_MASK 0xff
+#define USB_PD_ADO_FIXED_BATT_SHIFT 20
+#define USB_PD_ADO_FIXED_BATT_MASK 0xf
+#define USB_PD_ADO_HOT_SWAP_BATT_SHIFT 16
+#define USB_PD_ADO_HOT_SWAP_BATT_MASK 0xf
+
+#define USB_PD_ADO_TYPE_BATT_STATUS_CHANGE BIT(1)
+#define USB_PD_ADO_TYPE_OCP BIT(2)
+#define USB_PD_ADO_TYPE_OTP BIT(3)
+#define USB_PD_ADO_TYPE_OP_COND_CHANGE BIT(4)
+#define USB_PD_ADO_TYPE_SRC_INPUT_CHANGE BIT(5)
+#define USB_PD_ADO_TYPE_OVP BIT(6)
+
+static inline unsigned int usb_pd_ado_type(u32 ado)
+{
+ return (ado >> USB_PD_ADO_TYPE_SHIFT) & USB_PD_ADO_TYPE_MASK;
+}
+
+static inline unsigned int usb_pd_ado_fixed_batt(u32 ado)
+{
+ return (ado >> USB_PD_ADO_FIXED_BATT_SHIFT) &
+ USB_PD_ADO_FIXED_BATT_MASK;
+}
+
+static inline unsigned int usb_pd_ado_hot_swap_batt(u32 ado)
+{
+ return (ado >> USB_PD_ADO_HOT_SWAP_BATT_SHIFT) &
+ USB_PD_ADO_HOT_SWAP_BATT_MASK;
+}
+#endif /* __LINUX_USB_PD_ADO_H */
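Editor's sketch (not part of the patch): decoding the first data object of an incoming Alert message with the accessors above. The printouts are illustrative.

#include <linux/printk.h>
#include <linux/usb/pd_ado.h>

static void example_handle_alert(u32 ado)
{
	if (usb_pd_ado_type(ado) & USB_PD_ADO_TYPE_OCP)
		pr_warn("port partner reports an over-current event\n");

	pr_debug("fixed batteries: %#x, hot-swappable batteries: %#x\n",
		 usb_pd_ado_fixed_batt(ado), usb_pd_ado_hot_swap_batt(ado));
}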
diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h
new file mode 100644
index 000000000000..0eb83ce19597
--- /dev/null
+++ b/include/linux/usb/pd_ext_sdb.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ */
+
+#ifndef __LINUX_USB_PD_EXT_SDB_H
+#define __LINUX_USB_PD_EXT_SDB_H
+
+/* SDB : Status Data Block */
+enum usb_pd_ext_sdb_fields {
+ USB_PD_EXT_SDB_INTERNAL_TEMP = 0,
+ USB_PD_EXT_SDB_PRESENT_INPUT,
+ USB_PD_EXT_SDB_PRESENT_BATT_INPUT,
+ USB_PD_EXT_SDB_EVENT_FLAGS,
+ USB_PD_EXT_SDB_TEMP_STATUS,
+ USB_PD_EXT_SDB_DATA_SIZE,
+};
+
+/* Event Flags */
+#define USB_PD_EXT_SDB_EVENT_OCP BIT(1)
+#define USB_PD_EXT_SDB_EVENT_OTP BIT(2)
+#define USB_PD_EXT_SDB_EVENT_OVP BIT(3)
+#define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4)
+
+#define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \
+ USB_PD_EXT_SDB_EVENT_OTP | \
+ USB_PD_EXT_SDB_EVENT_OVP)
+
+#endif /* __LINUX_USB_PD_EXT_SDB_H */
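Editor's sketch (not part of the patch): using the field offsets above to pick the event flags out of a received Status message, assuming the chunked extended message layout added to struct pd_message earlier in this series.

#include <linux/usb/pd.h>
#include <linux/usb/pd_ext_sdb.h>

static bool example_status_has_pps_event(const struct pd_message *msg)
{
	const u8 *sdb = msg->ext_msg.data;

	return sdb[USB_PD_EXT_SDB_EVENT_FLAGS] & USB_PD_EXT_SDB_PPS_EVENTS;
}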
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index d92259f8de0a..2b64d23ace5c 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -65,7 +65,7 @@
#define CMD_EXIT_MODE 5
#define CMD_ATTENTION 6
-#define VDO_CMD_VENDOR(x) (((10 + (x)) & 0x1f))
+#define VDO_CMD_VENDOR(x) (((0x10 + (x)) & 0x1f))
/* ChromeOS specific commands */
#define VDO_CMD_VERSION VDO_CMD_VENDOR(0)
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index f1fcec2fd5f8..b7a99ce56bc9 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -63,4 +63,7 @@
*/
#define USB_QUIRK_DISCONNECT_SUSPEND BIT(12)
+/* Device needs a pause after every control message. */
+#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 67102f3d59d4..53924f8e840c 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -17,6 +17,7 @@
*/
#ifndef RENESAS_USB_H
#define RENESAS_USB_H
+#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
@@ -98,6 +99,13 @@ struct renesas_usbhs_platform_callback {
* VBUS control is needed for Host
*/
int (*set_vbus)(struct platform_device *pdev, int enable);
+
+ /*
+ * option:
+ * extcon notifier to set host/peripheral mode.
+ */
+ int (*notifier)(struct notifier_block *nb, unsigned long event,
+ void *data);
};
/*
@@ -187,6 +195,7 @@ struct renesas_usbhs_driver_param {
#define USBHS_TYPE_RCAR_GEN2 1
#define USBHS_TYPE_RCAR_GEN3 2
#define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3
+#define USBHS_TYPE_RZA1 4
/*
* option:
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
new file mode 100644
index 000000000000..edc51be4a77c
--- /dev/null
+++ b/include/linux/usb/role.h
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __LINUX_USB_ROLE_H
+#define __LINUX_USB_ROLE_H
+
+#include <linux/device.h>
+
+struct usb_role_switch;
+
+enum usb_role {
+ USB_ROLE_NONE,
+ USB_ROLE_HOST,
+ USB_ROLE_DEVICE,
+};
+
+typedef int (*usb_role_switch_set_t)(struct device *dev, enum usb_role role);
+typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev);
+
+/**
+ * struct usb_role_switch_desc - USB Role Switch Descriptor
+ * @usb2_port: Optional reference to the host controller port device (USB2)
+ * @usb3_port: Optional reference to the host controller port device (USB3)
+ * @udc: Optional reference to the peripheral controller device
+ * @set: Callback for setting the role
+ * @get: Callback for getting the role (optional)
+ * @allow_userspace_control: If true, userspace may change the role through sysfs
+ *
+ * @usb2_port and @usb3_port will point to the USB host port and @udc to the USB
+ * device controller behind the USB connector with the role switch. If
+ * @usb2_port, @usb3_port and @udc are included in the description, the
+ * reference count for them should be incremented by the caller of
+ * usb_role_switch_register() before registering the switch.
+ */
+struct usb_role_switch_desc {
+ struct device *usb2_port;
+ struct device *usb3_port;
+ struct device *udc;
+ usb_role_switch_set_t set;
+ usb_role_switch_get_t get;
+ bool allow_userspace_control;
+};
+
+int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role);
+enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw);
+struct usb_role_switch *usb_role_switch_get(struct device *dev);
+void usb_role_switch_put(struct usb_role_switch *sw);
+
+struct usb_role_switch *
+usb_role_switch_register(struct device *parent,
+ const struct usb_role_switch_desc *desc);
+void usb_role_switch_unregister(struct usb_role_switch *sw);
+
+#endif /* __LINUX_USB_ROLE_H */
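Editor's sketch (not part of the patch) of a role-switch provider; the my_* names are hypothetical stand-ins for a dual-role controller driver, which would flip its hardware between host and device mode in the set callback.

#include <linux/usb/role.h>

static int my_set_role(struct device *dev, enum usb_role role)
{
	/* reconfigure the controller for USB_ROLE_HOST/DEVICE/NONE here */
	return 0;
}

static struct usb_role_switch *my_register_role_switch(struct device *parent)
{
	struct usb_role_switch_desc desc = {
		.set = my_set_role,
		.allow_userspace_control = true,
	};

	return usb_role_switch_register(parent, &desc);	/* ERR_PTR on failure */
}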
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 073197f0d2bb..f0d839daeaea 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -91,17 +91,13 @@ struct tcpc_config {
unsigned int operating_snk_mw;
enum typec_port_type type;
+ enum typec_port_data data;
enum typec_role default_role;
bool try_role_hw; /* try.{src,snk} implemented in hardware */
const struct typec_altmode_desc *alt_modes;
};
-enum tcpc_usb_switch {
- TCPC_USB_SWITCH_CONNECT,
- TCPC_USB_SWITCH_DISCONNECT,
-};
-
/* Mux state attributes */
#define TCPC_MUX_USB_ENABLED BIT(0) /* USB enabled */
#define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */
@@ -116,14 +112,6 @@ enum tcpc_mux_mode {
TCPC_MUX_DP_ENABLED,
};
-struct tcpc_mux_dev {
- int (*set)(struct tcpc_mux_dev *dev, enum tcpc_mux_mode mux_mode,
- enum tcpc_usb_switch usb_config,
- enum typec_cc_polarity polarity);
- bool dfp_only;
- void *priv_data;
-};
-
/**
* struct tcpc_dev - Port configuration and callback functions
* @config: Pointer to port configuration
@@ -175,7 +163,6 @@ struct tcpc_dev {
int (*try_role)(struct tcpc_dev *dev, int role);
int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
const struct pd_message *msg);
- struct tcpc_mux_dev *mux;
};
struct tcpm_port;
@@ -183,14 +170,14 @@ struct tcpm_port;
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc);
void tcpm_unregister_port(struct tcpm_port *port);
-void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo);
-void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo,
- unsigned int max_snk_mv,
- unsigned int max_snk_ma,
- unsigned int max_snk_mw,
- unsigned int operating_snk_mw);
+int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo);
+int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo,
+ unsigned int max_snk_mv,
+ unsigned int max_snk_ma,
+ unsigned int max_snk_mw,
+ unsigned int operating_snk_mw);
void tcpm_vbus_change(struct tcpm_port *port);
void tcpm_cc_change(struct tcpm_port *port);
diff --git a/include/linux/usb/tilegx.h b/include/linux/usb/tilegx.h
deleted file mode 100644
index 817908573fe8..000000000000
--- a/include/linux/usb/tilegx.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Structure to contain platform-specific data related to Tile-Gx USB
- * controllers.
- */
-
-#ifndef _LINUX_USB_TILEGX_H
-#define _LINUX_USB_TILEGX_H
-
-#include <gxio/usb_host.h>
-
-struct tilegx_usb_platform_data {
- /* GXIO device index. */
- int dev_index;
-
- /* GXIO device context. */
- gxio_usb_host_context_t usb_ctx;
-
- /* Device IRQ. */
- unsigned int irq;
-};
-
-#endif /* _LINUX_USB_TILEGX_H */
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 0d44ce6af08f..672b39bb0adc 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -22,9 +22,15 @@ struct typec_port;
struct fwnode_handle;
enum typec_port_type {
+ TYPEC_PORT_SRC,
+ TYPEC_PORT_SNK,
+ TYPEC_PORT_DRP,
+};
+
+enum typec_port_data {
TYPEC_PORT_DFP,
TYPEC_PORT_UFP,
- TYPEC_PORT_DRP,
+ TYPEC_PORT_DRD,
};
enum typec_plug_type {
@@ -60,6 +66,12 @@ enum typec_accessory {
#define TYPEC_MAX_ACCESSORY 3
+enum typec_orientation {
+ TYPEC_ORIENTATION_NONE,
+ TYPEC_ORIENTATION_NORMAL,
+ TYPEC_ORIENTATION_REVERSE,
+};
+
/*
* struct usb_pd_identity - USB Power Delivery identity data
* @id_header: ID Header VDO
@@ -180,11 +192,14 @@ struct typec_partner_desc {
/*
* struct typec_capability - USB Type-C Port Capabilities
- * @role: DFP (Host-only), UFP (Device-only) or DRP (Dual Role)
+ * @type: Supported power role of the port
+ * @data: Supported data role of the port
* @revision: USB Type-C Specification release. Binary coded decimal
* @pd_revision: USB Power Delivery Specification revision if supported
- * @prefer_role: Initial role preference
+ * @prefer_role: Initial role preference (DRP ports).
* @accessory: Supported Accessory Modes
+ * @sw: Cable plug orientation switch
+ * @mux: Multiplexer switch for Alternate/Accessory Modes
* @fwnode: Optional fwnode of the port
* @try_role: Set data role preference for DRP port
* @dr_set: Set Data Role
@@ -197,11 +212,14 @@ struct typec_partner_desc {
*/
struct typec_capability {
enum typec_port_type type;
+ enum typec_port_data data;
u16 revision; /* 0120H = "1.2" */
u16 pd_revision; /* 0300H = "3.0" */
int prefer_role;
enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
+ struct typec_switch *sw;
+ struct typec_mux *mux;
struct fwnode_handle *fwnode;
int (*try_role)(const struct typec_capability *,
@@ -245,4 +263,8 @@ void typec_set_pwr_role(struct typec_port *port, enum typec_role role);
void typec_set_vconn_role(struct typec_port *port, enum typec_role role);
void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode);
+int typec_set_orientation(struct typec_port *port,
+ enum typec_orientation orientation);
+int typec_set_mode(struct typec_port *port, int mode);
+
#endif /* __LINUX_USB_TYPEC_H */
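Editor's sketch (not part of the patch): a port driver reporting plug orientation after CC detection. The expectation here - that typec_set_orientation() also drives the orientation switch supplied through typec_capability.sw - is an assumption about the class implementation, not something stated in this header.

#include <linux/printk.h>
#include <linux/usb/typec.h>

static void example_report_orientation(struct typec_port *port, bool cc2_active)
{
	int ret = typec_set_orientation(port, cc2_active ?
					TYPEC_ORIENTATION_REVERSE :
					TYPEC_ORIENTATION_NORMAL);
	if (ret)
		pr_warn("failed to set orientation: %d\n", ret);
}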
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
new file mode 100644
index 000000000000..12c1b057834b
--- /dev/null
+++ b/include/linux/usb/typec_mux.h
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __USB_TYPEC_MUX
+#define __USB_TYPEC_MUX
+
+#include <linux/list.h>
+#include <linux/usb/typec.h>
+
+struct device;
+
+/**
+ * struct typec_switch - USB Type-C cable orientation switch
+ * @dev: Switch device
+ * @entry: List entry
+ * @set: Callback to the driver for setting the orientation
+ *
+ * USB Type-C pin flipper switch routing the correct data pairs from the
+ * connector to the USB controller depending on the orientation of the cable
+ * plug.
+ */
+struct typec_switch {
+ struct device *dev;
+ struct list_head entry;
+
+ int (*set)(struct typec_switch *sw, enum typec_orientation orientation);
+};
+
+/**
+ * struct typec_mux - USB Type-C connector pin mux
+ * @dev: Mux device
+ * @entry: List entry
+ * @set: Callback to the driver for setting the state of the mux
+ *
+ * Pin Multiplexer/DeMultiplexer switch routing the USB Type-C connector pins to
+ * different components depending on the requested mode of operation. Used with
+ * Accessory/Alternate modes.
+ */
+struct typec_mux {
+ struct device *dev;
+ struct list_head entry;
+
+ int (*set)(struct typec_mux *mux, int state);
+};
+
+struct typec_switch *typec_switch_get(struct device *dev);
+void typec_switch_put(struct typec_switch *sw);
+int typec_switch_register(struct typec_switch *sw);
+void typec_switch_unregister(struct typec_switch *sw);
+
+struct typec_mux *typec_mux_get(struct device *dev);
+void typec_mux_put(struct typec_mux *mux);
+int typec_mux_register(struct typec_mux *mux);
+void typec_mux_unregister(struct typec_mux *mux);
+
+#endif /* __USB_TYPEC_MUX */
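Editor's sketch (not part of the patch) of an orientation-switch provider, e.g. a GPIO-controlled SuperSpeed lane mux; the GPIO handling and my_* names are illustrative. In probe, the driver would fill in sw.dev and sw.set before calling typec_switch_register().

#include <linux/kernel.h>
#include <linux/gpio/consumer.h>
#include <linux/usb/typec_mux.h>

struct my_ss_mux {
	struct typec_switch sw;
	struct gpio_desc *sel_gpio;	/* hypothetical selection line */
};

static int my_ss_mux_set(struct typec_switch *sw,
			 enum typec_orientation orientation)
{
	struct my_ss_mux *mux = container_of(sw, struct my_ss_mux, sw);

	gpiod_set_value(mux->sel_gpio,
			orientation == TYPEC_ORIENTATION_REVERSE);
	return 0;
}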
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index c8060c2ecd04..44429d9142ca 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -44,6 +44,8 @@ static inline void put_uts_ns(struct uts_namespace *ns)
{
kref_put(&ns->kref, free_uts_ns);
}
+
+void uts_ns_init(void);
#else
static inline void get_uts_ns(struct uts_namespace *ns)
{
@@ -61,6 +63,10 @@ static inline struct uts_namespace *copy_utsname(unsigned long flags,
return old_ns;
}
+
+static inline void uts_ns_init(void)
+{
+}
#endif
#ifdef CONFIG_PROC_SYSCTL
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 33b0bdbb613c..d9c4a6cce3c2 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -17,6 +17,7 @@
#define _LINUX_UUID_H_
#include <uapi/linux/uuid.h>
+#include <linux/string.h>
#define UUID_SIZE 16
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
new file mode 100644
index 000000000000..c71def6b310f
--- /dev/null
+++ b/include/linux/vbox_utils.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/* Copyright (C) 2006-2016 Oracle Corporation */
+
+#ifndef __VBOX_UTILS_H__
+#define __VBOX_UTILS_H__
+
+#include <linux/printk.h>
+#include <linux/vbox_vmmdev_types.h>
+
+struct vbg_dev;
+
+/**
+ * vboxguest logging functions; these log both to the backdoor and call
+ * the equivalent kernel pr_foo function.
+ */
+__printf(1, 2) void vbg_info(const char *fmt, ...);
+__printf(1, 2) void vbg_warn(const char *fmt, ...);
+__printf(1, 2) void vbg_err(const char *fmt, ...);
+
+/* Only use backdoor logging for non-dynamic debug builds */
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+__printf(1, 2) void vbg_debug(const char *fmt, ...);
+#else
+#define vbg_debug pr_debug
+#endif
+
+/**
+ * Allocate memory for generic request and initialize the request header.
+ *
+ * Return: the allocated memory
+ * @len: Size of memory block required for the request.
+ * @req_type: The generic request type.
+ */
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+
+/**
+ * Perform a generic request.
+ *
+ * Return: VBox status code
+ * @gdev: The Guest extension device.
+ * @req: Pointer to the request structure.
+ */
+int vbg_req_perform(struct vbg_dev *gdev, void *req);
+
+int vbg_hgcm_connect(struct vbg_dev *gdev,
+ struct vmmdev_hgcm_service_location *loc,
+ u32 *client_id, int *vbox_status);
+
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
+ u32 parm_count, int *vbox_status);
+
+int vbg_hgcm_call32(
+ struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
+ struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
+ int *vbox_status);
+
+/**
+ * Convert a VirtualBox status code to a standard Linux kernel return value.
+ * Return: 0 or negative errno value.
+ * @rc: VirtualBox status code to convert.
+ */
+int vbg_status_code_to_errno(int rc);
+
+/**
+ * Helper for the vboxsf driver to get a reference to the guest device.
+ * Return: a pointer to the gdev; or an ERR_PTR value on error.
+ */
+struct vbg_dev *vbg_get_gdev(void);
+
+/**
+ * Helper for the vboxsf driver to put a guest device reference.
+ * @gdev: Reference returned by vbg_get_gdev to put.
+ */
+void vbg_put_gdev(struct vbg_dev *gdev);
+
+#endif
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index a47b985341d1..66741ab087c1 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -145,7 +145,8 @@ extern struct vfio_info_cap_header *vfio_info_cap_add(
extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
extern int vfio_info_add_capability(struct vfio_info_cap *caps,
- int cap_type_id, void *cap_type);
+ struct vfio_info_cap_header *cap,
+ size_t size);
extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
int num_irqs, int max_irq_type,
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 960bedbdec87..77f0f0af3a71 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -168,11 +168,8 @@ int vga_switcheroo_process_delayed_switch(void);
bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
-
int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
#else
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -192,11 +189,8 @@ static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
-static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
-
static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
#endif
#endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h
new file mode 100644
index 000000000000..0d8bd6769b13
--- /dev/null
+++ b/include/linux/visorbus.h
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * All rights reserved.
+ */
+
+/*
+ * This header file is to be included by other kernel mode components that
+ * implement a particular kind of visor_device. Each of these other kernel
+ * mode components is called a visor device driver. Refer to visortemplate
+ * for a minimal sample visor device driver.
+ *
+ * There should be nothing in this file that is private to the visorbus
+ * bus implementation itself.
+ */
+
+#ifndef __VISORBUS_H__
+#define __VISORBUS_H__
+
+#include <linux/device.h>
+
+#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
+
+/*
+ * enum channel_serverstate
+ * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state.
+ * @CHANNELSRV_READY: Channel has been initialized by server.
+ */
+enum channel_serverstate {
+ CHANNELSRV_UNINITIALIZED = 0,
+ CHANNELSRV_READY = 1
+};
+
+/*
+ * enum channel_clientstate
+ * @CHANNELCLI_DETACHED:
+ * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it
+ * unless given TBD* explicit request
+ * (should actually be < DETACHED).
+ * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach.
+ * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time.
+ * @CHANNELCLI_BUSY: Client either wants to use or is using channel.
+ * @CHANNELCLI_OWNED: "No worries" state - client can access channel
+ * anytime.
+ */
+enum channel_clientstate {
+ CHANNELCLI_DETACHED = 0,
+ CHANNELCLI_DISABLED = 1,
+ CHANNELCLI_ATTACHING = 2,
+ CHANNELCLI_ATTACHED = 3,
+ CHANNELCLI_BUSY = 4,
+ CHANNELCLI_OWNED = 5
+};
+
+/*
+ * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that
+ * a guest can look at the FeatureFlags in the io channel, and configure the
+ * driver to use interrupts or not based on this setting. All feature bits for
+ * all channels should be defined here. The io channel feature bits are defined
+ * below.
+ */
+#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
+#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
+#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4)
+#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
+#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
+
+/*
+ * struct channel_header - Common Channel Header
+ * @signature: Signature.
+ * @legacy_state: DEPRECATED - replaced by the srv_state/cli_state fields below.
+ * @header_size: sizeof(struct channel_header).
+ * @size: Total size of this channel in bytes.
+ * @features: Flags to modify behavior.
+ * @chtype: Channel type: data, bus, control, etc.
+ * @partition_handle: ID of guest partition.
+ * @handle: Device number of this channel in client.
+ * @ch_space_offset: Offset in bytes to channel specific area.
+ * @version_id: Struct channel_header Version ID.
+ * @partition_index: Index of guest partition.
+ * @zone_guid: GUID of the channel's zone.
+ * @cli_str_offset: Offset from channel header to null-terminated
+ * ClientString (0 if ClientString not present).
+ * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this
+ * channel.
+ * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
+ * ServerStateUp, ServerStateDown, etc).
+ * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel.
+ * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>.
+ * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
+ * ServerStateUp, ServerStateDown, etc).
+ * @srv_state: CHANNEL_SERVERSTATE.
+ * @cli_error_boot: Bits to indicate err states for boot clients, so err
+ * messages can be throttled.
+ * @cli_error_os: Bits to indicate err states for OS clients, so err
+ * messages can be throttled.
+ * @filler: Pad out to 128 byte cacheline.
+ * @recover_channel: Please add all new single-byte values below here.
+ */
+struct channel_header {
+ u64 signature;
+ u32 legacy_state;
+ /* SrvState, CliStateBoot, and CliStateOS below */
+ u32 header_size;
+ u64 size;
+ u64 features;
+ guid_t chtype;
+ u64 partition_handle;
+ u64 handle;
+ u64 ch_space_offset;
+ u32 version_id;
+ u32 partition_index;
+ guid_t zone_guid;
+ u32 cli_str_offset;
+ u32 cli_state_boot;
+ u32 cmd_state_cli;
+ u32 cli_state_os;
+ u32 ch_characteristic;
+ u32 cmd_state_srv;
+ u32 srv_state;
+ u8 cli_error_boot;
+ u8 cli_error_os;
+ u8 filler[1];
+ u8 recover_channel;
+} __packed;
+
+#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
+
+/*
+ * struct signal_queue_header - Subheader for the Signal Type variation of the
+ * Common Channel.
+ * @version: SIGNAL_QUEUE_HEADER Version ID.
+ * @chtype: Queue type: storage, network.
+ * @size: Total size of this queue in bytes.
+ * @sig_base_offset: Offset to signal queue area.
+ * @features: Flags to modify behavior.
+ * @num_sent: Total # of signals placed in this queue.
+ * @num_overflows: Total # of inserts failed due to full queue.
+ * @signal_size: Total size of a signal for this queue.
+ * @max_slots: Max # of slots in queue, 1 slot is always empty.
+ * @max_signals: Max # of signals in queue (MaxSignalSlots-1).
+ * @head: Queue head signal #.
+ * @num_received: Total # of signals removed from this queue.
+ * @tail: Queue tail signal.
+ * @reserved1: Reserved field.
+ * @reserved2: Reserved field.
+ * @client_queue:
+ * @num_irq_received: Total # of Interrupts received. This is incremented by the
+ * ISR in the guest windows driver.
+ * @num_empty: Number of times that visor_signal_remove is called and
+ * returned Empty Status.
+ * @errorflags: Error bits set during SignalReinit to denote trouble with
+ * client's fields.
+ * @filler: Pad out to 64 byte cacheline.
+ */
+struct signal_queue_header {
+ /* 1st cache line */
+ u32 version;
+ u32 chtype;
+ u64 size;
+ u64 sig_base_offset;
+ u64 features;
+ u64 num_sent;
+ u64 num_overflows;
+ u32 signal_size;
+ u32 max_slots;
+ u32 max_signals;
+ u32 head;
+ /* 2nd cache line */
+ u64 num_received;
+ u32 tail;
+ u32 reserved1;
+ u64 reserved2;
+ u64 client_queue;
+ u64 num_irq_received;
+ u64 num_empty;
+ u32 errorflags;
+ u8 filler[12];
+} __packed;
+
+/* VISORCHANNEL Guids */
+/* {414815ed-c58c-11da-95a9-00e08161165f} */
+#define VISOR_VHBA_CHANNEL_GUID \
+ GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
+ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+#define VISOR_VHBA_CHANNEL_GUID_STR \
+ "414815ed-c58c-11da-95a9-00e08161165f"
+struct visorchipset_state {
+ u32 created:1;
+ u32 attached:1;
+ u32 configured:1;
+ u32 running:1;
+ /* Remaining bits in this 32-bit word are reserved. */
+};
+
+/**
+ * struct visor_device - A device type for things "plugged" into the visorbus
+ * bus
+ * @visorchannel: Points to the channel that the device is
+ * associated with.
+ * @channel_type_guid: Identifies the channel type to the bus driver.
+ * @device: Device struct meant for use by the bus driver
+ * only.
+ * @list_all: Used by the bus driver to enumerate devices.
+ * @timer: Timer fired periodically to do interrupt-type
+ * activity.
+ * @being_removed: Indicates that the device is being removed from
+ * the bus. Private bus driver use only.
+ * @visordriver_callback_lock: Used by the bus driver to lock when adding and
+ * removing devices.
+ * @pausing: Indicates that a change towards a paused state
+ * is in progress. Only modified by the bus driver.
+ * @resuming: Indicates that a change towards a running state
+ * is in progress. Only modified by the bus driver.
+ * @chipset_bus_no: Private field used by the bus driver.
+ * @chipset_dev_no: Private field used by the bus driver.
+ * @state: Used to indicate the current state of the
+ * device.
+ * @inst: Unique GUID for this instance of the device.
+ * @name: Name of the device.
+ * @pending_msg_hdr: For private use by bus driver to respond to
+ * hypervisor requests.
+ * @vbus_hdr_info: A pointer to header info. Private use by bus
+ * driver.
+ * @partition_guid: Indicates client partition id. This should be the
+ * same across all visor_devices in the current
+ * guest. Private use by bus driver only.
+ */
+struct visor_device {
+ struct visorchannel *visorchannel;
+ guid_t channel_type_guid;
+ /* These fields are for private use by the bus driver only. */
+ struct device device;
+ struct list_head list_all;
+ struct timer_list timer;
+ bool timer_active;
+ bool being_removed;
+ struct mutex visordriver_callback_lock; /* synchronize probe/remove */
+ bool pausing;
+ bool resuming;
+ u32 chipset_bus_no;
+ u32 chipset_dev_no;
+ struct visorchipset_state state;
+ guid_t inst;
+ u8 *name;
+ struct controlvm_message_header *pending_msg_hdr;
+ void *vbus_hdr_info;
+ guid_t partition_guid;
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_bus_info;
+};
+
+#define to_visor_device(x) container_of(x, struct visor_device, device)
+
+typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
+ int status);
+
+/*
+ * This struct describes a specific visor channel, by providing its GUID, name,
+ * and sizes.
+ */
+struct visor_channeltype_descriptor {
+ const guid_t guid;
+ const char *name;
+ u64 min_bytes;
+ u32 version;
+};
+
+/**
+ * struct visor_driver - Information provided by each visor driver when it
+ * registers with the visorbus driver
+ * @name: Name of the visor driver.
+ * @owner: The module owner.
+ * @channel_types: Types of channels handled by this driver, ending with
+ * a zero GUID. Our specialized BUS.match() method knows
+ * about this list, and uses it to determine whether this
+ * driver will in fact handle a new device that it has
+ * detected.
+ * @probe: Called when a new device comes online, by our probe()
+ * function specified by driver.probe() (triggered
+ * ultimately by some call to driver_register(),
+ * bus_add_driver(), or driver_attach()).
+ * @remove: Called when a new device is removed, by our remove()
+ * function specified by driver.remove() (triggered
+ * ultimately by some call to device_release_driver()).
+ * @channel_interrupt: Called periodically, whenever there is a possibility
+ * that "something interesting" may have happened to the
+ * channel.
+ * @pause: Called to initiate a change of the device's state. If
+ * the return value is < 0, there was an error and the
+ * state transition will NOT occur. If the return value
+ * is >= 0, then the state transition was INITIATED
+ * successfully, and complete_func() will be called (or
+ * was just called) with the final status when either the
+ * state transition fails or completes successfully.
+ * @resume: Behaves similarly to @pause.
+ * @driver: Private reference to the device driver. For use by bus
+ * driver only.
+ */
+struct visor_driver {
+ const char *name;
+ struct module *owner;
+ struct visor_channeltype_descriptor *channel_types;
+ int (*probe)(struct visor_device *dev);
+ void (*remove)(struct visor_device *dev);
+ void (*channel_interrupt)(struct visor_device *dev);
+ int (*pause)(struct visor_device *dev,
+ visorbus_state_complete_func complete_func);
+ int (*resume)(struct visor_device *dev,
+ visorbus_state_complete_func complete_func);
+
+ /* These fields are for private use by the bus driver only. */
+ struct device_driver driver;
+};
+
+#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
+
+int visor_check_channel(struct channel_header *ch, struct device *dev,
+ const guid_t *expected_uuid, char *chname,
+ u64 expected_min_bytes, u32 expected_version,
+ u64 expected_signature);
+
+int visorbus_register_visor_driver(struct visor_driver *drv);
+void visorbus_unregister_visor_driver(struct visor_driver *drv);
+int visorbus_read_channel(struct visor_device *dev,
+ unsigned long offset, void *dest,
+ unsigned long nbytes);
+int visorbus_write_channel(struct visor_device *dev,
+ unsigned long offset, void *src,
+ unsigned long nbytes);
+int visorbus_enable_channel_interrupts(struct visor_device *dev);
+void visorbus_disable_channel_interrupts(struct visor_device *dev);
+
+int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
+ void *msg);
+int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
+ void *msg);
+bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
+const guid_t *visorchannel_get_guid(struct visorchannel *channel);
+
+#define BUS_ROOT_DEVICE UINT_MAX
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+ struct visor_device *from);
+#endif
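Editor's sketch (not part of the patch) of a minimal visor device driver registration against the vHBA channel GUID defined above; the probe/remove bodies and the version value 0 are placeholders.

#include <linux/module.h>
#include <linux/visorbus.h>

static struct visor_channeltype_descriptor example_channel_types[] = {
	{ VISOR_VHBA_CHANNEL_GUID, "example-vhba",
	  sizeof(struct channel_header), 0 },
	{}	/* zero GUID terminates the list for BUS.match() */
};

static int example_probe(struct visor_device *dev)
{
	return 0;
}

static void example_remove(struct visor_device *dev)
{
}

static struct visor_driver example_driver = {
	.name		= "example-visor",
	.owner		= THIS_MODULE,
	.channel_types	= example_channel_types,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* module init/exit would call visorbus_register_visor_driver() and
 * visorbus_unregister_visor_driver() on &example_driver. */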
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1779c9817b39..f25cef84b41d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -20,6 +20,17 @@ extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif
+struct reclaim_stat {
+ unsigned nr_dirty;
+ unsigned nr_unqueued_dirty;
+ unsigned nr_congested;
+ unsigned nr_writeback;
+ unsigned nr_immediate;
+ unsigned nr_activate;
+ unsigned nr_ref_keep;
+ unsigned nr_unmap_fail;
+};
+
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
* Light weight per cpu counter implementation.
@@ -216,23 +227,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
return x;
}
-static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
- enum node_stat_item item)
-{
- long x = atomic_long_read(&pgdat->vm_stat[item]);
-
-#ifdef CONFIG_SMP
- int cpu;
- for_each_online_cpu(cpu)
- x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];
-
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
-
-
#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
index d58594a32324..78901ecd2f95 100644
--- a/include/linux/w1-gpio.h
+++ b/include/linux/w1-gpio.h
@@ -10,16 +10,15 @@
#ifndef _LINUX_W1_GPIO_H
#define _LINUX_W1_GPIO_H
+struct gpio_desc;
+
/**
* struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
- * @pin: GPIO pin to use
- * @is_open_drain: GPIO pin is configured as open drain
*/
struct w1_gpio_platform_data {
- unsigned int pin;
- unsigned int is_open_drain:1;
+ struct gpio_desc *gpiod;
+ struct gpio_desc *pullup_gpiod;
void (*enable_external_pullup)(int enable);
- unsigned int ext_pullup_enable_pin;
unsigned int pullup_duration;
};
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 158715445ffb..d9f131ecf708 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -206,14 +206,16 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
/*
* Wakeup macros to be used to report events to the targets.
*/
+#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
+#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
- __wake_up(x, TASK_NORMAL, 1, (void *) (m))
+ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
- __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
- __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
+ __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
- __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define ___wait_cond_timeout(condition) \
({ \
@@ -597,6 +599,120 @@ do { \
__ret; \
})
+/**
+ * wait_event_idle - wait for a condition without contributing to system load
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_IDLE) until the
+ * @condition evaluates to true.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ */
+#define wait_event_idle(wq_head, condition) \
+do { \
+ might_sleep(); \
+ if (!(condition)) \
+ ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
+} while (0)
+
+/**
+ * wait_event_idle_exclusive - wait for a condition without contributing to system load
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_IDLE) until the
+ * @condition evaluates to true.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
+ *
+ * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
+ * set, so if other processes wait on the same list, further processes
+ * are not considered when this process is woken.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ */
+#define wait_event_idle_exclusive(wq_head, condition) \
+do { \
+ might_sleep(); \
+ if (!(condition)) \
+ ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
+} while (0)
+
+#define __wait_event_idle_timeout(wq_head, condition, timeout) \
+ ___wait_event(wq_head, ___wait_cond_timeout(condition), \
+ TASK_IDLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_IDLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
+ */
+#define wait_event_idle_timeout(wq_head, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
+ __ret; \
+})
+
+#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
+ ___wait_event(wq_head, ___wait_cond_timeout(condition), \
+ TASK_IDLE, 1, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_IDLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq_head is woken up.
+ *
+ * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
+ * set, so if other processes wait on the same list, further processes
+ * are not considered when this process is woken.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
+ */
+#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
+ __ret; \
+})
+
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
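Editor's note: the wait_event_idle*() helpers added above follow the usual wait_event*() calling convention, but sleep in TASK_IDLE so the waiter does not count towards the load average. A small usage sketch; the queue, flag and function names are invented for illustration:

#include <linux/kernel.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);    /* hypothetical queue */
static bool resync_done;

static void wait_for_resync(void)
{
        /* sleeps in TASK_IDLE, so it does not inflate the load average */
        long left = wait_event_idle_timeout(resync_wait, resync_done, 10 * HZ);

        if (!left)
                pr_warn("resync still running after 10 seconds\n");
}

static void resync_finished(void)
{
        resync_done = true;             /* change the condition first ... */
        wake_up(&resync_wait);          /* ... then wake the waiters */
}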
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 61b39eaf7cad..9318b2166439 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -10,7 +10,6 @@
struct wait_bit_key {
void *flags;
int bit_nr;
-#define WAIT_ATOMIC_T_BIT_NR -1
unsigned long timeout;
};
@@ -22,21 +21,15 @@ struct wait_bit_queue_entry {
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
-#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
- { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
-
typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
-typedef int wait_atomic_t_action_f(atomic_t *counter, unsigned int mode);
void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
void wake_up_bit(void *word, int bit);
-void wake_up_atomic_t(atomic_t *p);
int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
-int out_of_line_wait_on_atomic_t(atomic_t *p, wait_atomic_t_action_f action, unsigned int mode);
struct wait_queue_head *bit_waitqueue(void *word, int bit);
extern void __init wait_bit_init(void);
@@ -57,7 +50,6 @@ extern int bit_wait(struct wait_bit_key *key, int mode);
extern int bit_wait_io(struct wait_bit_key *key, int mode);
extern int bit_wait_timeout(struct wait_bit_key *key, int mode);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode);
-extern int atomic_t_wait(atomic_t *counter, unsigned int mode);
/**
* wait_on_bit - wait for a bit to be cleared
@@ -243,23 +235,74 @@ wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
-/**
- * wait_on_atomic_t - Wait for an atomic_t to become 0
- * @val: The atomic value being waited on, a kernel virtual address
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
- * the purpose of getting a waitqueue, but we set the key to a bit number
- * outside of the target 'word'.
- */
-static inline
-int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode)
-{
- might_sleep();
- if (atomic_read(val) == 0)
- return 0;
- return out_of_line_wait_on_atomic_t(val, action, mode);
-}
+extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
+extern void wake_up_var(void *var);
+extern wait_queue_head_t *__var_waitqueue(void *p);
+
+#define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \
+({ \
+ __label__ __out; \
+ struct wait_queue_head *__wq_head = __var_waitqueue(var); \
+ struct wait_bit_queue_entry __wbq_entry; \
+ long __ret = ret; /* explicit shadow */ \
+ \
+ init_wait_var_entry(&__wbq_entry, var, \
+ exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
+ for (;;) { \
+ long __int = prepare_to_wait_event(__wq_head, \
+ &__wbq_entry.wq_entry, \
+ state); \
+ if (condition) \
+ break; \
+ \
+ if (___wait_is_interruptible(state) && __int) { \
+ __ret = __int; \
+ goto __out; \
+ } \
+ \
+ cmd; \
+ } \
+ finish_wait(__wq_head, &__wbq_entry.wq_entry); \
+__out: __ret; \
+})
+
+#define __wait_var_event(var, condition) \
+ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ schedule())
+
+#define wait_var_event(var, condition) \
+do { \
+ might_sleep(); \
+ if (condition) \
+ break; \
+ __wait_var_event(var, condition); \
+} while (0)
+
+#define __wait_var_event_killable(var, condition) \
+ ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
+ schedule())
+
+#define wait_var_event_killable(var, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_var_event_killable(var, condition); \
+ __ret; \
+})
+
+#define __wait_var_event_timeout(var, condition, timeout) \
+ ___wait_var_event(var, ___wait_cond_timeout(condition), \
+ TASK_UNINTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define wait_var_event_timeout(var, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_var_event_timeout(var, condition, timeout); \
+ __ret; \
+})
#endif /* _LINUX_WAIT_BIT_H */
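Editor's note: the removed wait_on_atomic_t()/wake_up_atomic_t() pair is superseded by the more general wait_var_event()/wake_up_var() interface added above, which waits for an arbitrary condition on an arbitrary address. A sketch of the typical conversion for a reference count; the structure and helpers are hypothetical:

#include <linux/atomic.h>
#include <linux/wait_bit.h>

struct my_object {                      /* hypothetical object */
        atomic_t refs;
};

static void my_object_put(struct my_object *obj)
{
        /* wake_up_var() must run after the variable has actually changed */
        if (atomic_dec_and_test(&obj->refs))
                wake_up_var(&obj->refs);
}

static void my_object_wait_unused(struct my_object *obj)
{
        /* replaces wait_on_atomic_t(&obj->refs, atomic_t_wait, ...) */
        wait_var_event(&obj->refs, atomic_read(&obj->refs) == 0);
}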
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4a54ef96aff5..39a0e215022a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -13,6 +13,7 @@
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
+#include <linux/rcupdate.h>
struct workqueue_struct;
@@ -120,6 +121,14 @@ struct delayed_work {
int cpu;
};
+struct rcu_work {
+ struct work_struct work;
+ struct rcu_head rcu;
+
+ /* target workqueue ->rcu uses to queue ->work */
+ struct workqueue_struct *wq;
+};
+
/**
* struct workqueue_attrs - A struct for workqueue attributes.
*
@@ -151,6 +160,11 @@ static inline struct delayed_work *to_delayed_work(struct work_struct *work)
return container_of(work, struct delayed_work, work);
}
+static inline struct rcu_work *to_rcu_work(struct work_struct *work)
+{
+ return container_of(work, struct rcu_work, work);
+}
+
struct execute_work {
struct work_struct work;
};
@@ -266,6 +280,12 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \
__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
+#define INIT_RCU_WORK(_work, _func) \
+ INIT_WORK(&(_work)->work, (_func))
+
+#define INIT_RCU_WORK_ONSTACK(_work, _func) \
+ INIT_WORK_ONSTACK(&(_work)->work, (_func))
+
/**
* work_pending - Find out whether a work item is currently pending
* @work: The work item in question
@@ -447,6 +467,7 @@ extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
+extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
@@ -456,15 +477,17 @@ extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
-extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
+extern bool flush_rcu_work(struct rcu_work *rwork);
+
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
+extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
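Editor's note: struct rcu_work, added above, is a work item whose execution is deferred until after an RCU grace period; queue_rcu_work() uses the embedded rcu_head to wait out the grace period and only then queues ->work on the target workqueue. A hedged sketch of freeing an object that RCU readers may still reference; the container and helper names are invented:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_free {                  /* hypothetical container */
        struct rcu_work rwork;
        void *payload;
};

static void deferred_free_workfn(struct work_struct *work)
{
        struct deferred_free *d =
                container_of(to_rcu_work(work), struct deferred_free, rwork);

        kfree(d->payload);              /* safe: a grace period has elapsed */
        kfree(d);
}

static void deferred_free(struct deferred_free *d)
{
        INIT_RCU_WORK(&d->rwork, deferred_free_workfn);
        queue_rcu_work(system_wq, &d->rwork);
}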
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
new file mode 100644
index 000000000000..2dfc8006fe64
--- /dev/null
+++ b/include/linux/xarray.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_XARRAY_H
+#define _LINUX_XARRAY_H
+/*
+ * eXtensible Arrays
+ * Copyright (c) 2017 Microsoft Corporation
+ * Author: Matthew Wilcox <mawilcox@microsoft.com>
+ */
+
+#include <linux/spinlock.h>
+
+#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
+#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
+#define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
+#define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock)
+#define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock)
+#define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock)
+#define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock)
+#define xa_lock_irqsave(xa, flags) \
+ spin_lock_irqsave(&(xa)->xa_lock, flags)
+#define xa_unlock_irqrestore(xa, flags) \
+ spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+
+#endif /* _LINUX_XARRAY_H */
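Editor's note: at this stage the new header only provides locking wrappers, each expanding to the corresponding spin_lock*() call on a spinlock member literally named xa_lock. A sketch with a hypothetical structure that embeds such a lock:

#include <linux/xarray.h>

struct tag_set {                        /* hypothetical structure */
        spinlock_t xa_lock;             /* member name the wrappers expect */
        unsigned long bits;
};

static void tag_set_mark(struct tag_set *t, unsigned long mask)
{
        unsigned long flags;

        /* expands to spin_lock_irqsave(&t->xa_lock, flags) */
        xa_lock_irqsave(t, flags);
        t->bits |= mask;
        xa_unlock_irqrestore(t, flags);
}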
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 004ba807df96..7238865e75b0 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -108,4 +108,6 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver);
+bool zpool_evictable(struct zpool *pool);
+
#endif
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 57a8e98f2708..2219cce81ca4 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -47,6 +47,8 @@ void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
+size_t zs_huge_class_size(struct zs_pool *pool);
+
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
diff --git a/include/media/blackfin/bfin_capture.h b/include/media/blackfin/bfin_capture.h
deleted file mode 100644
index a999a3970c69..000000000000
--- a/include/media/blackfin/bfin_capture.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BFIN_CAPTURE_H_
-#define _BFIN_CAPTURE_H_
-
-#include <linux/i2c.h>
-
-struct v4l2_input;
-struct ppi_info;
-
-struct bcap_route {
- u32 input;
- u32 output;
- u32 ppi_control;
-};
-
-struct bfin_capture_config {
- /* card name */
- char *card_name;
- /* inputs available at the sub device */
- struct v4l2_input *inputs;
- /* number of inputs supported */
- int num_inputs;
- /* routing information for each input */
- struct bcap_route *routes;
- /* i2c bus adapter no */
- int i2c_adapter_id;
- /* i2c subdevice board info */
- struct i2c_board_info board_info;
- /* ppi board info */
- const struct ppi_info *ppi_info;
- /* ppi control */
- unsigned long ppi_control;
- /* ppi interrupt mask */
- u32 int_mask;
- /* horizontal blanking pixels */
- int blank_pixels;
-};
-
-#endif
diff --git a/include/media/blackfin/ppi.h b/include/media/blackfin/ppi.h
deleted file mode 100644
index 987e49e8f9c9..000000000000
--- a/include/media/blackfin/ppi.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Analog Devices PPI header file
- *
- * Copyright (c) 2011 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _PPI_H_
-#define _PPI_H_
-
-#include <linux/interrupt.h>
-#include <asm/blackfin.h>
-#include <asm/bfin_ppi.h>
-
-/* EPPI */
-#ifdef EPPI_EN
-#define PORT_EN EPPI_EN
-#define PORT_DIR EPPI_DIR
-#define DMA32 0
-#define PACK_EN PACKEN
-#endif
-
-/* EPPI3 */
-#ifdef EPPI0_CTL2
-#define PORT_EN EPPI_CTL_EN
-#define PORT_DIR EPPI_CTL_DIR
-#define PACK_EN EPPI_CTL_PACKEN
-#define DMA32 0
-#define DLEN_8 EPPI_CTL_DLEN08
-#define DLEN_16 EPPI_CTL_DLEN16
-#endif
-
-struct ppi_if;
-
-struct ppi_params {
- u32 width; /* width in pixels */
- u32 height; /* height in lines */
- u32 hdelay; /* delay after the HSYNC in pixels */
- u32 vdelay; /* delay after the VSYNC in lines */
- u32 line; /* total pixels per line */
- u32 frame; /* total lines per frame */
- u32 hsync; /* HSYNC length in pixels */
- u32 vsync; /* VSYNC length in lines */
- int bpp; /* bits per pixel */
- int dlen; /* data length for ppi in bits */
- u32 ppi_control; /* ppi configuration */
- u32 int_mask; /* interrupt mask */
-};
-
-struct ppi_ops {
- int (*attach_irq)(struct ppi_if *ppi, irq_handler_t handler);
- void (*detach_irq)(struct ppi_if *ppi);
- int (*start)(struct ppi_if *ppi);
- int (*stop)(struct ppi_if *ppi);
- int (*set_params)(struct ppi_if *ppi, struct ppi_params *params);
- void (*update_addr)(struct ppi_if *ppi, unsigned long addr);
-};
-
-enum ppi_type {
- PPI_TYPE_PPI,
- PPI_TYPE_EPPI,
- PPI_TYPE_EPPI3,
-};
-
-struct ppi_info {
- enum ppi_type type;
- int dma_ch;
- int irq_err;
- void __iomem *base;
- const unsigned short *pin_req;
-};
-
-struct ppi_if {
- struct device *dev;
- unsigned long ppi_control;
- const struct ppi_ops *ops;
- const struct ppi_info *info;
- bool err_int; /* if we need request error interrupt */
- bool err; /* if ppi has fifo error */
- void *priv;
-};
-
-struct ppi_if *ppi_create_instance(struct platform_device *pdev,
- const struct ppi_info *info);
-void ppi_delete_instance(struct ppi_if *ppi);
-#endif
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index 57ec319a7f44..cf0add70b0e7 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* cec-notifier.h - notify CEC drivers of physical address changes
*
* Copyright 2016 Russell King <rmk+kernel@arm.linux.org.uk>
* Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef LINUX_CEC_NOTIFIER_H
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index 83b3e17e0a07..ed16c6dde0ba 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* cec-pin.h - low-level CEC pin control
*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef LINUX_CEC_PIN_H
diff --git a/include/media/cec.h b/include/media/cec.h
index 16341210d3ba..580ab1042898 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* cec - HDMI Consumer Electronics Control support header
*
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _MEDIA_CEC_H
@@ -104,7 +92,7 @@ struct cec_fh {
wait_queue_head_t wait;
struct mutex lock;
struct list_head events[CEC_NUM_EVENTS]; /* queued events */
- u8 queued_events[CEC_NUM_EVENTS];
+ u16 queued_events[CEC_NUM_EVENTS];
unsigned int total_queued_events;
struct cec_event_entry core_events[CEC_NUM_CORE_EVENTS];
struct list_head msgs; /* queued messages */
@@ -122,12 +110,17 @@ struct cec_adap_ops {
/* Low-level callbacks */
int (*adap_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
+ int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg);
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
void (*adap_free)(struct cec_adapter *adap);
+ /* Error injection callbacks */
+ int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf);
+ bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line);
+
/* High-level CEC message callback */
int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
@@ -191,11 +184,6 @@ struct cec_adapter {
u32 tx_timeouts;
-#ifdef CONFIG_MEDIA_CEC_RC
- bool rc_repeating;
- int rc_last_scancode;
- u64 rc_last_keypress;
-#endif
#ifdef CONFIG_CEC_NOTIFIER
struct cec_notifier *notifier;
#endif
@@ -205,6 +193,7 @@ struct cec_adapter {
struct dentry *cec_dir;
struct dentry *status_file;
+ struct dentry *error_inj_file;
u16 phys_addrs[15];
u32 sequence;
@@ -229,6 +218,18 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
return adap->phys_addr == 0;
}
+/**
+ * cec_is_registered() - is the CEC adapter registered?
+ *
+ * @adap: the CEC adapter, may be NULL.
+ *
+ * Return: true if the adapter is registered, false otherwise.
+ */
+static inline bool cec_is_registered(const struct cec_adapter *adap)
+{
+ return adap && adap->devnode.registered;
+}
+
#define cec_phys_addr_exp(pa) \
((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
@@ -290,11 +291,12 @@ static inline void cec_received_msg(struct cec_adapter *adap,
*
* @adap: pointer to the cec adapter
* @is_high: when true the CEC pin is high, otherwise it is low
+ * @dropped_events: when true some events were dropped
* @ts: the timestamp for this event
*
*/
-void cec_queue_pin_cec_event(struct cec_adapter *adap,
- bool is_high, ktime_t ts);
+void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
+ bool dropped_events, ktime_t ts);
/**
* cec_queue_pin_hpd_event() - queue a pin event with a given timestamp.
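Editor's note: with the extra dropped_events argument above, a CEC pin driver can report from its edge interrupt whether intermediate transitions were lost since the previous event. A hedged sketch of such an IRQ handler; the driver structure and pin-read helper are hypothetical:

#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <media/cec.h>

struct my_cec {                         /* hypothetical driver data */
        struct cec_adapter *adap;
};

static bool my_read_pin_level(struct my_cec *cec);      /* hypothetical helper */

static irqreturn_t my_cec_pin_irq(int irq, void *priv)
{
        struct my_cec *cec = priv;
        bool high = my_read_pin_level(cec);

        /* third argument: no events were dropped since the last edge */
        cec_queue_pin_cec_event(cec->adap, high, false, ktime_get());
        return IRQ_HANDLED;
}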
diff --git a/include/media/demux.h b/include/media/demux.h
new file mode 100644
index 000000000000..bf00a5a41a90
--- /dev/null
+++ b/include/media/demux.h
@@ -0,0 +1,600 @@
+/*
+ * demux.h
+ *
+ * The Kernel Digital TV Demux kABI defines a driver-internal interface for
+ * registering low-level, hardware specific driver to a hardware independent
+ * demux layer.
+ *
+ * Copyright (c) 2002 Convergence GmbH
+ *
+ * based on code:
+ * Copyright (c) 2000 Nokia Research Center
+ * Tampere, FINLAND
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DEMUX_H
+#define __DEMUX_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/time.h>
+#include <linux/dvb/dmx.h>
+
+/*
+ * Common definitions
+ */
+
+/*
+ * DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter.
+ */
+
+#ifndef DMX_MAX_FILTER_SIZE
+#define DMX_MAX_FILTER_SIZE 18
+#endif
+
+/*
+ * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed
+ * filter.
+ */
+
+#ifndef DMX_MAX_SECTION_SIZE
+#define DMX_MAX_SECTION_SIZE 4096
+#endif
+#ifndef DMX_MAX_SECFEED_SIZE
+#define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188)
+#endif
+
+/*
+ * TS packet reception
+ */
+
+/**
+ * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set\(\)
+ *
+ * @TS_PACKET: Send TS packets (188 bytes) to callback (default).
+ * @TS_PAYLOAD_ONLY: In case TS_PACKET is set, only send the TS payload
+ * (<=184 bytes per packet) to callback
+ * @TS_DECODER: Send stream to built-in decoder (if present).
+ * @TS_DEMUX: In case TS_PACKET is set, send the TS to the demux
+ * device, not to the dvr device
+ */
+enum ts_filter_type {
+ TS_PACKET = 1,
+ TS_PAYLOAD_ONLY = 2,
+ TS_DECODER = 4,
+ TS_DEMUX = 8,
+};
+
+/**
+ * struct dmx_ts_feed - Structure that contains a TS feed filter
+ *
+ * @is_filtering: Set to non-zero when filtering in progress
+ * @parent: pointer to struct dmx_demux
+ * @priv: pointer to private data of the API client
+ * @set: sets the TS filter
+ * @start_filtering: starts TS filtering
+ * @stop_filtering: stops TS filtering
+ *
+ * A TS feed is typically mapped to a hardware PID filter on the demux chip.
+ * Using this API, the client can set the filtering properties to start/stop
+ * filtering TS packets on a particular TS feed.
+ */
+struct dmx_ts_feed {
+ int is_filtering;
+ struct dmx_demux *parent;
+ void *priv;
+ int (*set)(struct dmx_ts_feed *feed,
+ u16 pid,
+ int type,
+ enum dmx_ts_pes pes_type,
+ ktime_t timeout);
+ int (*start_filtering)(struct dmx_ts_feed *feed);
+ int (*stop_filtering)(struct dmx_ts_feed *feed);
+};
+
+/*
+ * Section reception
+ */
+
+/**
+ * struct dmx_section_filter - Structure that describes a section filter
+ *
+ * @filter_value: Contains up to 16 bytes (128 bits) of the TS section header
+ * that will be matched by the section filter
+ * @filter_mask: Contains a 16 bytes (128 bits) filter mask with the bits
+ * specified by @filter_value that will be used on the filter
+ * match logic.
+ * @filter_mode: Contains a 16 bytes (128 bits) filter mode.
+ * @parent: Back-pointer to struct dmx_section_feed.
+ * @priv: Pointer to private data of the API client.
+ *
+ *
+ * The @filter_mask controls which bits of @filter_value are compared with
+ * the section headers/payload. On a binary value of 1 in filter_mask, the
+ * corresponding bits are compared. The filter only accepts sections that are
+ * equal to filter_value in all the tested bit positions.
+ */
+struct dmx_section_filter {
+ u8 filter_value[DMX_MAX_FILTER_SIZE];
+ u8 filter_mask[DMX_MAX_FILTER_SIZE];
+ u8 filter_mode[DMX_MAX_FILTER_SIZE];
+ struct dmx_section_feed *parent;
+
+ void *priv;
+};
+
+/**
+ * struct dmx_section_feed - Structure that contains a section feed filter
+ *
+ * @is_filtering: Set to non-zero when filtering in progress
+ * @parent: pointer to struct dmx_demux
+ * @priv: pointer to private data of the API client
+ * @check_crc: If non-zero, check the CRC values of filtered sections.
+ * @set: sets the section filter
+ * @allocate_filter: This function is used to allocate a section filter on
+ * the demux. It should only be called when no filtering
+ * is in progress on this section feed. If a filter cannot
+ * be allocated, the function fails with -ENOSPC.
+ * @release_filter: This function releases all the resources of a
+ * previously allocated section filter. The function
+ * should not be called while filtering is in progress
+ * on this section feed. After calling this function,
+ * the caller should not try to dereference the filter
+ * pointer.
+ * @start_filtering: starts section filtering
+ * @stop_filtering: stops section filtering
+ *
+ * A section feed is a demux resource for filtering and receiving sections.
+ * On hardware with section filtering support it maps directly to the demux
+ * HW; otherwise it is emulated in software on top of a hardware PID filter.
+ * Using this API, the client can set the filtering properties to start/stop
+ * filtering sections on a particular section feed.
+ */
+struct dmx_section_feed {
+ int is_filtering;
+ struct dmx_demux *parent;
+ void *priv;
+
+ int check_crc;
+
+ /* private: Used internally at dvb_demux.c */
+ u32 crc_val;
+
+ u8 *secbuf;
+ u8 secbuf_base[DMX_MAX_SECFEED_SIZE];
+ u16 secbufp, seclen, tsfeedp;
+
+ /* public: */
+ int (*set)(struct dmx_section_feed *feed,
+ u16 pid,
+ int check_crc);
+ int (*allocate_filter)(struct dmx_section_feed *feed,
+ struct dmx_section_filter **filter);
+ int (*release_filter)(struct dmx_section_feed *feed,
+ struct dmx_section_filter *filter);
+ int (*start_filtering)(struct dmx_section_feed *feed);
+ int (*stop_filtering)(struct dmx_section_feed *feed);
+};
+
+/**
+ * typedef dmx_ts_cb - DVB demux TS filter callback function prototype
+ *
+ * @buffer1: Pointer to the start of the filtered TS packets.
+ * @buffer1_length: Length of the TS data in buffer1.
+ * @buffer2: Pointer to the tail of the filtered TS packets, or NULL.
+ * @buffer2_length: Length of the TS data in buffer2.
+ * @source: Indicates which TS feed is the source of the callback.
+ * @buffer_flags: Address where buffer flags are stored. Those are
+ *			used to report discontinuities to userspace via the
+ *			DVB memory-mapped API, as defined by
+ * &enum dmx_buffer_flags.
+ *
+ * This function callback prototype, provided by the client of the demux API,
+ * is called from the demux code. The function is only called when filtering
+ * on a TS feed has been enabled using the start_filtering\(\) function at
+ * the &dmx_demux.
+ * Any TS packets that match the filter settings are copied to a circular
+ * buffer. The filtered TS packets are delivered to the client using this
+ * callback function.
+ * It is expected that the @buffer1 and @buffer2 callback parameters point to
+ * addresses within the circular buffer, but other implementations are also
+ * possible. Note that the called party should not try to free the memory
+ * the @buffer1 and @buffer2 parameters point to.
+ *
+ * When this function is called, the @buffer1 parameter typically points to
+ * the start of the first undelivered TS packet within a circular buffer.
+ * The @buffer2 buffer parameter is normally NULL, except when the received
+ * TS packets have crossed the last address of the circular buffer and
+ * "wrapped" to the beginning of the buffer. In the latter case the @buffer1
+ * parameter would contain an address within the circular buffer, while the
+ * @buffer2 parameter would contain the first address of the circular buffer.
+ * The number of bytes delivered with this function (i.e. @buffer1_length +
+ * @buffer2_length) is usually equal to the value of callback_length parameter
+ * given in the set() function, with one exception: if a timeout occurs before
+ * receiving callback_length bytes of TS data, any undelivered packets are
+ * immediately delivered to the client by calling this function. The timeout
+ * duration is controlled by the set() function in the TS Feed API.
+ *
+ * If a TS packet is received with errors that could not be fixed by the
+ * TS-level forward error correction (FEC), the Transport_error_indicator
+ * flag of the TS packet header should be set. The TS packet should not be
+ * discarded, as the error can possibly be corrected by a higher layer
+ * protocol. If the called party is slow in processing the callback, it
+ * is possible that the circular buffer eventually fills up. If this happens,
+ * the demux driver should discard any TS packets received while the buffer
+ * is full and return -EOVERFLOW.
+ *
+ * The type of data returned to the callback can be selected by the
+ * &dmx_ts_feed.@set function. The type parameter decides if the raw
+ * TS packet (TS_PACKET) or just the payload (TS_PACKET|TS_PAYLOAD_ONLY)
+ * should be returned. If additionally the TS_DECODER bit is set the stream
+ * will also be sent to the hardware MPEG decoder.
+ *
+ * Return:
+ *
+ * - 0, on success;
+ *
+ * - -EOVERFLOW, on buffer overflow.
+ */
+typedef int (*dmx_ts_cb)(const u8 *buffer1,
+ size_t buffer1_length,
+ const u8 *buffer2,
+ size_t buffer2_length,
+ struct dmx_ts_feed *source,
+ u32 *buffer_flags);
+
+/**
+ * typedef dmx_section_cb - DVB demux section filter callback function prototype
+ *
+ * @buffer1: Pointer to the start of the filtered section, e.g.
+ * within the circular buffer of the demux driver.
+ * @buffer1_len: Length of the filtered section data in @buffer1,
+ * including headers and CRC.
+ * @buffer2: Pointer to the tail of the filtered section data,
+ * or NULL. Useful to handle the wrapping of a
+ * circular buffer.
+ * @buffer2_len: Length of the filtered section data in @buffer2,
+ * including headers and CRC.
+ * @source: Indicates which section feed is the source of the
+ * callback.
+ * @buffer_flags: Address where buffer flags are stored. Those are
+ *			used to report discontinuities to userspace via the
+ *			DVB memory-mapped API, as defined by
+ * &enum dmx_buffer_flags.
+ *
+ * This function callback prototype, provided by the client of the demux API,
+ * is called from the demux code. The function is only called when
+ * filtering of sections has been enabled using the function
+ * &dmx_section_feed.@start_filtering. When the demux driver has received a
+ * complete section that matches at least one section filter, the client
+ * is notified via this callback function. Normally this function is called
+ * for each received section; however, it is also possible to deliver
+ * multiple sections with one callback, for example when the system load
+ * is high. If an error occurs while receiving a section, this
+ * function should be called with the corresponding error type set in the
+ * success field, whether or not there is data to deliver. The Section Feed
+ * implementation should maintain a circular buffer for received sections.
+ * However, this is not necessary if the Section Feed API is implemented as
+ * a client of the TS Feed API, because the TS Feed implementation then
+ * buffers the received data. The size of the circular buffer can be
+ * configured using the &dmx_section_feed.@set function in the Section Feed API.
+ * If there is no room in the circular buffer when a new section is received,
+ * the section must be discarded. If this happens, the value of the success
+ * parameter should be DMX_OVERRUN_ERROR on the next callback.
+ */
+typedef int (*dmx_section_cb)(const u8 *buffer1,
+ size_t buffer1_len,
+ const u8 *buffer2,
+ size_t buffer2_len,
+ struct dmx_section_filter *source,
+ u32 *buffer_flags);
+
+/*
+ * DVB Front-End
+ */
+
+/**
+ * enum dmx_frontend_source - Used to identify the type of frontend
+ *
+ * @DMX_MEMORY_FE: The source of the demux is memory. It means that
+ * the MPEG-TS to be filtered comes from userspace,
+ * via write() syscall.
+ *
+ * @DMX_FRONTEND_0: The source of the demux is a frontend connected
+ * to the demux.
+ */
+enum dmx_frontend_source {
+ DMX_MEMORY_FE,
+ DMX_FRONTEND_0,
+};
+
+/**
+ * struct dmx_frontend - Structure that lists the frontends associated with
+ * a demux
+ *
+ * @connectivity_list: List of front-ends that can be connected to a
+ * particular demux;
+ * @source: Type of the frontend.
+ *
+ * FIXME: this structure should likely be replaced soon by some
+ * media-controller based logic.
+ */
+struct dmx_frontend {
+ struct list_head connectivity_list;
+ enum dmx_frontend_source source;
+};
+
+/*
+ * MPEG-2 TS Demux
+ */
+
+/**
+ * enum dmx_demux_caps - MPEG-2 TS Demux capabilities bitmap
+ *
+ * @DMX_TS_FILTERING: set if TS filtering is supported;
+ * @DMX_SECTION_FILTERING: set if section filtering is supported;
+ * @DMX_MEMORY_BASED_FILTERING: set if write() available.
+ *
+ * Those flags are OR'ed in the &dmx_demux.capabilities field
+ */
+enum dmx_demux_caps {
+ DMX_TS_FILTERING = 1,
+ DMX_SECTION_FILTERING = 4,
+ DMX_MEMORY_BASED_FILTERING = 8,
+};
+
+/*
+ * Demux resource type identifier.
+ */
+
+/**
+ * DMX_FE_ENTRY - Casts elements in the list of registered
+ * front-ends from the generic type struct list_head
+ *		 to the type struct dmx_frontend *
+ *
+ * @list: list of struct dmx_frontend
+ */
+#define DMX_FE_ENTRY(list) \
+ list_entry(list, struct dmx_frontend, connectivity_list)
+
+/**
+ * struct dmx_demux - Structure that contains the demux capabilities and
+ * callbacks.
+ *
+ * @capabilities: Bitfield of capability flags.
+ *
+ * @frontend: Front-end connected to the demux
+ *
+ * @priv: Pointer to private data of the API client
+ *
+ * @open: This function reserves the demux for use by the caller and, if
+ * necessary, initializes the demux. When the demux is no longer needed,
+ * the function @close should be called. It should be possible for
+ * multiple clients to access the demux at the same time. Thus, the
+ * function implementation should increment the demux usage count when
+ * @open is called and decrement it when @close is called.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * It returns:
+ * 0 on success;
+ * -EUSERS, if maximum usage count was reached;
+ * -EINVAL, on bad parameter.
+ *
+ * @close: This function releases the demux resource that was reserved by a
+ *	previous call to @open and, once the last user is gone, may
+ *	deinitialize the demux. As multiple clients may access the demux at
+ *	the same time, the implementation should decrement the usage count
+ *	that @open incremented.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * It returns:
+ * 0 on success;
+ * -ENODEV, if demux was not in use (e. g. no users);
+ * -EINVAL, on bad parameter.
+ *
+ * @write: This function provides the demux driver with a memory buffer
+ * containing TS packets. Instead of receiving TS packets from the DVB
+ * front-end, the demux driver software will read packets from memory.
+ * Any clients of this demux with active TS, PES or Section filters will
+ *	receive filtered data via the Demux callback API. The function
+ * returns when all the data in the buffer has been consumed by the demux.
+ * Demux hardware typically cannot read TS from memory. If this is the
+ * case, memory-based filtering has to be implemented entirely in software.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @buf function parameter contains a pointer to the TS data in
+ * kernel-space memory.
+ * The @count function parameter contains the length of the TS data.
+ * It returns:
+ * 0 on success;
+ * -ERESTARTSYS, if mutex lock was interrupted;
+ *	-EINTR, if a signal is pending;
+ * -ENODEV, if demux was removed;
+ * -EINVAL, on bad parameter.
+ *
+ * @allocate_ts_feed: Allocates a new TS feed, which is used to filter the TS
+ * packets carrying a certain PID. The TS feed normally corresponds to a
+ * hardware PID filter on the demux chip.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @feed function parameter contains a pointer to the TS feed API and
+ * instance data.
+ * The @callback function parameter contains a pointer to the callback
+ *	function for passing received TS packets.
+ * It returns:
+ * 0 on success;
+ * -ERESTARTSYS, if mutex lock was interrupted;
+ *	-EBUSY, if no more TS feeds are available;
+ * -EINVAL, on bad parameter.
+ *
+ * @release_ts_feed: Releases the resources allocated with @allocate_ts_feed.
+ * Any filtering in progress on the TS feed should be stopped before
+ * calling this function.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @feed function parameter contains a pointer to the TS feed API and
+ * instance data.
+ * It returns:
+ * 0 on success;
+ * -EINVAL on bad parameter.
+ *
+ * @allocate_section_feed: Allocates a new section feed, i.e. a demux resource
+ * for filtering and receiving sections. On platforms with hardware
+ * support for section filtering, a section feed is directly mapped to
+ * the demux HW. On other platforms, TS packets are first PID filtered in
+ * hardware and a hardware section filter then emulated in software. The
+ *	caller obtains a pointer to a struct dmx_section_feed as an out
+ * parameter. Using this API the caller can set filtering parameters and
+ * start receiving sections.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @feed function parameter contains a pointer to the TS feed API and
+ * instance data.
+ * The @callback function parameter contains a pointer to the callback
+ *	function for passing received sections.
+ * It returns:
+ * 0 on success;
+ *	-EBUSY, if no more section feeds are available;
+ * -EINVAL, on bad parameter.
+ *
+ * @release_section_feed: Releases the resources allocated with
+ * @allocate_section_feed, including allocated filters. Any filtering in
+ * progress on the section feed should be stopped before calling this
+ * function.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @feed function parameter contains a pointer to the TS feed API and
+ * instance data.
+ * It returns:
+ * 0 on success;
+ * -EINVAL, on bad parameter.
+ *
+ * @add_frontend: Registers a connectivity between a demux and a front-end,
+ * i.e., indicates that the demux can be connected via a call to
+ * @connect_frontend to use the given front-end as a TS source. The
+ * client of this function has to allocate dynamic or static memory for
+ * the frontend structure and initialize its fields before calling this
+ * function. This function is normally called during the driver
+ * initialization. The caller must not free the memory of the frontend
+ * struct before successfully calling @remove_frontend.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @frontend function parameter contains a pointer to the front-end
+ * instance data.
+ * It returns:
+ * 0 on success;
+ * -EINVAL, on bad parameter.
+ *
+ * @remove_frontend: Indicates that the given front-end, registered by a call
+ * to @add_frontend, can no longer be connected as a TS source by this
+ * demux. The function should be called when a front-end driver or a demux
+ * driver is removed from the system. If the front-end is in use, the
+ * function fails with the return value of -EBUSY. After successfully
+ * calling this function, the caller can free the memory of the frontend
+ * struct if it was dynamically allocated before the @add_frontend
+ * operation.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @frontend function parameter contains a pointer to the front-end
+ * instance data.
+ * It returns:
+ * 0 on success;
+ * -ENODEV, if the front-end was not found,
+ * -EINVAL, on bad parameter.
+ *
+ * @get_frontends: Provides the APIs of the front-ends that have been
+ * registered for this demux. Any of the front-ends obtained with this
+ * call can be used as a parameter for @connect_frontend. The include
+ * file demux.h contains the macro DMX_FE_ENTRY() for converting an
+ * element of the generic type struct &list_head * to the type
+ * struct &dmx_frontend *. The caller must not free the memory of any of
+ * the elements obtained via this function call.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * It returns a struct list_head pointer to the list of front-end
+ * interfaces, or NULL in the case of an empty list.
+ *
+ * @connect_frontend: Connects the TS output of the front-end to the input of
+ * the demux. A demux can only be connected to a front-end registered to
+ * the demux with the function @add_frontend. It may or may not be
+ * possible to connect multiple demuxes to the same front-end, depending
+ * on the capabilities of the HW platform. When not used, the front-end
+ * should be released by calling @disconnect_frontend.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @frontend function parameter contains a pointer to the front-end
+ * instance data.
+ * It returns:
+ * 0 on success;
+ * -EINVAL, on bad parameter.
+ *
+ * @disconnect_frontend: Disconnects the demux and a front-end previously
+ * connected by a @connect_frontend call.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * It returns:
+ * 0 on success;
+ * -EINVAL on bad parameter.
+ *
+ * @get_pes_pids: Get the PIDs for DMX_PES_AUDIO0, DMX_PES_VIDEO0,
+ * DMX_PES_TELETEXT0, DMX_PES_SUBTITLE0 and DMX_PES_PCR0.
+ * The @demux function parameter contains a pointer to the demux API and
+ * instance data.
+ * The @pids function parameter contains an array with five u16 elements
+ * where the PIDs will be stored.
+ * It returns:
+ * 0 on success;
+ * -EINVAL on bad parameter.
+ */
+struct dmx_demux {
+ enum dmx_demux_caps capabilities;
+ struct dmx_frontend *frontend;
+ void *priv;
+ int (*open)(struct dmx_demux *demux);
+ int (*close)(struct dmx_demux *demux);
+ int (*write)(struct dmx_demux *demux, const char __user *buf,
+ size_t count);
+ int (*allocate_ts_feed)(struct dmx_demux *demux,
+ struct dmx_ts_feed **feed,
+ dmx_ts_cb callback);
+ int (*release_ts_feed)(struct dmx_demux *demux,
+ struct dmx_ts_feed *feed);
+ int (*allocate_section_feed)(struct dmx_demux *demux,
+ struct dmx_section_feed **feed,
+ dmx_section_cb callback);
+ int (*release_section_feed)(struct dmx_demux *demux,
+ struct dmx_section_feed *feed);
+ int (*add_frontend)(struct dmx_demux *demux,
+ struct dmx_frontend *frontend);
+ int (*remove_frontend)(struct dmx_demux *demux,
+ struct dmx_frontend *frontend);
+ struct list_head *(*get_frontends)(struct dmx_demux *demux);
+ int (*connect_frontend)(struct dmx_demux *demux,
+ struct dmx_frontend *frontend);
+ int (*disconnect_frontend)(struct dmx_demux *demux);
+
+ int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids);
+
+ /* private: */
+
+ /*
+	 * Only used by av7110, to read some data from the firmware.
+	 * As this was never documented, we have no clue about what is
+	 * there, and its use by other drivers is not encouraged.
+ */
+ int (*get_stc)(struct dmx_demux *demux, unsigned int num,
+ u64 *stc, unsigned int *base);
+};
+
+#endif /* #ifndef __DEMUX_H */
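Editor's note: as a usage illustration of the frontend plumbing documented above, a bridge driver typically walks the list returned by get_frontends() with DMX_FE_ENTRY() and connects a non-memory frontend. This is a sketch under those assumptions, not code from the patch:

#include <media/demux.h>

/* connect the first hardware (non-memory) frontend of a demux */
static int connect_first_hw_frontend(struct dmx_demux *demux)
{
        struct list_head *head, *pos;

        head = demux->get_frontends(demux);
        if (!head)
                return -ENODEV;

        list_for_each(pos, head) {
                struct dmx_frontend *fe = DMX_FE_ENTRY(pos);

                if (fe->source != DMX_MEMORY_FE)
                        return demux->connect_frontend(demux, fe);
        }
        return -ENODEV;
}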
diff --git a/include/media/dmxdev.h b/include/media/dmxdev.h
new file mode 100644
index 000000000000..baafa3b8aca4
--- /dev/null
+++ b/include/media/dmxdev.h
@@ -0,0 +1,214 @@
+/*
+ * dmxdev.h
+ *
+ * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
+ * for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DMXDEV_H_
+#define _DMXDEV_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <linux/dvb/dmx.h>
+
+#include <media/dvbdev.h>
+#include <media/demux.h>
+#include <media/dvb_ringbuffer.h>
+#include <media/dvb_vb2.h>
+
+/**
+ * enum dmxdev_type - type of demux filter.
+ *
+ * @DMXDEV_TYPE_NONE: no filter set.
+ * @DMXDEV_TYPE_SEC: section filter.
+ * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter.
+ */
+enum dmxdev_type {
+ DMXDEV_TYPE_NONE,
+ DMXDEV_TYPE_SEC,
+ DMXDEV_TYPE_PES,
+};
+
+/**
+ * enum dmxdev_state - state machine for the dmxdev.
+ *
+ * @DMXDEV_STATE_FREE: indicates that the filter is freed.
+ * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated
+ * to be used.
+ * @DMXDEV_STATE_SET: indicates that the filter parameters are set.
+ * @DMXDEV_STATE_GO: indicates that the filter is running.
+ * @DMXDEV_STATE_DONE: indicates that a packet was already filtered
+ * and the filter is now disabled.
+ * Set only if %DMX_ONESHOT. See
+ * &dmx_sct_filter_params.
+ * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition.
+ */
+enum dmxdev_state {
+ DMXDEV_STATE_FREE,
+ DMXDEV_STATE_ALLOCATED,
+ DMXDEV_STATE_SET,
+ DMXDEV_STATE_GO,
+ DMXDEV_STATE_DONE,
+ DMXDEV_STATE_TIMEDOUT
+};
+
+/**
+ * struct dmxdev_feed - digital TV dmxdev feed
+ *
+ * @pid: Program ID to be filtered
+ * @ts: pointer to &struct dmx_ts_feed
+ * @next: &struct list_head pointing to the next feed.
+ */
+struct dmxdev_feed {
+ u16 pid;
+ struct dmx_ts_feed *ts;
+ struct list_head next;
+};
+
+/**
+ * struct dmxdev_filter - digital TV dmxdev filter
+ *
+ * @filter: a union describing a dmxdev filter.
+ * Currently used only for section filters.
+ * @filter.sec: a &struct dmx_section_filter pointer.
+ * For section filter only.
+ * @feed: a union describing a dmxdev feed.
+ * Depending on the filter type, it can be either
+ * @feed.ts or @feed.sec.
+ * @feed.ts: a &struct list_head list.
+ * For TS and PES feeds.
+ * @feed.sec: a &struct dmx_section_feed pointer.
+ * For section feed only.
+ * @params: a union describing dmxdev filter parameters.
+ * Depending on the filter type, it can be either
+ * @params.sec or @params.pes.
+ * @params.sec: a &struct dmx_sct_filter_params embedded struct.
+ * For section filter only.
+ * @params.pes: a &struct dmx_pes_filter_params embedded struct.
+ * For PES filter only.
+ * @type: type of the dmxdev filter, as defined by &enum dmxdev_type.
+ * @state: state of the dmxdev filter, as defined by &enum dmxdev_state.
+ * @dev: pointer to &struct dmxdev.
+ * @buffer: an embedded &struct dvb_ringbuffer buffer.
+ * @vb2_ctx: control struct for VB2 handler
+ * @mutex: protects the access to &struct dmxdev_filter.
+ * @timer: &struct timer_list embedded timer, used to check for
+ * feed timeouts.
+ * Only for section filter.
+ * @todo: index for the @secheader.
+ * Only for section filter.
+ * @secheader: buffer cache to parse the section header.
+ * Only for section filter.
+ */
+struct dmxdev_filter {
+ union {
+ struct dmx_section_filter *sec;
+ } filter;
+
+ union {
+ /* list of TS and PES feeds (struct dmxdev_feed) */
+ struct list_head ts;
+ struct dmx_section_feed *sec;
+ } feed;
+
+ union {
+ struct dmx_sct_filter_params sec;
+ struct dmx_pes_filter_params pes;
+ } params;
+
+ enum dmxdev_type type;
+ enum dmxdev_state state;
+ struct dmxdev *dev;
+ struct dvb_ringbuffer buffer;
+ struct dvb_vb2_ctx vb2_ctx;
+
+ struct mutex mutex;
+
+ /* only for sections */
+ struct timer_list timer;
+ int todo;
+ u8 secheader[3];
+};
+
+/**
+ * struct dmxdev - Describes a digital TV demux device.
+ *
+ * @dvbdev: pointer to &struct dvb_device associated with
+ * the demux device node.
+ * @dvr_dvbdev: pointer to &struct dvb_device associated with
+ * the dvr device node.
+ * @filter: pointer to &struct dmxdev_filter.
+ * @demux: pointer to &struct dmx_demux.
+ * @filternum: number of filters.
+ * @capabilities: demux capabilities as defined by &enum dmx_demux_caps.
+ * @may_do_mmap: flag used to indicate if the device may do mmap.
+ * @exit: flag to indicate that the demux is being released.
+ * @dvr_orig_fe: pointer to &struct dmx_frontend.
+ * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output.
+ * @dvr_vb2_ctx: control struct for VB2 handler
+ * @mutex: protects the usage of this structure.
+ * @lock: protects access to &dmxdev->filter->data.
+ */
+struct dmxdev {
+ struct dvb_device *dvbdev;
+ struct dvb_device *dvr_dvbdev;
+
+ struct dmxdev_filter *filter;
+ struct dmx_demux *demux;
+
+ int filternum;
+ int capabilities;
+
+ unsigned int may_do_mmap:1;
+ unsigned int exit:1;
+#define DMXDEV_CAP_DUPLEX 1
+ struct dmx_frontend *dvr_orig_fe;
+
+ struct dvb_ringbuffer dvr_buffer;
+#define DVR_BUFFER_SIZE (10*188*1024)
+
+ struct dvb_vb2_ctx dvr_vb2_ctx;
+
+ struct mutex mutex;
+ spinlock_t lock;
+};
+
+/**
+ * dvb_dmxdev_init - initializes a digital TV demux and registers both demux
+ * and DVR devices.
+ *
+ * @dmxdev: pointer to &struct dmxdev.
+ * @adap: pointer to &struct dvb_adapter.
+ */
+int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap);
+
+/**
+ * dvb_dmxdev_release - releases a digital TV demux and unregisters it.
+ *
+ * @dmxdev: pointer to &struct dmxdev.
+ */
+void dvb_dmxdev_release(struct dmxdev *dmxdev);
+
+#endif /* _DMXDEV_H_ */
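Editor's note: a bridge driver normally fills in the demux pointer, the number of filters and the capabilities before registering the demux and DVR nodes with dvb_dmxdev_init(). The surrounding structure below is hypothetical and only shows the call sequence:

#include <media/dmxdev.h>
#include <media/dvbdev.h>

struct my_card {                        /* hypothetical bridge driver state */
        struct dmxdev dmxdev;
        struct dmx_demux *demux;        /* provided by the demux implementation */
};

static int my_register_dmxdev(struct my_card *card, struct dvb_adapter *adap)
{
        card->dmxdev.filternum    = 32;
        card->dmxdev.demux        = card->demux;
        card->dmxdev.capabilities = 0;

        return dvb_dmxdev_init(&card->dmxdev, adap);
}

static void my_unregister_dmxdev(struct my_card *card)
{
        dvb_dmxdev_release(&card->dmxdev);
}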
diff --git a/include/media/drv-intf/cx2341x.h b/include/media/drv-intf/cx2341x.h
index 9635eebaab09..33a97bfcea58 100644
--- a/include/media/drv-intf/cx2341x.h
+++ b/include/media/drv-intf/cx2341x.h
@@ -29,8 +29,8 @@ enum cx2341x_port {
enum cx2341x_cap {
CX2341X_CAP_HAS_SLICED_VBI = 1 << 0,
- CX2341X_CAP_HAS_TS = 1 << 1,
- CX2341X_CAP_HAS_AC3 = 1 << 2,
+ CX2341X_CAP_HAS_TS = 1 << 1,
+ CX2341X_CAP_HAS_AC3 = 1 << 2,
};
struct cx2341x_mpeg_params {
@@ -204,92 +204,92 @@ void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy);
/* Firmware API commands */
/* MPEG decoder API, specific to the cx23415 */
-#define CX2341X_DEC_PING_FW 0x00
-#define CX2341X_DEC_START_PLAYBACK 0x01
-#define CX2341X_DEC_STOP_PLAYBACK 0x02
-#define CX2341X_DEC_SET_PLAYBACK_SPEED 0x03
-#define CX2341X_DEC_STEP_VIDEO 0x05
-#define CX2341X_DEC_SET_DMA_BLOCK_SIZE 0x08
+#define CX2341X_DEC_PING_FW 0x00
+#define CX2341X_DEC_START_PLAYBACK 0x01
+#define CX2341X_DEC_STOP_PLAYBACK 0x02
+#define CX2341X_DEC_SET_PLAYBACK_SPEED 0x03
+#define CX2341X_DEC_STEP_VIDEO 0x05
+#define CX2341X_DEC_SET_DMA_BLOCK_SIZE 0x08
#define CX2341X_DEC_GET_XFER_INFO 0x09
#define CX2341X_DEC_GET_DMA_STATUS 0x0a
#define CX2341X_DEC_SCHED_DMA_FROM_HOST 0x0b
-#define CX2341X_DEC_PAUSE_PLAYBACK 0x0d
-#define CX2341X_DEC_HALT_FW 0x0e
-#define CX2341X_DEC_SET_STANDARD 0x10
+#define CX2341X_DEC_PAUSE_PLAYBACK 0x0d
+#define CX2341X_DEC_HALT_FW 0x0e
+#define CX2341X_DEC_SET_STANDARD 0x10
#define CX2341X_DEC_GET_VERSION 0x11
-#define CX2341X_DEC_SET_STREAM_INPUT 0x14
-#define CX2341X_DEC_GET_TIMING_INFO 0x15
-#define CX2341X_DEC_SET_AUDIO_MODE 0x16
+#define CX2341X_DEC_SET_STREAM_INPUT 0x14
+#define CX2341X_DEC_GET_TIMING_INFO 0x15
+#define CX2341X_DEC_SET_AUDIO_MODE 0x16
#define CX2341X_DEC_SET_EVENT_NOTIFICATION 0x17
#define CX2341X_DEC_SET_DISPLAY_BUFFERS 0x18
-#define CX2341X_DEC_EXTRACT_VBI 0x19
-#define CX2341X_DEC_SET_DECODER_SOURCE 0x1a
+#define CX2341X_DEC_EXTRACT_VBI 0x19
+#define CX2341X_DEC_SET_DECODER_SOURCE 0x1a
#define CX2341X_DEC_SET_PREBUFFERING 0x1e
/* MPEG encoder API */
-#define CX2341X_ENC_PING_FW 0x80
-#define CX2341X_ENC_START_CAPTURE 0x81
-#define CX2341X_ENC_STOP_CAPTURE 0x82
-#define CX2341X_ENC_SET_AUDIO_ID 0x89
-#define CX2341X_ENC_SET_VIDEO_ID 0x8b
-#define CX2341X_ENC_SET_PCR_ID 0x8d
-#define CX2341X_ENC_SET_FRAME_RATE 0x8f
-#define CX2341X_ENC_SET_FRAME_SIZE 0x91
-#define CX2341X_ENC_SET_BIT_RATE 0x95
-#define CX2341X_ENC_SET_GOP_PROPERTIES 0x97
-#define CX2341X_ENC_SET_ASPECT_RATIO 0x99
-#define CX2341X_ENC_SET_DNR_FILTER_MODE 0x9b
-#define CX2341X_ENC_SET_DNR_FILTER_PROPS 0x9d
-#define CX2341X_ENC_SET_CORING_LEVELS 0x9f
-#define CX2341X_ENC_SET_SPATIAL_FILTER_TYPE 0xa1
-#define CX2341X_ENC_SET_VBI_LINE 0xb7
-#define CX2341X_ENC_SET_STREAM_TYPE 0xb9
-#define CX2341X_ENC_SET_OUTPUT_PORT 0xbb
-#define CX2341X_ENC_SET_AUDIO_PROPERTIES 0xbd
-#define CX2341X_ENC_HALT_FW 0xc3
+#define CX2341X_ENC_PING_FW 0x80
+#define CX2341X_ENC_START_CAPTURE 0x81
+#define CX2341X_ENC_STOP_CAPTURE 0x82
+#define CX2341X_ENC_SET_AUDIO_ID 0x89
+#define CX2341X_ENC_SET_VIDEO_ID 0x8b
+#define CX2341X_ENC_SET_PCR_ID 0x8d
+#define CX2341X_ENC_SET_FRAME_RATE 0x8f
+#define CX2341X_ENC_SET_FRAME_SIZE 0x91
+#define CX2341X_ENC_SET_BIT_RATE 0x95
+#define CX2341X_ENC_SET_GOP_PROPERTIES 0x97
+#define CX2341X_ENC_SET_ASPECT_RATIO 0x99
+#define CX2341X_ENC_SET_DNR_FILTER_MODE 0x9b
+#define CX2341X_ENC_SET_DNR_FILTER_PROPS 0x9d
+#define CX2341X_ENC_SET_CORING_LEVELS 0x9f
+#define CX2341X_ENC_SET_SPATIAL_FILTER_TYPE 0xa1
+#define CX2341X_ENC_SET_VBI_LINE 0xb7
+#define CX2341X_ENC_SET_STREAM_TYPE 0xb9
+#define CX2341X_ENC_SET_OUTPUT_PORT 0xbb
+#define CX2341X_ENC_SET_AUDIO_PROPERTIES 0xbd
+#define CX2341X_ENC_HALT_FW 0xc3
#define CX2341X_ENC_GET_VERSION 0xc4
-#define CX2341X_ENC_SET_GOP_CLOSURE 0xc5
-#define CX2341X_ENC_GET_SEQ_END 0xc6
-#define CX2341X_ENC_SET_PGM_INDEX_INFO 0xc7
+#define CX2341X_ENC_SET_GOP_CLOSURE 0xc5
+#define CX2341X_ENC_GET_SEQ_END 0xc6
+#define CX2341X_ENC_SET_PGM_INDEX_INFO 0xc7
#define CX2341X_ENC_SET_VBI_CONFIG 0xc8
-#define CX2341X_ENC_SET_DMA_BLOCK_SIZE 0xc9
+#define CX2341X_ENC_SET_DMA_BLOCK_SIZE 0xc9
#define CX2341X_ENC_GET_PREV_DMA_INFO_MB_10 0xca
#define CX2341X_ENC_GET_PREV_DMA_INFO_MB_9 0xcb
-#define CX2341X_ENC_SCHED_DMA_TO_HOST 0xcc
-#define CX2341X_ENC_INITIALIZE_INPUT 0xcd
-#define CX2341X_ENC_SET_FRAME_DROP_RATE 0xd0
-#define CX2341X_ENC_PAUSE_ENCODER 0xd2
-#define CX2341X_ENC_REFRESH_INPUT 0xd3
+#define CX2341X_ENC_SCHED_DMA_TO_HOST 0xcc
+#define CX2341X_ENC_INITIALIZE_INPUT 0xcd
+#define CX2341X_ENC_SET_FRAME_DROP_RATE 0xd0
+#define CX2341X_ENC_PAUSE_ENCODER 0xd2
+#define CX2341X_ENC_REFRESH_INPUT 0xd3
#define CX2341X_ENC_SET_COPYRIGHT 0xd4
-#define CX2341X_ENC_SET_EVENT_NOTIFICATION 0xd5
-#define CX2341X_ENC_SET_NUM_VSYNC_LINES 0xd6
-#define CX2341X_ENC_SET_PLACEHOLDER 0xd7
-#define CX2341X_ENC_MUTE_VIDEO 0xd9
-#define CX2341X_ENC_MUTE_AUDIO 0xda
+#define CX2341X_ENC_SET_EVENT_NOTIFICATION 0xd5
+#define CX2341X_ENC_SET_NUM_VSYNC_LINES 0xd6
+#define CX2341X_ENC_SET_PLACEHOLDER 0xd7
+#define CX2341X_ENC_MUTE_VIDEO 0xd9
+#define CX2341X_ENC_MUTE_AUDIO 0xda
#define CX2341X_ENC_SET_VERT_CROP_LINE 0xdb
-#define CX2341X_ENC_MISC 0xdc
+#define CX2341X_ENC_MISC 0xdc
/* OSD API, specific to the cx23415 */
-#define CX2341X_OSD_GET_FRAMEBUFFER 0x41
-#define CX2341X_OSD_GET_PIXEL_FORMAT 0x42
-#define CX2341X_OSD_SET_PIXEL_FORMAT 0x43
-#define CX2341X_OSD_GET_STATE 0x44
-#define CX2341X_OSD_SET_STATE 0x45
-#define CX2341X_OSD_GET_OSD_COORDS 0x46
-#define CX2341X_OSD_SET_OSD_COORDS 0x47
-#define CX2341X_OSD_GET_SCREEN_COORDS 0x48
-#define CX2341X_OSD_SET_SCREEN_COORDS 0x49
-#define CX2341X_OSD_GET_GLOBAL_ALPHA 0x4a
-#define CX2341X_OSD_SET_GLOBAL_ALPHA 0x4b
-#define CX2341X_OSD_SET_BLEND_COORDS 0x4c
-#define CX2341X_OSD_GET_FLICKER_STATE 0x4f
-#define CX2341X_OSD_SET_FLICKER_STATE 0x50
-#define CX2341X_OSD_BLT_COPY 0x52
-#define CX2341X_OSD_BLT_FILL 0x53
-#define CX2341X_OSD_BLT_TEXT 0x54
-#define CX2341X_OSD_SET_FRAMEBUFFER_WINDOW 0x56
-#define CX2341X_OSD_SET_CHROMA_KEY 0x60
-#define CX2341X_OSD_GET_ALPHA_CONTENT_INDEX 0x61
-#define CX2341X_OSD_SET_ALPHA_CONTENT_INDEX 0x62
+#define CX2341X_OSD_GET_FRAMEBUFFER 0x41
+#define CX2341X_OSD_GET_PIXEL_FORMAT 0x42
+#define CX2341X_OSD_SET_PIXEL_FORMAT 0x43
+#define CX2341X_OSD_GET_STATE 0x44
+#define CX2341X_OSD_SET_STATE 0x45
+#define CX2341X_OSD_GET_OSD_COORDS 0x46
+#define CX2341X_OSD_SET_OSD_COORDS 0x47
+#define CX2341X_OSD_GET_SCREEN_COORDS 0x48
+#define CX2341X_OSD_SET_SCREEN_COORDS 0x49
+#define CX2341X_OSD_GET_GLOBAL_ALPHA 0x4a
+#define CX2341X_OSD_SET_GLOBAL_ALPHA 0x4b
+#define CX2341X_OSD_SET_BLEND_COORDS 0x4c
+#define CX2341X_OSD_GET_FLICKER_STATE 0x4f
+#define CX2341X_OSD_SET_FLICKER_STATE 0x50
+#define CX2341X_OSD_BLT_COPY 0x52
+#define CX2341X_OSD_BLT_FILL 0x53
+#define CX2341X_OSD_BLT_TEXT 0x54
+#define CX2341X_OSD_SET_FRAMEBUFFER_WINDOW 0x56
+#define CX2341X_OSD_SET_CHROMA_KEY 0x60
+#define CX2341X_OSD_GET_ALPHA_CONTENT_INDEX 0x61
+#define CX2341X_OSD_SET_ALPHA_CONTENT_INDEX 0x62
#endif /* CX2341X_H */
diff --git a/include/media/drv-intf/exynos-fimc.h b/include/media/drv-intf/exynos-fimc.h
index 69bcd2a07d5c..f9c64338841f 100644
--- a/include/media/drv-intf/exynos-fimc.h
+++ b/include/media/drv-intf/exynos-fimc.h
@@ -155,7 +155,8 @@ static inline struct exynos_video_entity *vdev_to_exynos_video_entity(
}
#define fimc_pipeline_call(ent, op, args...) \
- (!(ent) ? -ENOENT : (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \
+ ((!(ent) || !(ent)->pipe) ? -ENOENT : \
+ (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \
(ent)->pipe->ops->op(((ent)->pipe), ##args) : -ENOIOCTLCMD)) \
#endif /* S5P_FIMC_H_ */
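The hunk above makes fimc_pipeline_call() return -ENOENT when the entity has no pipeline attached, not only when the entity pointer itself is NULL. A minimal sketch of a caller after this change; the set_stream op name and the error-handling policy are assumptions for illustration, not taken from this diff:

static int example_start_streaming(struct exynos_video_entity *ve)
{
	/* With the added check, a missing pipeline now yields -ENOENT
	 * instead of a NULL pointer dereference. */
	int ret = fimc_pipeline_call(ve, set_stream, 1);

	if (ret == -ENOIOCTLCMD)	/* op not implemented: treat as success */
		ret = 0;
	return ret;
}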
diff --git a/include/media/drv-intf/msp3400.h b/include/media/drv-intf/msp3400.h
index 1e6e80213a77..db98ce49e17b 100644
--- a/include/media/drv-intf/msp3400.h
+++ b/include/media/drv-intf/msp3400.h
@@ -80,17 +80,17 @@
*/
/* SCART input to DSP selection */
-#define MSP_IN_SCART1 0 /* Pin SC1_IN */
-#define MSP_IN_SCART2 1 /* Pin SC2_IN */
-#define MSP_IN_SCART3 2 /* Pin SC3_IN */
-#define MSP_IN_SCART4 3 /* Pin SC4_IN */
-#define MSP_IN_MONO 6 /* Pin MONO_IN */
-#define MSP_IN_MUTE 7 /* Mute DSP input */
-#define MSP_SCART_TO_DSP(in) (in)
+#define MSP_IN_SCART1 0 /* Pin SC1_IN */
+#define MSP_IN_SCART2 1 /* Pin SC2_IN */
+#define MSP_IN_SCART3 2 /* Pin SC3_IN */
+#define MSP_IN_SCART4 3 /* Pin SC4_IN */
+#define MSP_IN_MONO 6 /* Pin MONO_IN */
+#define MSP_IN_MUTE 7 /* Mute DSP input */
+#define MSP_SCART_TO_DSP(in) (in)
/* Tuner input to demodulator and DSP selection */
-#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */
-#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */
-#define MSP_TUNER_TO_DSP(in) ((in) << 3)
+#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */
+#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */
+#define MSP_TUNER_TO_DSP(in) ((in) << 3)
/* The msp has up to 5 DSP outputs, each output can independently select
a DSP input.
@@ -109,30 +109,30 @@
DSP. This is currently not implemented. Also not implemented is the
multi-channel capable I2S3 input of the 44x0G. If someone can demonstrate
a need for one of those features then additional support can be added. */
-#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */
-#define MSP_DSP_IN_SCART 2 /* SCART DSP input */
-#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */
-#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */
-#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */
-#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */
-#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */
-#define MSP_DSP_IN_AUX 13 /* AUX DSP input */
-#define MSP_DSP_TO_MAIN(in) ((in) << 4)
-#define MSP_DSP_TO_AUX(in) ((in) << 8)
-#define MSP_DSP_TO_SCART1(in) ((in) << 12)
-#define MSP_DSP_TO_SCART2(in) ((in) << 16)
-#define MSP_DSP_TO_I2S(in) ((in) << 20)
+#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */
+#define MSP_DSP_IN_SCART 2 /* SCART DSP input */
+#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */
+#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */
+#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */
+#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */
+#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */
+#define MSP_DSP_IN_AUX 13 /* AUX DSP input */
+#define MSP_DSP_TO_MAIN(in) ((in) << 4)
+#define MSP_DSP_TO_AUX(in) ((in) << 8)
+#define MSP_DSP_TO_SCART1(in) ((in) << 12)
+#define MSP_DSP_TO_SCART2(in) ((in) << 16)
+#define MSP_DSP_TO_I2S(in) ((in) << 20)
/* Output SCART select: the SCART outputs can select which input
to use. */
-#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */
-#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */
-#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */
-#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */
-#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */
-#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */
-#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */
-#define MSP_SC_IN_MUTE 7 /* MUTE output */
+#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */
+#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */
+#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */
+#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */
+#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */
+#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */
+#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */
+#define MSP_SC_IN_MUTE 7 /* MUTE output */
#define MSP_SC_TO_SCART1(in) (in)
#define MSP_SC_TO_SCART2(in) ((in) << 4)
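Since these are plain shift macros, routing words are built by OR-ing the per-field selectors together. A hedged sketch of how such words could be composed; the grouping into an "input" word and an "output" word follows the bit layout above, while the exact helper macros the msp3400 driver uses are defined further down in this header and are not shown here:

/* DSP input routing: SCART2 and the tuner IF on ANA_IN1 feed the DSP,
 * and the MAIN output follows the tuner path. */
u32 in_route  = MSP_SCART_TO_DSP(MSP_IN_SCART2) |
		MSP_TUNER_TO_DSP(MSP_IN_TUNER1) |
		MSP_DSP_TO_MAIN(MSP_DSP_IN_TUNER);

/* SCART output routing: the SCART1 output carries the DSP SCART1 signal. */
u32 out_route = MSP_SC_TO_SCART1(MSP_SC_IN_DSP_SCART1);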
diff --git a/include/media/drv-intf/renesas-ceu.h b/include/media/drv-intf/renesas-ceu.h
new file mode 100644
index 000000000000..52841d1b4763
--- /dev/null
+++ b/include/media/drv-intf/renesas-ceu.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * renesas-ceu.h - Renesas CEU driver interface
+ *
+ * Copyright 2017-2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#ifndef __MEDIA_DRV_INTF_RENESAS_CEU_H__
+#define __MEDIA_DRV_INTF_RENESAS_CEU_H__
+
+#define CEU_MAX_SUBDEVS 2
+
+struct ceu_async_subdev {
+ unsigned long flags;
+ unsigned char bus_width;
+ unsigned char bus_shift;
+ unsigned int i2c_adapter_id;
+ unsigned int i2c_address;
+};
+
+struct ceu_platform_data {
+ unsigned int num_subdevs;
+ struct ceu_async_subdev subdevs[CEU_MAX_SUBDEVS];
+};
+
+#endif /* __MEDIA_DRV_INTF_RENESAS_CEU_H__ */
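A hedged sketch of the board-side use of this new interface: platform code fills a ceu_platform_data with one entry per attached sensor. The adapter id and I2C address below are placeholders, not values from any real board file:

static struct ceu_platform_data ceu_pdata = {
	.num_subdevs = 1,
	.subdevs = {
		{
			.flags          = 0,	/* parallel bus flags, board specific */
			.bus_width      = 8,
			.bus_shift      = 0,
			.i2c_adapter_id = 0,
			.i2c_address    = 0x3c,	/* hypothetical sensor address */
		},
	},
};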
diff --git a/include/media/drv-intf/saa7146.h b/include/media/drv-intf/saa7146.h
index 769c6cf7eb4c..a7bf2c4a2e4d 100644
--- a/include/media/drv-intf/saa7146.h
+++ b/include/media/drv-intf/saa7146.h
@@ -118,7 +118,7 @@ struct saa7146_dev
{
struct module *module;
- struct v4l2_device v4l2_dev;
+ struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
/* different device locks */
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
new file mode 100644
index 000000000000..28e2be5c8a98
--- /dev/null
+++ b/include/media/dvb-usb-ids.h
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* dvb-usb-ids.h is part of the DVB USB library.
+ *
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see
+ * dvb-usb-init.c for copyright information.
+ *
+ * a header file containing define's for the USB device supported by the
+ * various drivers.
+ */
+#ifndef _DVB_USB_IDS_H_
+#define _DVB_USB_IDS_H_
+
+/* Vendor IDs */
+#define USB_VID_ADSTECH 0x06e1
+#define USB_VID_AFATECH 0x15a4
+#define USB_VID_ALCOR_MICRO 0x058f
+#define USB_VID_ALINK 0x05e3
+#define USB_VID_AMT 0x1c73
+#define USB_VID_ANCHOR 0x0547
+#define USB_VID_ANSONIC 0x10b9
+#define USB_VID_ANUBIS_ELECTRONIC 0x10fd
+#define USB_VID_ASUS 0x0b05
+#define USB_VID_AVERMEDIA 0x07ca
+#define USB_VID_COMPRO 0x185b
+#define USB_VID_COMPRO_UNK 0x145f
+#define USB_VID_CONEXANT 0x0572
+#define USB_VID_CYPRESS 0x04b4
+#define USB_VID_DEXATEK 0x1d19
+#define USB_VID_DIBCOM 0x10b8
+#define USB_VID_DPOSH 0x1498
+#define USB_VID_DVICO 0x0fe9
+#define USB_VID_E3C 0x18b4
+#define USB_VID_ELGATO 0x0fd9
+#define USB_VID_EMPIA 0xeb1a
+#define USB_VID_GENPIX 0x09c0
+#define USB_VID_GRANDTEC 0x5032
+#define USB_VID_GTEK 0x1f4d
+#define USB_VID_HANFTEK 0x15f4
+#define USB_VID_HAUPPAUGE 0x2040
+#define USB_VID_HYPER_PALTEK 0x1025
+#define USB_VID_INTEL 0x8086
+#define USB_VID_ITETECH 0x048d
+#define USB_VID_KWORLD 0xeb2a
+#define USB_VID_KWORLD_2 0x1b80
+#define USB_VID_KYE 0x0458
+#define USB_VID_LEADTEK 0x0413
+#define USB_VID_LITEON 0x04ca
+#define USB_VID_MEDION 0x1660
+#define USB_VID_MIGLIA 0x18f3
+#define USB_VID_MSI 0x0db0
+#define USB_VID_MSI_2 0x1462
+#define USB_VID_OPERA1 0x695c
+#define USB_VID_PINNACLE 0x2304
+#define USB_VID_PCTV 0x2013
+#define USB_VID_PIXELVIEW 0x1554
+#define USB_VID_REALTEK 0x0bda
+#define USB_VID_TECHNOTREND 0x0b48
+#define USB_VID_TERRATEC 0x0ccd
+#define USB_VID_TELESTAR 0x10b9
+#define USB_VID_VISIONPLUS 0x13d3
+#define USB_VID_SONY 0x1415
+#define USB_PID_TEVII_S421 0xd421
+#define USB_PID_TEVII_S480_1 0xd481
+#define USB_PID_TEVII_S480_2 0xd482
+#define USB_PID_TEVII_S630 0xd630
+#define USB_PID_TEVII_S632 0xd632
+#define USB_PID_TEVII_S650 0xd650
+#define USB_PID_TEVII_S660 0xd660
+#define USB_PID_TEVII_S662 0xd662
+#define USB_VID_TWINHAN 0x1822
+#define USB_VID_ULTIMA_ELECTRONIC 0x05d8
+#define USB_VID_UNIWILL 0x1584
+#define USB_VID_WIDEVIEW 0x14aa
+#define USB_VID_GIGABYTE 0x1044
+#define USB_VID_YUAN 0x1164
+#define USB_VID_XTENSIONS 0x1ae7
+#define USB_VID_ZYDAS 0x0ace
+#define USB_VID_HUMAX_COEX 0x10b9
+#define USB_VID_774 0x7a69
+#define USB_VID_EVOLUTEPC 0x1e59
+#define USB_VID_AZUREWAVE 0x13d3
+#define USB_VID_TECHNISAT 0x14f7
+#define USB_VID_HAMA 0x147f
+#define USB_VID_MICROSOFT 0x045e
+
+/* Product IDs */
+#define USB_PID_ADSTECH_USB2_COLD 0xa333
+#define USB_PID_ADSTECH_USB2_WARM 0xa334
+#define USB_PID_AFATECH_AF9005 0x9020
+#define USB_PID_AFATECH_AF9015_9015 0x9015
+#define USB_PID_AFATECH_AF9015_9016 0x9016
+#define USB_PID_AFATECH_AF9035_1000 0x1000
+#define USB_PID_AFATECH_AF9035_1001 0x1001
+#define USB_PID_AFATECH_AF9035_1002 0x1002
+#define USB_PID_AFATECH_AF9035_1003 0x1003
+#define USB_PID_AFATECH_AF9035_9035 0x9035
+#define USB_PID_TREKSTOR_DVBT 0x901b
+#define USB_PID_TREKSTOR_TERRES_2_0 0xC803
+#define USB_VID_ALINK_DTU 0xf170
+#define USB_PID_ANSONIC_DVBT_USB 0x6000
+#define USB_PID_ANYSEE 0x861f
+#define USB_PID_AZUREWAVE_AD_TU700 0x3237
+#define USB_PID_AZUREWAVE_6007 0x0ccd
+#define USB_PID_AVERMEDIA_DVBT_USB_COLD 0x0001
+#define USB_PID_AVERMEDIA_DVBT_USB_WARM 0x0002
+#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800
+#define USB_PID_AVERMEDIA_DVBT_USB2_WARM 0xa801
+#define USB_PID_COMPRO_DVBU2000_COLD 0xd000
+#define USB_PID_COMPRO_DVBU2000_WARM 0xd001
+#define USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c
+#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d
+#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78
+#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80
+#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397
+#define USB_PID_CONEXANT_D680_DMB 0x86d6
+#define USB_PID_CREATIX_CTX1921 0x1921
+#define USB_PID_DELOCK_USB2_DVBT 0xb803
+#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064
+#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065
+#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8
+#define USB_PID_DIBCOM_MOD3000_WARM 0x0bb9
+#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6
+#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7
+#define USB_PID_DIBCOM_STK7700P 0x1e14
+#define USB_PID_DIBCOM_STK7700P_PC 0x1e78
+#define USB_PID_DIBCOM_STK7700D 0x1ef0
+#define USB_PID_DIBCOM_STK7700_U7000 0x7001
+#define USB_PID_DIBCOM_STK7070P 0x1ebc
+#define USB_PID_DIBCOM_STK7070PD 0x1ebe
+#define USB_PID_DIBCOM_STK807XP 0x1f90
+#define USB_PID_DIBCOM_STK807XPVR 0x1f98
+#define USB_PID_DIBCOM_STK8096GP 0x1fa0
+#define USB_PID_DIBCOM_STK8096PVR 0x1faa
+#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
+#define USB_PID_DIBCOM_TFE8096P 0x1f9C
+#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
+#define USB_PID_DIBCOM_STK7770P 0x1e80
+#define USB_PID_DIBCOM_NIM7090 0x1bb2
+#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4
+#define USB_PID_DIBCOM_TFE7790P 0x1e6e
+#define USB_PID_DIBCOM_NIM9090M 0x2383
+#define USB_PID_DIBCOM_NIM9090MD 0x2384
+#define USB_PID_DPOSH_M9206_COLD 0x9206
+#define USB_PID_DPOSH_M9206_WARM 0xa090
+#define USB_PID_E3C_EC168 0x1689
+#define USB_PID_E3C_EC168_2 0xfffa
+#define USB_PID_E3C_EC168_3 0xfffb
+#define USB_PID_E3C_EC168_4 0x1001
+#define USB_PID_E3C_EC168_5 0x1002
+#define USB_PID_FREECOM_DVBT 0x0160
+#define USB_PID_FREECOM_DVBT_2 0x0161
+#define USB_PID_UNIWILL_STK7700P 0x6003
+#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012
+#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
+#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
+#define USB_PID_GOTVIEW_SAT_HD 0x5456
+#define USB_PID_INTEL_CE9500 0x9500
+#define USB_PID_ITETECH_IT9135 0x9135
+#define USB_PID_ITETECH_IT9135_9005 0x9005
+#define USB_PID_ITETECH_IT9135_9006 0x9006
+#define USB_PID_ITETECH_IT9303 0x9306
+#define USB_PID_KWORLD_399U 0xe399
+#define USB_PID_KWORLD_399U_2 0xe400
+#define USB_PID_KWORLD_395U 0xe396
+#define USB_PID_KWORLD_395U_2 0xe39b
+#define USB_PID_KWORLD_395U_3 0xe395
+#define USB_PID_KWORLD_395U_4 0xe39a
+#define USB_PID_KWORLD_MC810 0xc810
+#define USB_PID_KWORLD_PC160_2T 0xc160
+#define USB_PID_KWORLD_PC160_T 0xc161
+#define USB_PID_KWORLD_UB383_T 0xe383
+#define USB_PID_KWORLD_UB499_2T_T09 0xe409
+#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
+#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
+#define USB_PID_PROF_1100 0xb012
+#define USB_PID_TERRATEC_CINERGY_S 0x0064
+#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
+#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
+#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
+#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
+#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
+#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
+#define USB_PID_TWINHAN_VP7041_COLD 0x3201
+#define USB_PID_TWINHAN_VP7041_WARM 0x3202
+#define USB_PID_TWINHAN_VP7020_COLD 0x3203
+#define USB_PID_TWINHAN_VP7020_WARM 0x3204
+#define USB_PID_TWINHAN_VP7045_COLD 0x3205
+#define USB_PID_TWINHAN_VP7045_WARM 0x3206
+#define USB_PID_TWINHAN_VP7021_COLD 0x3207
+#define USB_PID_TWINHAN_VP7021_WARM 0x3208
+#define USB_PID_TWINHAN_VP7049 0x3219
+#define USB_PID_TINYTWIN 0x3226
+#define USB_PID_TINYTWIN_2 0xe402
+#define USB_PID_TINYTWIN_3 0x9016
+#define USB_PID_DNTV_TINYUSB2_COLD 0x3223
+#define USB_PID_DNTV_TINYUSB2_WARM 0x3224
+#define USB_PID_ULTIMA_TVBOX_COLD 0x8105
+#define USB_PID_ULTIMA_TVBOX_WARM 0x8106
+#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107
+#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108
+#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235
+#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109
+#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a
+#define USB_PID_ARTEC_T14_COLD 0x810b
+#define USB_PID_ARTEC_T14_WARM 0x810c
+#define USB_PID_ARTEC_T14BR 0x810f
+#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613
+#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002
+#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e
+#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f
+#define USB_PID_HANFTEK_UMT_010_COLD 0x0001
+#define USB_PID_HANFTEK_UMT_010_WARM 0x0015
+#define USB_PID_DTT200U_COLD 0x0201
+#define USB_PID_DTT200U_WARM 0x0301
+#define USB_PID_WT220U_ZAP250_COLD 0x0220
+#define USB_PID_WT220U_COLD 0x0222
+#define USB_PID_WT220U_WARM 0x0221
+#define USB_PID_WT220U_FC_COLD 0x0225
+#define USB_PID_WT220U_FC_WARM 0x0226
+#define USB_PID_WT220U_ZL0353_COLD 0x022a
+#define USB_PID_WT220U_ZL0353_WARM 0x022b
+#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300
+#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301
+#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941
+#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950
+#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060
+#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070
+#define USB_PID_HAUPPAUGE_MYTV_T 0x7080
+#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580
+#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200
+#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200
+#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210
+#define USB_PID_AVERMEDIA_EXPRESS 0xb568
+#define USB_PID_AVERMEDIA_VOLAR 0xa807
+#define USB_PID_AVERMEDIA_VOLAR_2 0xb808
+#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868
+#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039
+#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039
+#define USB_PID_AVERMEDIA_VOLAR_X 0xa815
+#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150
+#define USB_PID_AVERMEDIA_A309 0xa309
+#define USB_PID_AVERMEDIA_A310 0xa310
+#define USB_PID_AVERMEDIA_A850 0x850a
+#define USB_PID_AVERMEDIA_A850T 0x850b
+#define USB_PID_AVERMEDIA_A805 0xa805
+#define USB_PID_AVERMEDIA_A815M 0x815a
+#define USB_PID_AVERMEDIA_A835 0xa835
+#define USB_PID_AVERMEDIA_B835 0xb835
+#define USB_PID_AVERMEDIA_A835B_1835 0x1835
+#define USB_PID_AVERMEDIA_A835B_2835 0x2835
+#define USB_PID_AVERMEDIA_A835B_3835 0x3835
+#define USB_PID_AVERMEDIA_A835B_4835 0x4835
+#define USB_PID_AVERMEDIA_1867 0x1867
+#define USB_PID_AVERMEDIA_A867 0xa867
+#define USB_PID_AVERMEDIA_H335 0x0335
+#define USB_PID_AVERMEDIA_TD110 0xa110
+#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
+#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
+#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
+#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
+#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011
+#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
+#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015
+#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
+#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017
+#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
+#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
+#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
+#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060
+#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
+#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
+#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab
+#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8
+#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0
+#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102
+#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105
+#define USB_PID_TERRATEC_H7 0x10b4
+#define USB_PID_TERRATEC_H7_2 0x10a3
+#define USB_PID_TERRATEC_H7_3 0x10a5
+#define USB_PID_TERRATEC_T1 0x10ae
+#define USB_PID_TERRATEC_T3 0x10a0
+#define USB_PID_TERRATEC_T5 0x10a1
+#define USB_PID_NOXON_DAB_STICK 0x00b3
+#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
+#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4
+#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
+#define USB_PID_PINNACLE_PCTV2000E 0x022c
+#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
+#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229
+#define USB_PID_PINNACLE_PCTV71E 0x022b
+#define USB_PID_PINNACLE_PCTV72E 0x0236
+#define USB_PID_PINNACLE_PCTV73E 0x0237
+#define USB_PID_PINNACLE_PCTV310E 0x3211
+#define USB_PID_PINNACLE_PCTV801E 0x023a
+#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
+#define USB_PID_PINNACLE_PCTV340E 0x023d
+#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
+#define USB_PID_PINNACLE_PCTV73A 0x0243
+#define USB_PID_PINNACLE_PCTV73ESE 0x0245
+#define USB_PID_PINNACLE_PCTV74E 0x0246
+#define USB_PID_PINNACLE_PCTV282E 0x0248
+#define USB_PID_PIXELVIEW_SBTVD 0x5010
+#define USB_PID_PCTV_200E 0x020e
+#define USB_PID_PCTV_400E 0x020f
+#define USB_PID_PCTV_450E 0x0222
+#define USB_PID_PCTV_452E 0x021f
+#define USB_PID_PCTV_78E 0x025a
+#define USB_PID_PCTV_79E 0x0262
+#define USB_PID_REALTEK_RTL2831U 0x2831
+#define USB_PID_REALTEK_RTL2832U 0x2832
+#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
+#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a
+#define USB_PID_NEBULA_DIGITV 0x0201
+#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
+#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
+#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501
+#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00
+#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01
+#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10
+#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11
+#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50
+#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51
+#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58
+#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59
+#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78
+#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98
+#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70
+#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71
+#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54
+#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55
+#define USB_PID_MEDION_MD95700 0x0932
+#define USB_PID_MSI_MEGASKY580 0x5580
+#define USB_PID_MSI_MEGASKY580_55801 0x5581
+#define USB_PID_KYE_DVB_T_COLD 0x701e
+#define USB_PID_KYE_DVB_T_WARM 0x701f
+#define USB_PID_LITEON_DVB_T_COLD 0xf000
+#define USB_PID_LITEON_DVB_T_WARM 0xf001
+#define USB_PID_DIGIVOX_MINI_SL_COLD 0xe360
+#define USB_PID_DIGIVOX_MINI_SL_WARM 0xe361
+#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6
+#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7
+#define USB_PID_WINFAST_DTV2000DS 0x6a04
+#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12
+#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025
+#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026
+#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00
+#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6
+#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
+#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
+#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f
+#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
+#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201
+#define USB_PID_GENPIX_8PSK_REV_2 0x0202
+#define USB_PID_GENPIX_SKYWALKER_1 0x0203
+#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204
+#define USB_PID_GENPIX_SKYWALKER_2 0x0206
+#define USB_PID_SIGMATEK_DVB_110 0x6610
+#define USB_PID_MSI_DIGI_VOX_MINI_II 0x1513
+#define USB_PID_MSI_DIGIVOX_DUO 0x8801
+#define USB_PID_OPERA1_COLD 0x2830
+#define USB_PID_OPERA1_WARM 0x3829
+#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514
+#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513
+#define USB_PID_GIGABYTE_U7000 0x7001
+#define USB_PID_GIGABYTE_U8000 0x7002
+#define USB_PID_ASUS_U3000 0x171f
+#define USB_PID_ASUS_U3000H 0x1736
+#define USB_PID_ASUS_U3100 0x173f
+#define USB_PID_ASUS_U3100MINI_PLUS 0x1779
+#define USB_PID_YUAN_EC372S 0x1edc
+#define USB_PID_YUAN_STK7700PH 0x1f08
+#define USB_PID_YUAN_PD378S 0x2edc
+#define USB_PID_YUAN_MC770 0x0871
+#define USB_PID_YUAN_STK7700D 0x1efc
+#define USB_PID_YUAN_STK7700D_2 0x1e8c
+#define USB_PID_DW2102 0x2102
+#define USB_PID_DW2104 0x2104
+#define USB_PID_DW3101 0x3101
+#define USB_PID_XTENSIONS_XD_380 0x0381
+#define USB_PID_TELESTAR_STARSTICK_2 0x8000
+#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
+#define USB_PID_SONY_PLAYTV 0x0003
+#define USB_PID_MYGICA_D689 0xd811
+#define USB_PID_MYGICA_T230 0xc688
+#define USB_PID_MYGICA_T230C 0xc689
+#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
+#define USB_PID_ELGATO_EYETV_DTT 0x0021
+#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
+#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
+#define USB_PID_ELGATO_EYETV_SAT 0x002a
+#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025
+#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036
+#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
+#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_WARM 0x5001
+#define USB_PID_FRIIO_WHITE 0x0001
+#define USB_PID_TVWAY_PLUS 0x0002
+#define USB_PID_SVEON_STV20 0xe39d
+#define USB_PID_SVEON_STV20_RTL2832U 0xd39d
+#define USB_PID_SVEON_STV21 0xd3b0
+#define USB_PID_SVEON_STV22 0xe401
+#define USB_PID_SVEON_STV22_IT9137 0xe411
+#define USB_PID_AZUREWAVE_AZ6027 0x3275
+#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
+#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
+#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
+#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
+#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003
+#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004
+#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
+#define USB_PID_CPYTO_REDI_PC50A 0xa803
+#define USB_PID_CTVDIGDUAL_V2 0xe410
+#define USB_PID_PCTV_2002E 0x025c
+#define USB_PID_PCTV_2002E_SE 0x025d
+#define USB_PID_SVEON_STV27 0xd3af
+#define USB_PID_TURBOX_DTT_2000 0xd3a4
+#define USB_PID_WINTV_SOLOHD 0x0264
+#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
+#define USB_PID_HAMA_DVBT_HYBRID 0x2758
+#define USB_PID_XBOX_ONE_TUNER 0x02d5
+#endif
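These vendor/product defines are meant to be consumed by the drivers' USB match tables. A sketch using the standard USB_DEVICE() helper from <linux/usb.h>; the particular VID/PID pairing below is only an illustration, not a statement about which driver binds these IDs:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_WINTV_NOVA_T_USB2_COLD) },
	{ USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_WINTV_NOVA_T_USB2_WARM) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);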
diff --git a/include/media/dvb_ca_en50221.h b/include/media/dvb_ca_en50221.h
new file mode 100644
index 000000000000..a1c014b0a837
--- /dev/null
+++ b/include/media/dvb_ca_en50221.h
@@ -0,0 +1,142 @@
+/*
+ * dvb_ca.h: generic DVB functions for EN50221 CA interfaces
+ *
+ * Copyright (C) 2004 Andrew de Quincey
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DVB_CA_EN50221_H_
+#define _DVB_CA_EN50221_H_
+
+#include <linux/list.h>
+#include <linux/dvb/ca.h>
+
+#include <media/dvbdev.h>
+
+#define DVB_CA_EN50221_POLL_CAM_PRESENT 1
+#define DVB_CA_EN50221_POLL_CAM_CHANGED 2
+#define DVB_CA_EN50221_POLL_CAM_READY 4
+
+#define DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE 1
+#define DVB_CA_EN50221_FLAG_IRQ_FR 2
+#define DVB_CA_EN50221_FLAG_IRQ_DA 4
+
+#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0
+#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1
+
+/**
+ * struct dvb_ca_en50221 - Structure describing a CA interface
+ *
+ * @owner: the module owning this structure
+ * @read_attribute_mem: function for reading attribute memory on the CAM
+ * @write_attribute_mem: function for writing attribute memory on the CAM
+ * @read_cam_control: function for reading the control interface on the CAM
+ * @write_cam_control: function for writing the control interface on the CAM
+ * @read_data: function for reading data (block mode)
+ * @write_data: function for writing data (block mode)
+ * @slot_reset: function to reset the CAM slot
+ * @slot_shutdown: function to shutdown a CAM slot
+ * @slot_ts_enable: function to enable the Transport Stream on a CAM slot
+ * @poll_slot_status: function to poll slot status. Only necessary if
+ * DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE is not set.
+ * @data: private data, used by caller.
+ * @private: Opaque data used by the dvb_ca core. Do not modify!
+ *
+ * NOTE: the read_*, write_* and poll_slot_status functions will be
+ * called for different slots concurrently and need to use locks where
+ * and if appropriate. There will be no concurrent access to one slot.
+ */
+struct dvb_ca_en50221 {
+ struct module *owner;
+
+ int (*read_attribute_mem)(struct dvb_ca_en50221 *ca,
+ int slot, int address);
+ int (*write_attribute_mem)(struct dvb_ca_en50221 *ca,
+ int slot, int address, u8 value);
+
+ int (*read_cam_control)(struct dvb_ca_en50221 *ca,
+ int slot, u8 address);
+ int (*write_cam_control)(struct dvb_ca_en50221 *ca,
+ int slot, u8 address, u8 value);
+
+ int (*read_data)(struct dvb_ca_en50221 *ca,
+ int slot, u8 *ebuf, int ecount);
+ int (*write_data)(struct dvb_ca_en50221 *ca,
+ int slot, u8 *ebuf, int ecount);
+
+ int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
+ int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot);
+ int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot);
+
+ int (*poll_slot_status)(struct dvb_ca_en50221 *ca, int slot, int open);
+
+ void *data;
+
+ void *private;
+};
+
+/*
+ * Functions for reporting IRQ events
+ */
+
+/**
+ * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
+ *
+ * @pubca: CA instance.
+ * @slot: Slot concerned.
+ * @change_type: One of the DVB_CA_EN50221_CAMCHANGE_* values
+ */
+void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot,
+ int change_type);
+
+/**
+ * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
+ *
+ * @pubca: CA instance.
+ * @slot: Slot concerned.
+ */
+void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot);
+
+/**
+ * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred.
+ *
+ * @ca: CA instance.
+ * @slot: Slot concerned.
+ */
+void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot);
+
+/*
+ * Initialisation/shutdown functions
+ */
+
+/**
+ * dvb_ca_en50221_init - Initialise a new DVB CA device.
+ *
+ * @dvb_adapter: DVB adapter to attach the new CA device to.
+ * @ca: The dvb_ca instance.
+ * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*).
+ * @slot_count: Number of slots supported.
+ *
+ * Return: 0 on success, nonzero on failure
+ */
+int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
+ struct dvb_ca_en50221 *ca, int flags,
+ int slot_count);
+
+/**
+ * dvb_ca_en50221_release - Release a DVB CA device.
+ *
+ * @ca: The associated dvb_ca instance.
+ */
+void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca);
+
+#endif
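A hedged sketch of how a bridge driver would wire up this interface: fill in the ops, register a single slot, and forward the hardware CAM-detect interrupt. All my_* callbacks and the adapter pointer are placeholders; only the dvb_ca_en50221_* symbols come from this header:

static struct dvb_ca_en50221 my_ca = {
	.owner               = THIS_MODULE,
	.read_attribute_mem  = my_read_attribute_mem,
	.write_attribute_mem = my_write_attribute_mem,
	.read_cam_control    = my_read_cam_control,
	.write_cam_control   = my_write_cam_control,
	.slot_reset          = my_slot_reset,
	.slot_shutdown       = my_slot_shutdown,
	.slot_ts_enable      = my_slot_ts_enable,
	/* no poll_slot_status: CAM changes are reported via IRQ instead */
};

static int my_ca_attach(struct dvb_adapter *adap)
{
	return dvb_ca_en50221_init(adap, &my_ca,
				   DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE, 1);
}

static void my_cam_detect_isr(void)
{
	/* called from the bridge interrupt handler on CAM insertion */
	dvb_ca_en50221_camchange_irq(&my_ca, 0,
				     DVB_CA_EN50221_CAMCHANGE_INSERTED);
}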
diff --git a/include/media/dvb_demux.h b/include/media/dvb_demux.h
new file mode 100644
index 000000000000..3b6aeca7a49e
--- /dev/null
+++ b/include/media/dvb_demux.h
@@ -0,0 +1,354 @@
+/*
+ * dvb_demux.h: DVB kernel demux API
+ *
+ * Copyright (C) 2000-2001 Marcus Metzler & Ralph Metzler
+ * for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DVB_DEMUX_H_
+#define _DVB_DEMUX_H_
+
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+
+#include <media/demux.h>
+
+/**
+ * enum dvb_dmx_filter_type - type of demux feed.
+ *
+ * @DMX_TYPE_TS: feed is in TS mode.
+ * @DMX_TYPE_SEC: feed is in Section mode.
+ */
+enum dvb_dmx_filter_type {
+ DMX_TYPE_TS,
+ DMX_TYPE_SEC,
+};
+
+/**
+ * enum dvb_dmx_state - state machine for a demux filter.
+ *
+ * @DMX_STATE_FREE: indicates that the filter is freed.
+ * @DMX_STATE_ALLOCATED: indicates that the filter was allocated
+ * to be used.
+ * @DMX_STATE_READY: indicates that the filter is ready
+ * to be used.
+ * @DMX_STATE_GO: indicates that the filter is running.
+ */
+enum dvb_dmx_state {
+ DMX_STATE_FREE,
+ DMX_STATE_ALLOCATED,
+ DMX_STATE_READY,
+ DMX_STATE_GO,
+};
+
+#define DVB_DEMUX_MASK_MAX 18
+
+#define MAX_PID 0x1fff
+
+#define SPEED_PKTS_INTERVAL 50000
+
+/**
+ * struct dvb_demux_filter - Describes a DVB demux section filter.
+ *
+ * @filter: Section filter as defined by &struct dmx_section_filter.
+ * @maskandmode: logical ``and`` bit mask.
+ * @maskandnotmode: logical ``and not`` bit mask.
+ * @doneq: flag that indicates when a filter is ready.
+ * @next: pointer to the next section filter.
+ * @feed: &struct dvb_demux_feed pointer.
+ * @index: index of the used demux filter.
+ * @state: state of the filter as described by &enum dvb_dmx_state.
+ * @type: type of the filter as described
+ * by &enum dvb_dmx_filter_type.
+ */
+
+struct dvb_demux_filter {
+ struct dmx_section_filter filter;
+ u8 maskandmode[DMX_MAX_FILTER_SIZE];
+ u8 maskandnotmode[DMX_MAX_FILTER_SIZE];
+ bool doneq;
+
+ struct dvb_demux_filter *next;
+ struct dvb_demux_feed *feed;
+ int index;
+ enum dvb_dmx_state state;
+ enum dvb_dmx_filter_type type;
+
+ /* private: used only by av7110 */
+ u16 hw_handle;
+};
+
+/**
+ * struct dvb_demux_feed - describes a DVB field
+ *
+ * @feed: a union describing a digital TV feed.
+ * Depending on the feed type, it can be either
+ * @feed.ts or @feed.sec.
+ * @feed.ts: a &struct dmx_ts_feed pointer.
+ * For TS feed only.
+ * @feed.sec: a &struct dmx_section_feed pointer.
+ * For section feed only.
+ * @cb: a union describing digital TV callbacks.
+ * Depending on the feed type, it can be either
+ * @cb.ts or @cb.sec.
+ * @cb.ts: a dmx_ts_cb() callback function pointer.
+ * For TS feed only.
+ * @cb.sec: a dmx_section_cb() callback function pointer.
+ * For section feed only.
+ * @demux: pointer to &struct dvb_demux.
+ * @priv: private data that can optionally be used by a DVB driver.
+ * @type: type of the filter, as defined by &enum dvb_dmx_filter_type.
+ * @state: state of the filter as defined by &enum dvb_dmx_state.
+ * @pid: PID to be filtered.
+ * @timeout: feed timeout.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @buffer_flags: Buffer flags used to report discontinuities to userspace via the DVB
+ * memory mapped API, as defined by &enum dmx_buffer_flags.
+ * @ts_type: type of TS, as defined by &enum ts_filter_type.
+ * @pes_type: type of PES, as defined by &enum dmx_ts_pes.
+ * @cc: MPEG-TS packet continuity counter
+ * @pusi_seen: if true, indicates that a discontinuity was detected.
+ * It is used to prevent feeding garbage from the previous section.
+ * @peslen: length of the PES (Packet Elementary Stream).
+ * @list_head: head for the list of digital TV demux feeds.
+ * @index: a unique index for each feed. Can be used as hardware
+ * pid filter index.
+ */
+struct dvb_demux_feed {
+ union {
+ struct dmx_ts_feed ts;
+ struct dmx_section_feed sec;
+ } feed;
+
+ union {
+ dmx_ts_cb ts;
+ dmx_section_cb sec;
+ } cb;
+
+ struct dvb_demux *demux;
+ void *priv;
+ enum dvb_dmx_filter_type type;
+ enum dvb_dmx_state state;
+ u16 pid;
+
+ ktime_t timeout;
+ struct dvb_demux_filter *filter;
+
+ u32 buffer_flags;
+
+ enum ts_filter_type ts_type;
+ enum dmx_ts_pes pes_type;
+
+ int cc;
+ bool pusi_seen;
+
+ u16 peslen;
+
+ struct list_head list_head;
+ unsigned int index;
+};
+
+/**
+ * struct dvb_demux - represents a digital TV demux
+ * @dmx: embedded &struct dmx_demux with demux capabilities
+ * and callbacks.
+ * @priv: private data that can optionally be used by
+ * a DVB driver.
+ * @filternum: maximum number of DVB filters.
+ * @feednum: maximum number of DVB feeds.
+ * @start_feed: callback routine to be called in order to start
+ * a DVB feed.
+ * @stop_feed: callback routine to be called in order to stop
+ * a DVB feed.
+ * @write_to_decoder: callback routine to be called if the feed is TS and
+ * it is routed to an A/V decoder, when a new TS packet
+ * is received.
+ * Used only on av7110-av.c.
+ * @check_crc32: callback routine to check CRC. If not initialized,
+ * dvb_demux will use an internal one.
+ * @memcopy: callback routine to memcopy received data.
+ * If not initialized, dvb_demux will default to memcpy().
+ * @users: counter for the number of demux opened file descriptors.
+ * Currently, it is limited to 10 users.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @feed: pointer to &struct dvb_demux_feed.
+ * @frontend_list: &struct list_head with frontends used by the demux.
+ * @pesfilter: array of &struct dvb_demux_feed with the PES types
+ * that will be filtered.
+ * @pids: list of filtered program IDs.
+ * @feed_list: &struct list_head with feeds.
+ * @tsbuf: temporary buffer used internally to store TS packets.
+ * @tsbufp: temporary buffer index used internally.
+ * @mutex: pointer to &struct mutex used to protect feed set
+ * logic.
+ * @lock: pointer to &spinlock_t, used to protect buffer handling.
+ * @cnt_storage: buffer used for TS/TEI continuity check.
+ * @speed_last_time: &ktime_t used for TS speed check.
+ * @speed_pkts_cnt: packets count used for TS speed check.
+ */
+struct dvb_demux {
+ struct dmx_demux dmx;
+ void *priv;
+ int filternum;
+ int feednum;
+ int (*start_feed)(struct dvb_demux_feed *feed);
+ int (*stop_feed)(struct dvb_demux_feed *feed);
+ int (*write_to_decoder)(struct dvb_demux_feed *feed,
+ const u8 *buf, size_t len);
+ u32 (*check_crc32)(struct dvb_demux_feed *feed,
+ const u8 *buf, size_t len);
+ void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst,
+ const u8 *src, size_t len);
+
+ int users;
+#define MAX_DVB_DEMUX_USERS 10
+ struct dvb_demux_filter *filter;
+ struct dvb_demux_feed *feed;
+
+ struct list_head frontend_list;
+
+ struct dvb_demux_feed *pesfilter[DMX_PES_OTHER];
+ u16 pids[DMX_PES_OTHER];
+
+#define DMX_MAX_PID 0x2000
+ struct list_head feed_list;
+ u8 tsbuf[204];
+ int tsbufp;
+
+ struct mutex mutex;
+ spinlock_t lock;
+
+ uint8_t *cnt_storage; /* for TS continuity check */
+
+ ktime_t speed_last_time; /* for TS speed check */
+ uint32_t speed_pkts_cnt; /* for TS speed check */
+
+ /* private: used only on av7110 */
+ int playing;
+ int recording;
+};
+
+/**
+ * dvb_dmx_init - initialize a digital TV demux struct.
+ *
+ * @demux: &struct dvb_demux to be initialized.
+ *
+ * Before being able to register a digital TV demux struct, drivers
+ * should call this routine. In typical usage, some fields should
+ * be initialized by the driver before calling it.
+ *
+ * A typical usecase is::
+ *
+ * dvb->demux.dmx.capabilities =
+ * DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ * DMX_MEMORY_BASED_FILTERING;
+ * dvb->demux.priv = dvb;
+ * dvb->demux.filternum = 256;
+ * dvb->demux.feednum = 256;
+ * dvb->demux.start_feed = driver_start_feed;
+ * dvb->demux.stop_feed = driver_stop_feed;
+ * ret = dvb_dmx_init(&dvb->demux);
+ * if (ret < 0)
+ * return ret;
+ */
+int dvb_dmx_init(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_release - releases a digital TV demux internal buffers.
+ *
+ * @demux: &struct dvb_demux to be released.
+ *
+ * The DVB core internally allocates data at @demux. This routine
+ * releases that data. Please notice that the struct itself is not
+ * released, as it can be embedded in other structs.
+ */
+void dvb_dmx_release(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * The routine will discard any DVB packet that doesn't start with 0x47.
+ *
+ * Use this routine if the DVB demux fills MPEG-TS buffers that are
+ * already aligned.
+ *
+ * NOTE: @buf should have a size equal to ``count * 188``.
+ */
+void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
+
+/**
+ * dvb_dmx_swfilter - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * If a DVB packet doesn't start with 0x47, this routine will search for
+ * the first byte with the value 0x47.
+ *
+ * Use this routine if the DVB demux fills buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: @buf should have a size equal to ``count * 188``.
+ */
+void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
+
+/**
+ * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 204 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 204.
+ *
+ * If a DVB packet doesn't start with 0x47, this routine will search for
+ * the first byte with the value 0x47.
+ *
+ * Use this routine if the DVB demux fills buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: @buf should have a size equal to ``count * 204``.
+ */
+void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
+
+/**
+ * dvb_dmx_swfilter_raw - make the raw data available to userspace without
+ * filtering
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data
+ * @count: number of packets to be passed. The actual size of each packet
+ * depends on the &dvb_demux->feed->cb.ts logic.
+ *
+ * Use it if the driver needs to deliver the raw payload to userspace without
+ * passing through the kernel demux. That is meant to support some
+ * delivery systems that aren't based on MPEG-TS.
+ *
+ * This function relies on &dvb_demux->feed->cb.ts to actually handle the
+ * buffer.
+ */
+void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
+
+#endif /* _DVB_DEMUX_H_ */
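A hedged sketch of the typical entry point into the software demux: a bridge driver pushes each completed, 188-byte aligned DMA buffer into dvb_dmx_swfilter_packets(), while the other swfilter variants cover unaligned or 204-byte streams. my_dma_complete() is a placeholder name:

static void my_dma_complete(struct dvb_demux *demux, const u8 *buf,
			    size_t len)
{
	/* buffer assumed to contain whole, aligned 188-byte TS packets */
	dvb_dmx_swfilter_packets(demux, buf, len / 188);
}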
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
new file mode 100644
index 000000000000..331c8269c00e
--- /dev/null
+++ b/include/media/dvb_frontend.h
@@ -0,0 +1,795 @@
+/*
+ * dvb_frontend.h
+ *
+ * The Digital TV Frontend kABI defines a driver-internal interface for
+ * registering low-level, hardware specific driver to a hardware independent
+ * frontend layer.
+ *
+ * Copyright (C) 2001 convergence integrated media GmbH
+ * Copyright (C) 2004 convergence GmbH
+ *
+ * Written by Ralph Metzler
+ * Overhauled by Holger Waechtler
+ * Kernel I2C stuff by Michael Hunold <hunold@convergence.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#ifndef _DVB_FRONTEND_H_
+#define _DVB_FRONTEND_H_
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ioctl.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <linux/dvb/frontend.h>
+
+#include <media/dvbdev.h>
+
+/*
+ * Maximum number of Delivery systems per frontend. It
+ * should be smaller or equal to 32
+ */
+#define MAX_DELSYS 8
+
+/**
+ * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
+ *
+ * @min_delay_ms: minimum delay for tuning, in ms
+ * @step_size: step size between two consecutive frequencies
+ * @max_drift: maximum drift
+ *
+ * NOTE: step_size is in Hz for terrestrial/cable, or in kHz for satellite
+ */
+struct dvb_frontend_tune_settings {
+ int min_delay_ms;
+ int step_size;
+ int max_drift;
+};
+
+struct dvb_frontend;
+
+/**
+ * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
+ *
+ * @name: name of the Frontend
+ * @frequency_min: minimal frequency supported
+ * @frequency_max: maximum frequency supported
+ * @frequency_step: frequency step
+ * @bandwidth_min: minimal frontend bandwidth supported
+ * @bandwidth_max: maximum frontend bandwidth supported
+ * @bandwidth_step: frontend bandwidth step
+ *
+ * NOTE: frequency parameters are in Hz for terrestrial/cable, or in kHz
+ * for satellite.
+ */
+struct dvb_tuner_info {
+ char name[128];
+
+ u32 frequency_min;
+ u32 frequency_max;
+ u32 frequency_step;
+
+ u32 bandwidth_min;
+ u32 bandwidth_max;
+ u32 bandwidth_step;
+};
+
+/**
+ * struct analog_parameters - Parameters to tune into an analog/radio channel
+ *
+ * @frequency: Frequency used by analog TV tuner (either in 62.5 kHz step,
+ * for TV, or 62.5 Hz for radio)
+ * @mode: Tuner mode, as defined on enum v4l2_tuner_type
+ * @audmode: Audio mode as defined for the rxsubchans field at videodev2.h,
+ * e. g. V4L2_TUNER_MODE_*
+ * @std: TV standard bitmap as defined at videodev2.h, e. g. V4L2_STD_*
+ *
+ * Hybrid tuners should be supported by both V4L2 and DVB APIs. This
+ * struct contains the data that are used by the V4L2 side. To avoid
+ * dependencies on V4L2 headers, all enums here are declared as integers.
+ */
+struct analog_parameters {
+ unsigned int frequency;
+ unsigned int mode;
+ unsigned int audmode;
+ u64 std;
+};
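A hedged sketch of what the V4L2 side of a hybrid driver could pass down to a tuner's set_analog_params callback. The V4L2_* constants come from the V4L2 headers, the frequency value is purely illustrative, and the embedded ops member of struct dvb_frontend is defined later in this header:

static void example_tune_analog(struct dvb_frontend *fe)
{
	struct analog_parameters params = {
		.frequency = 7668,	/* 479.25 MHz in 62.5 kHz steps */
		.mode      = V4L2_TUNER_ANALOG_TV,
		.audmode   = V4L2_TUNER_MODE_STEREO,
		.std       = V4L2_STD_PAL,
	};

	if (fe->ops.tuner_ops.set_analog_params)
		fe->ops.tuner_ops.set_analog_params(fe, &params);
}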
+
+/**
+ * enum dvbfe_algo - defines the algorithm used to tune into a channel
+ *
+ * @DVBFE_ALGO_HW: Hardware Algorithm -
+ * Devices that support this algorithm do everything in hardware
+ * and no software support is needed to handle them.
+ * Requesting these devices to LOCK is the only thing required;
+ * the device is supposed to do everything in hardware.
+ *
+ * @DVBFE_ALGO_SW: Software Algorithm -
+ * These are dumb devices that require software to do everything
+ *
+ * @DVBFE_ALGO_CUSTOM: Customizable Algorithm -
+ * Devices having this algorithm can be customized to have specific
+ * algorithms in the frontend driver, rather than simply doing a
+ * software zig-zag. In this case the zigzag maybe hardware assisted
+ * or it maybe completely done in hardware. In all cases, usage of
+ * this algorithm, in conjunction with the search and track
+ * callbacks, utilizes the driver specific algorithm.
+ *
+ * @DVBFE_ALGO_RECOVERY: Recovery Algorithm -
+ * These devices have AUTO recovery capabilities from LOCK failure
+ */
+enum dvbfe_algo {
+ DVBFE_ALGO_HW = (1 << 0),
+ DVBFE_ALGO_SW = (1 << 1),
+ DVBFE_ALGO_CUSTOM = (1 << 2),
+ DVBFE_ALGO_RECOVERY = (1 << 31)
+};
+
+/**
+ * enum dvbfe_search - search callback possible return status
+ *
+ * @DVBFE_ALGO_SEARCH_SUCCESS:
+ * The frontend search algorithm completed and returned successfully
+ *
+ * @DVBFE_ALGO_SEARCH_ASLEEP:
+ * The frontend search algorithm is sleeping
+ *
+ * @DVBFE_ALGO_SEARCH_FAILED:
+ * The frontend search for a signal failed
+ *
+ * @DVBFE_ALGO_SEARCH_INVALID:
+ * The frontend search algorithm was probably supplied with invalid
+ * parameters and the search is an invalid one
+ *
+ * @DVBFE_ALGO_SEARCH_ERROR:
+ * The frontend search algorithm failed due to some error
+ *
+ * @DVBFE_ALGO_SEARCH_AGAIN:
+ * The frontend search algorithm was requested to search again
+ */
+enum dvbfe_search {
+ DVBFE_ALGO_SEARCH_SUCCESS = (1 << 0),
+ DVBFE_ALGO_SEARCH_ASLEEP = (1 << 1),
+ DVBFE_ALGO_SEARCH_FAILED = (1 << 2),
+ DVBFE_ALGO_SEARCH_INVALID = (1 << 3),
+ DVBFE_ALGO_SEARCH_AGAIN = (1 << 4),
+ DVBFE_ALGO_SEARCH_ERROR = (1 << 31),
+};
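A hedged sketch of the DVBFE_ALGO_CUSTOM path described above: the core asks the driver for its algorithm and, on "custom", drives the driver's own search() callback instead of the software zig-zag. my_try_lock() is a placeholder for the device-specific acquisition code:

static enum dvbfe_algo my_get_frontend_algo(struct dvb_frontend *fe)
{
	return DVBFE_ALGO_CUSTOM;
}

static enum dvbfe_search my_search(struct dvb_frontend *fe)
{
	if (my_try_lock(fe))
		return DVBFE_ALGO_SEARCH_SUCCESS;

	return DVBFE_ALGO_SEARCH_AGAIN;	/* ask the core to call us again */
}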
+
+/**
+ * struct dvb_tuner_ops - Tuner information and callbacks
+ *
+ * @info: embedded &struct dvb_tuner_info with tuner properties
+ * @release: callback function called when frontend is detached.
+ * drivers should free any allocated memory.
+ * @init: callback function used to initialize the tuner device.
+ * @sleep: callback function used to put the tuner to sleep.
+ * @suspend: callback function used to inform that the Kernel will
+ * suspend.
+ * @resume: callback function used to inform that the Kernel is
+ * resuming from suspend.
+ * @set_params: callback function used to inform the tuner to tune
+ * into a digital TV channel. The properties to be used
+ * are stored at &struct dvb_frontend.dtv_property_cache.
+ * The tuner driver can change the parameters to reflect
+ * the changes needed for the channel to be tuned, and
+ * update statistics. This is the recommended way to set
+ * the tuner parameters and should be used on newer
+ * drivers.
+ * @set_analog_params: callback function used to tune into an analog TV
+ * channel on hybrid tuners. It passes @analog_parameters
+ * to the driver.
+ * @set_config: callback function used to send some tuner-specific
+ * parameters.
+ * @get_frequency: get the actual tuned frequency
+ * @get_bandwidth: get the bandwidth used by the low pass filters
+ * @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband,
+ * should return 0.
+ * @get_status: returns the frontend lock status
+ * @get_rf_strength: returns the RF signal strength. Used mostly to support
+ * analog TV and radio. Digital TV should report, instead,
+ * via DVBv5 API (&struct dvb_frontend.dtv_property_cache).
+ * @get_afc: Used only by analog TV core. Reports the frequency
+ * drift due to AFC.
+ * @calc_regs: callback function used to pass register data settings
+ * for simple tuners. Shouldn't be used on newer drivers.
+ * @set_frequency: Set a new frequency. Shouldn't be used on newer drivers.
+ * @set_bandwidth: Set a new bandwidth. Shouldn't be used on newer drivers.
+ *
+ * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for
+ * terrestrial/cable or kHz for satellite.
+ *
+ */
+struct dvb_tuner_ops {
+
+ struct dvb_tuner_info info;
+
+ void (*release)(struct dvb_frontend *fe);
+ int (*init)(struct dvb_frontend *fe);
+ int (*sleep)(struct dvb_frontend *fe);
+ int (*suspend)(struct dvb_frontend *fe);
+ int (*resume)(struct dvb_frontend *fe);
+
+ /* This is the recommended way to set the tuner */
+ int (*set_params)(struct dvb_frontend *fe);
+ int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p);
+
+ int (*set_config)(struct dvb_frontend *fe, void *priv_cfg);
+
+ int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency);
+ int (*get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth);
+ int (*get_if_frequency)(struct dvb_frontend *fe, u32 *frequency);
+
+#define TUNER_STATUS_LOCKED 1
+#define TUNER_STATUS_STEREO 2
+ int (*get_status)(struct dvb_frontend *fe, u32 *status);
+ int (*get_rf_strength)(struct dvb_frontend *fe, u16 *strength);
+ int (*get_afc)(struct dvb_frontend *fe, s32 *afc);
+
+ /*
+ * This is support for demods like the mt352 - fills out the supplied
+ * buffer with what to write.
+ *
+ * Don't use on newer drivers.
+ */
+ int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len);
+
+ /*
+ * These are provided separately from set_params in order to
+ * facilitate silicon tuners which require sophisticated tuning loops,
+ * controlling each parameter separately.
+ *
+ * Don't use on newer drivers.
+ */
+ int (*set_frequency)(struct dvb_frontend *fe, u32 frequency);
+ int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth);
+};
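A hedged sketch of a modern, minimal tuner ops table: only the recommended set_params path plus init/sleep and get_if_frequency are populated, and the legacy calc_regs/set_frequency/set_bandwidth hooks are left unset. The my_tuner_* callbacks and the frequency range are placeholders:

static const struct dvb_tuner_ops my_tuner_ops = {
	.info = {
		.name           = "example silicon tuner",
		.frequency_min  = 174000000,	/* Hz, terrestrial */
		.frequency_max  = 862000000,
		.frequency_step = 62500,
	},
	.init             = my_tuner_init,
	.sleep            = my_tuner_sleep,
	.set_params       = my_tuner_set_params,
	.get_if_frequency = my_tuner_get_if_frequency,
};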
+
+/**
+ * struct analog_demod_info - Information struct for analog TV part of the demod
+ *
+ * @name: Name of the analog TV demodulator
+ */
+struct analog_demod_info {
+ char *name;
+};
+
+/**
+ * struct analog_demod_ops - Demodulation information and callbacks for
+ * analog TV and radio
+ *
+ * @info: pointer to struct analog_demod_info
+ * @set_params: callback function used to inform the demod to set the
+ * demodulator parameters needed to decode an analog or
+ * radio channel. The properties are passed via
+ * &struct analog_params.
+ * @has_signal: returns 0xffff if it has a signal, or 0 if it doesn't.
+ * @get_afc: Used only by analog TV core. Reports the frequency
+ * drift due to AFC.
+ * @tuner_status: callback function that returns tuner status bits, e. g.
+ * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO.
+ * @standby: set the tuner to standby mode.
+ * @release: callback function called when frontend is detached.
+ * drivers should free any allocated memory.
+ * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
+ * mux support instead.
+ * @set_config: callback function used to send some tuner-specific
+ * parameters.
+ */
+struct analog_demod_ops {
+
+ struct analog_demod_info info;
+
+ void (*set_params)(struct dvb_frontend *fe,
+ struct analog_parameters *params);
+ int (*has_signal)(struct dvb_frontend *fe, u16 *signal);
+ int (*get_afc)(struct dvb_frontend *fe, s32 *afc);
+ void (*tuner_status)(struct dvb_frontend *fe);
+ void (*standby)(struct dvb_frontend *fe);
+ void (*release)(struct dvb_frontend *fe);
+ int (*i2c_gate_ctrl)(struct dvb_frontend *fe, int enable);
+
+ /* This is to allow setting tuner-specific configuration */
+ int (*set_config)(struct dvb_frontend *fe, void *priv_cfg);
+};
+
+struct dtv_frontend_properties;
+
+
+/**
+ * struct dvb_frontend_ops - Demodulation information and callbacks for
+ * digital TV
+ *
+ * @info: embedded &struct dvb_tuner_info with tuner properties
+ * @delsys: Delivery systems supported by the frontend
+ * @detach: callback function called when frontend is detached.
+ * drivers should clean up, but not yet free the &struct
+ * dvb_frontend allocation.
+ * @release: callback function called when frontend is ready to be
+ * freed.
+ * drivers should free any allocated memory.
+ * @release_sec: callback function requesting that the Satellite Equipment
+ * Control (SEC) driver release and free any memory
+ * allocated by the driver.
+ * @init: callback function used to initialize the tuner device.
+ * @sleep: callback function used to put the tuner to sleep.
+ * @write: callback function used by some demod legacy drivers to
+ * allow other drivers to write data into their registers.
+ * Should not be used on new drivers.
+ * @tune: callback function used by demod drivers that use
+ * @DVBFE_ALGO_HW to tune into a frequency.
+ * @get_frontend_algo: returns the desired hardware algorithm.
+ * @set_frontend: callback function used to inform the demod to set the
+ * parameters for demodulating a digital TV channel.
+ * The properties to be used are stored at &struct
+ * dvb_frontend.dtv_property_cache. The demod can change
+ * the parameters to reflect the changes needed for the
+ * channel to be decoded, and update statistics.
+ * @get_tune_settings: callback function used to retrieve the tune settings
+ * used by the software zig-zag algorithm.
+ * @get_frontend: callback function used to retrieve the parameters
+ * actually in use. The properties in use are stored at
+ * &struct dvb_frontend.dtv_property_cache, and statistics
+ * should be updated. Please notice that it should not return
+ * an error code if the statistics are not available
+ * because the demod is not locked.
+ * @read_status: returns the locking status of the frontend.
+ * @read_ber: legacy callback function to return the bit error rate.
+ * Newer drivers should provide such info via DVBv5 API,
+ * e. g. @set_frontend/@get_frontend, implementing this
+ * callback only if DVBv3 API compatibility is wanted.
+ * @read_signal_strength: legacy callback function to return the signal
+ * strength. Newer drivers should provide such info via
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
+ * implementing this callback only if DVBv3 API
+ * compatibility is wanted.
+ * @read_snr: legacy callback function to return the Signal/Noise
+ * rate. Newer drivers should provide such info via
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
+ * implementing this callback only if DVBv3 API
+ * compatibility is wanted.
+ * @read_ucblocks: legacy callback function to return the Uncorrected Error
+ * Blocks. Newer drivers should provide such info via
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
+ * implementing this callback only if DVBv3 API
+ * compatibility is wanted.
+ * @diseqc_reset_overload: callback function to implement the
+ * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite)
+ * @diseqc_send_master_cmd: callback function to implement the
+ * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite).
+ * @diseqc_recv_slave_reply: callback function to implement the
+ * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite)
+ * @diseqc_send_burst: callback function to implement the
+ * FE_DISEQC_SEND_BURST() ioctl (only Satellite).
+ * @set_tone: callback function to implement the
+ * FE_SET_TONE() ioctl (only Satellite).
+ * @set_voltage: callback function to implement the
+ * FE_SET_VOLTAGE() ioctl (only Satellite).
+ * @enable_high_lnb_voltage: callback function to implement the
+ * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite).
+ * @dishnetwork_send_legacy_command: callback function to implement the
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite).
+ * Drivers should not use this, except when the DVB
+ * core emulation fails to provide proper support (e.g.
+ * if @set_voltage takes more than 8ms to work), and
+ * when backward compatibility with this legacy API is
+ * required.
+ * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
+ * mux support instead.
+ * @ts_bus_ctrl: callback function used to take control of the TS bus.
+ * @set_lna: callback function to power on/off/auto the LNA.
+ * @search: callback function used by frontends that implement custom
+ * search algorithms.
+ * @tuner_ops: pointer to &struct dvb_tuner_ops
+ * @analog_ops: pointer to &struct analog_demod_ops
+ */
+struct dvb_frontend_ops {
+ struct dvb_frontend_info info;
+
+ u8 delsys[MAX_DELSYS];
+
+ void (*detach)(struct dvb_frontend *fe);
+ void (*release)(struct dvb_frontend* fe);
+ void (*release_sec)(struct dvb_frontend* fe);
+
+ int (*init)(struct dvb_frontend* fe);
+ int (*sleep)(struct dvb_frontend* fe);
+
+ int (*write)(struct dvb_frontend* fe, const u8 buf[], int len);
+
+ /* if this is set, it overrides the default swzigzag */
+ int (*tune)(struct dvb_frontend* fe,
+ bool re_tune,
+ unsigned int mode_flags,
+ unsigned int *delay,
+ enum fe_status *status);
+
+ /* get frontend tuning algorithm from the module */
+ enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe);
+
+ /* these two are only used for the swzigzag code */
+ int (*set_frontend)(struct dvb_frontend *fe);
+ int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings);
+
+ int (*get_frontend)(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *props);
+
+ int (*read_status)(struct dvb_frontend *fe, enum fe_status *status);
+ int (*read_ber)(struct dvb_frontend* fe, u32* ber);
+ int (*read_signal_strength)(struct dvb_frontend* fe, u16* strength);
+ int (*read_snr)(struct dvb_frontend* fe, u16* snr);
+ int (*read_ucblocks)(struct dvb_frontend* fe, u32* ucblocks);
+
+ int (*diseqc_reset_overload)(struct dvb_frontend* fe);
+ int (*diseqc_send_master_cmd)(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd);
+ int (*diseqc_recv_slave_reply)(struct dvb_frontend* fe, struct dvb_diseqc_slave_reply* reply);
+ int (*diseqc_send_burst)(struct dvb_frontend *fe,
+ enum fe_sec_mini_cmd minicmd);
+ int (*set_tone)(struct dvb_frontend *fe, enum fe_sec_tone_mode tone);
+ int (*set_voltage)(struct dvb_frontend *fe,
+ enum fe_sec_voltage voltage);
+ int (*enable_high_lnb_voltage)(struct dvb_frontend* fe, long arg);
+ int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd);
+ int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable);
+ int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire);
+ int (*set_lna)(struct dvb_frontend *);
+
+ /*
+ * These callbacks are for devices that implement their own
+ * tuning algorithms, rather than a simple swzigzag
+ */
+ enum dvbfe_search (*search)(struct dvb_frontend *fe);
+
+ struct dvb_tuner_ops tuner_ops;
+ struct analog_demod_ops analog_ops;
+};
+
+#ifdef __DVB_CORE__
+#define MAX_EVENT 8
+
+/* Used only internally at dvb_frontend.c */
+struct dvb_fe_events {
+ struct dvb_frontend_event events[MAX_EVENT];
+ int eventw;
+ int eventr;
+ int overflow;
+ wait_queue_head_t wait_queue;
+ struct mutex mtx;
+};
+#endif
+
+/**
+ * struct dtv_frontend_properties - contains a list of properties that are
+ * specific to a digital TV standard.
+ *
+ * @frequency: frequency in Hz for terrestrial/cable or in kHz for
+ * Satellite
+ * @modulation: Frontend modulation type
+ * @voltage: SEC voltage (only Satellite)
+ * @sectone: SEC tone mode (only Satellite)
+ * @inversion: Spectral inversion
+ * @fec_inner: Forward error correction inner Code Rate
+ * @transmission_mode: Transmission Mode
+ * @bandwidth_hz: Bandwidth, in Hz. A zero value means that userspace
+ * wants to autodetect.
+ * @guard_interval: Guard Interval
+ * @hierarchy: Hierarchy
+ * @symbol_rate: Symbol Rate
+ * @code_rate_HP: high priority stream code rate
+ * @code_rate_LP: low priority stream code rate
+ * @pilot: Enable/disable/autodetect pilot tones
+ * @rolloff: Rolloff factor (alpha)
+ * @delivery_system: FE delivery system (e. g. digital TV standard)
+ * @interleaving: interleaving
+ * @isdbt_partial_reception: ISDB-T partial reception (only ISDB standard)
+ * @isdbt_sb_mode: ISDB-T Sound Broadcast (SB) mode (only ISDB standard)
+ * @isdbt_sb_subchannel: ISDB-T SB subchannel (only ISDB standard)
+ * @isdbt_sb_segment_idx: ISDB-T SB segment index (only ISDB standard)
+ * @isdbt_sb_segment_count: ISDB-T SB segment count (only ISDB standard)
+ * @isdbt_layer_enabled: ISDB Layer enabled (only ISDB standard)
+ * @layer: ISDB per-layer data (only ISDB standard)
+ * @layer.segment_count: Segment Count;
+ * @layer.fec: per layer code rate;
+ * @layer.modulation: per layer modulation;
+ * @layer.interleaving: per layer interleaving.
+ * @stream_id: If different than zero, enable substream filtering, if
+ * the hardware supports it (DVB-S2 and DVB-T2).
+ * @scrambling_sequence_index: Carries the index of the DVB-S2 physical layer
+ * scrambling sequence.
+ * @atscmh_fic_ver: Version number of the FIC (Fast Information Channel)
+ * signaling data (only ATSC-M/H)
+ * @atscmh_parade_id: Parade identification number (only ATSC-M/H)
+ * @atscmh_nog: Number of MH groups per MH subframe for a designated
+ * parade (only ATSC-M/H)
+ * @atscmh_tnog: Total number of MH groups including all MH groups
+ * belonging to all MH parades in one MH subframe
+ * (only ATSC-M/H)
+ * @atscmh_sgn: Start group number (only ATSC-M/H)
+ * @atscmh_prc: Parade repetition cycle (only ATSC-M/H)
+ * @atscmh_rs_frame_mode: Reed Solomon (RS) frame mode (only ATSC-M/H)
+ * @atscmh_rs_frame_ensemble: RS frame ensemble (only ATSC-M/H)
+ * @atscmh_rs_code_mode_pri: RS code mode pri (only ATSC-M/H)
+ * @atscmh_rs_code_mode_sec: RS code mode sec (only ATSC-M/H)
+ * @atscmh_sccc_block_mode: Series Concatenated Convolutional Code (SCCC)
+ * Block Mode (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_a: SCCC code mode A (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_b: SCCC code mode B (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_c: SCCC code mode C (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_d: SCCC code mode D (only ATSC-M/H)
+ * @lna: Power ON/OFF/AUTO the Low-Noise Amplifier (LNA)
+ * @strength: DVBv5 API statistics: Signal Strength
+ * @cnr: DVBv5 API statistics: Signal to Noise ratio of the
+ * (main) carrier
+ * @pre_bit_error: DVBv5 API statistics: pre-Viterbi bit error count
+ * @pre_bit_count: DVBv5 API statistics: pre-Viterbi bit count
+ * @post_bit_error: DVBv5 API statistics: post-Viterbi bit error count
+ * @post_bit_count: DVBv5 API statistics: post-Viterbi bit count
+ * @block_error: DVBv5 API statistics: block error count
+ * @block_count: DVBv5 API statistics: block count
+ *
+ * NOTE: derived statistics like Uncorrected Error blocks (UCE) are
+ * calculated in userspace.
+ *
+ * Only a subset of the properties are needed for a given delivery system.
+ * For more info, consult the media_api.html with the documentation of the
+ * Userspace API.
+ */
+struct dtv_frontend_properties {
+ u32 frequency;
+ enum fe_modulation modulation;
+
+ enum fe_sec_voltage voltage;
+ enum fe_sec_tone_mode sectone;
+ enum fe_spectral_inversion inversion;
+ enum fe_code_rate fec_inner;
+ enum fe_transmit_mode transmission_mode;
+ u32 bandwidth_hz; /* 0 = AUTO */
+ enum fe_guard_interval guard_interval;
+ enum fe_hierarchy hierarchy;
+ u32 symbol_rate;
+ enum fe_code_rate code_rate_HP;
+ enum fe_code_rate code_rate_LP;
+
+ enum fe_pilot pilot;
+ enum fe_rolloff rolloff;
+
+ enum fe_delivery_system delivery_system;
+
+ enum fe_interleaving interleaving;
+
+ /* ISDB-T specifics */
+ u8 isdbt_partial_reception;
+ u8 isdbt_sb_mode;
+ u8 isdbt_sb_subchannel;
+ u32 isdbt_sb_segment_idx;
+ u32 isdbt_sb_segment_count;
+ u8 isdbt_layer_enabled;
+ struct {
+ u8 segment_count;
+ enum fe_code_rate fec;
+ enum fe_modulation modulation;
+ u8 interleaving;
+ } layer[3];
+
+ /* Multistream specifics */
+ u32 stream_id;
+
+ /* Physical Layer Scrambling specifics */
+ u32 scrambling_sequence_index;
+
+ /* ATSC-MH specifics */
+ u8 atscmh_fic_ver;
+ u8 atscmh_parade_id;
+ u8 atscmh_nog;
+ u8 atscmh_tnog;
+ u8 atscmh_sgn;
+ u8 atscmh_prc;
+
+ u8 atscmh_rs_frame_mode;
+ u8 atscmh_rs_frame_ensemble;
+ u8 atscmh_rs_code_mode_pri;
+ u8 atscmh_rs_code_mode_sec;
+ u8 atscmh_sccc_block_mode;
+ u8 atscmh_sccc_code_mode_a;
+ u8 atscmh_sccc_code_mode_b;
+ u8 atscmh_sccc_code_mode_c;
+ u8 atscmh_sccc_code_mode_d;
+
+ u32 lna;
+
+ /* statistics data */
+ struct dtv_fe_stats strength;
+ struct dtv_fe_stats cnr;
+ struct dtv_fe_stats pre_bit_error;
+ struct dtv_fe_stats pre_bit_count;
+ struct dtv_fe_stats post_bit_error;
+ struct dtv_fe_stats post_bit_count;
+ struct dtv_fe_stats block_error;
+ struct dtv_fe_stats block_count;
+};
+
+#define DVB_FE_NO_EXIT 0
+#define DVB_FE_NORMAL_EXIT 1
+#define DVB_FE_DEVICE_REMOVED 2
+#define DVB_FE_DEVICE_RESUME 3
+
+/**
+ * struct dvb_frontend - Frontend structure to be used on drivers.
+ *
+ * @refcount: refcount to keep track of &struct dvb_frontend
+ * references
+ * @ops: embedded &struct dvb_frontend_ops
+ * @dvb: pointer to &struct dvb_adapter
+ * @demodulator_priv: demod private data
+ * @tuner_priv: tuner private data
+ * @frontend_priv: frontend private data
+ * @sec_priv: SEC private data
+ * @analog_demod_priv: Analog demod private data
+ * @dtv_property_cache: embedded &struct dtv_frontend_properties
+ * @callback: callback function used by some drivers to call
+ * either the tuner or the demodulator.
+ * @id: Frontend ID
+ * @exit: Used to inform the DVB core that the frontend
+ * thread should exit (usually, means that the hardware
+ * got disconnected).
+ */
+
+struct dvb_frontend {
+ struct kref refcount;
+ struct dvb_frontend_ops ops;
+ struct dvb_adapter *dvb;
+ void *demodulator_priv;
+ void *tuner_priv;
+ void *frontend_priv;
+ void *sec_priv;
+ void *analog_demod_priv;
+ struct dtv_frontend_properties dtv_property_cache;
+#define DVB_FRONTEND_COMPONENT_TUNER 0
+#define DVB_FRONTEND_COMPONENT_DEMOD 1
+ int (*callback)(void *adapter_priv, int component, int cmd, int arg);
+ int id;
+ unsigned int exit;
+};
+
+/**
+ * dvb_register_frontend() - Registers a DVB frontend at the adapter
+ *
+ * @dvb: pointer to &struct dvb_adapter
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * Allocate and initialize the private data needed by the frontend core to
+ * manage the frontend and calls dvb_register_device() to register a new
+ * frontend. It also cleans the property cache that stores the frontend
+ * parameters and selects the first available delivery system.
+ */
+int dvb_register_frontend(struct dvb_adapter *dvb,
+ struct dvb_frontend *fe);
+
+/**
+ * dvb_unregister_frontend() - Unregisters a DVB frontend
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * Stops the frontend kthread, calls dvb_unregister_device() and frees the
+ * private frontend data allocated by dvb_register_frontend().
+ *
+ * NOTE: This function doesn't free the memory allocated by the demod,
+ * by the SEC driver and by the tuner. In order to free it, an explicit call to
+ * dvb_frontend_detach() is needed, after calling this function.
+ */
+int dvb_unregister_frontend(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_detach() - Detaches and frees frontend specific data
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * This function should be called after dvb_unregister_frontend(). It
+ * calls the SEC, tuner and demod release functions:
+ * &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release,
+ * &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release.
+ *
+ * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases
+ * the module reference count, needed to allow userspace to remove the
+ * previously used DVB frontend modules.
+ */
+void dvb_frontend_detach(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_suspend() - Suspends a Digital TV frontend
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * This function prepares a Digital TV frontend to suspend.
+ *
+ * In order to prepare the tuner to suspend, if
+ * &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise,
+ * it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available.
+ *
+ * It will also call &dvb_frontend_ops.sleep\(\) to put the demod to suspend.
+ *
+ * The drivers should also call dvb_frontend_suspend\(\) as part of their
+ * handler for the &device_driver.suspend\(\).
+ */
+int dvb_frontend_suspend(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_resume() - Resumes a Digital TV frontend
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * This function resumes the usual operation of the tuner after resume.
+ *
+ * In order to resume the frontend, it calls the demod &dvb_frontend_ops.init\(\).
+ *
+ * If &dvb_frontend_ops.tuner_ops.resume\(\) is available, it calls it.
+ * Otherwise, it will call &dvb_frontend_ops.tuner_ops.init\(\), if available.
+ *
+ * Once tuner and demods are resumed, it will enforce that the SEC voltage and
+ * tone are restored to their previous values and wake up the frontend's
+ * kthread in order to retune the frontend.
+ *
+ * The drivers should also call dvb_frontend_resume() as part of their
+ * handler for the &device_driver.resume\(\).
+ */
+int dvb_frontend_resume(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_reinitialise() - forces a reinitialisation at the frontend
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\),
+ * and resets SEC tone and voltage (for Satellite systems).
+ *
+ * NOTE: Currently, this function is used only by one driver (budget-av).
+ * It seems to exist to address some special issue with that specific
+ * frontend.
+ */
+void dvb_frontend_reinitialise(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_sleep_until() - Sleep for the amount of time given by
+ * add_usec parameter
+ *
+ * @waketime: pointer to ktime_t
+ * @add_usec: time to sleep, in microseconds
+ *
+ * This function is used to measure the time required for the
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise
+ * as possible, as it affects the detection of the dish tone command at the
+ * satellite subsystem.
+ *
+ * It's used internally by the DVB frontend core, in order to emulate
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\)
+ * callback.
+ *
+ * NOTE: it should not be used by the drivers, as the emulation for the
+ * legacy callback is provided by the Kernel. The only situation where this
+ * should be used by the drivers is when there are bugs at the hardware that
+ * would prevent the core emulation from working. In such cases, the driver
+ * would implement its own &dvb_frontend_ops.dishnetwork_send_legacy_command\(\)
+ * and call this function directly.
+ */
+void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec);
+
+#endif
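To illustrate how the structures above fit together, a minimal, hypothetical demod driver could fill a struct dvb_frontend_ops, copy it into the embedded ops of its struct dvb_frontend and register the result. The mydemod_* names, the DVB-T-only delivery system list and the stubbed hardware access below are assumptions for illustration, not part of this patch:

static int mydemod_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
	/* A real driver queries the hardware; this stub reports "no lock". */
	*status = 0;
	return 0;
}

static int mydemod_set_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	/* Program the demod from the DVBv5 property cache. */
	pr_debug("tune to %u Hz, bandwidth %u Hz\n",
		 c->frequency, c->bandwidth_hz);
	return 0;
}

static const struct dvb_frontend_ops mydemod_ops = {
	.delsys       = { SYS_DVBT },
	.info         = { .name = "Hypothetical DVB-T demod" },
	.read_status  = mydemod_read_status,
	.set_frontend = mydemod_set_frontend,
};

static int mydemod_register(struct dvb_adapter *adap,
			    struct dvb_frontend *fe, void *priv)
{
	fe->ops = mydemod_ops;		/* copy into the embedded ops */
	fe->demodulator_priv = priv;
	return dvb_register_frontend(adap, fe);
}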
diff --git a/include/media/dvb_math.h b/include/media/dvb_math.h
new file mode 100644
index 000000000000..8690ec42954d
--- /dev/null
+++ b/include/media/dvb_math.h
@@ -0,0 +1,66 @@
+/*
+ * dvb-math provides some complex fixed-point math
+ * operations shared between the dvb related stuff
+ *
+ * Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef __DVB_MATH_H
+#define __DVB_MATH_H
+
+#include <linux/types.h>
+
+/**
+ * intlog2 - computes log2 of a value; the result is shifted left by 24 bits
+ *
+ * @value: The value (must be != 0)
+ *
+ * to use rational values you can use the following method:
+ *
+ * intlog2(value) = intlog2(value * 2^x) - x * 2^24
+ *
+ * Some usecase examples:
+ *
+ * intlog2(8) will give 3 << 24 = 3 * 2^24
+ *
+ * intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24
+ *
+ * intlog2(1.5) = intlog2(3) - 2^24 = 0.584... * 2^24
+ *
+ *
+ * return: log2(value) * 2^24
+ */
+extern unsigned int intlog2(u32 value);
+
+/**
+ * intlog10 - computes log10 of a value; the result is shifted left by 24 bits
+ *
+ * @value: The value (must be != 0)
+ *
+ * to use rational values you can use the following method:
+ *
+ * intlog10(value) = intlog10(value * 10^x) - x * 2^24
+ *
+ * A usecase example:
+ *
+ * intlog10(1000) will give 3 << 24 = 3 * 2^24
+ *
+ * due to the implementation, intlog10(1000) might not be exactly 3 * 2^24
+ *
+ * look at intlog2 for similar examples
+ *
+ * return: log10(value) * 2^24
+ */
+extern unsigned int intlog10(u32 value);
+
+#endif
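A typical use of these fixed-point helpers in demod drivers is converting a linear carrier-to-noise reading into decibels without floating point. The sketch below is illustrative only; the 0.1 dB output scale is an arbitrary choice:

#include <media/dvb_math.h>

/* Convert a linear SNR ratio into 0.1 dB units: 10 * 10 * log10(snr). */
static u32 snr_ratio_to_decidb(u32 snr_linear)
{
	if (!snr_linear)
		return 0;	/* intlog10() requires a non-zero argument */

	/* intlog10() returns log10(x) << 24, so scale and shift back down. */
	return (u32)(((u64)intlog10(snr_linear) * 100) >> 24);
}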
diff --git a/include/media/dvb_net.h b/include/media/dvb_net.h
new file mode 100644
index 000000000000..5e31d37f25fa
--- /dev/null
+++ b/include/media/dvb_net.h
@@ -0,0 +1,93 @@
+/*
+ * dvb_net.h
+ *
+ * Copyright (C) 2001 Ralph Metzler for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DVB_NET_H_
+#define _DVB_NET_H_
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <media/dvbdev.h>
+
+#define DVB_NET_DEVICES_MAX 10
+
+#ifdef CONFIG_DVB_NET
+
+/**
+ * struct dvb_net - describes a DVB network interface
+ *
+ * @dvbdev: pointer to &struct dvb_device.
+ * @device: array of pointers to &struct net_device.
+ * @state: array of integers, one for each net device. A value
+ * different than zero means that the interface is
+ * in use.
+ * @exit: flag to indicate when the device is being removed.
+ * @demux: pointer to &struct dmx_demux.
+ * @ioctl_mutex: protect access to this struct.
+ *
+ * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
+ * devices.
+ */
+
+struct dvb_net {
+ struct dvb_device *dvbdev;
+ struct net_device *device[DVB_NET_DEVICES_MAX];
+ int state[DVB_NET_DEVICES_MAX];
+ unsigned int exit:1;
+ struct dmx_demux *demux;
+ struct mutex ioctl_mutex;
+};
+
+/**
+ * dvb_net_init - initializes a digital TV network device and registers it.
+ *
+ * @adap: pointer to &struct dvb_adapter.
+ * @dvbnet: pointer to &struct dvb_net.
+ * @dmxdemux: pointer to &struct dmx_demux.
+ */
+int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet,
+ struct dmx_demux *dmxdemux);
+
+/**
+ * dvb_net_release - releases a digital TV network device and unregisters it.
+ *
+ * @dvbnet: pointer to &struct dvb_net.
+ */
+void dvb_net_release(struct dvb_net *dvbnet);
+
+#else
+
+struct dvb_net {
+ struct dvb_device *dvbdev;
+};
+
+static inline void dvb_net_release(struct dvb_net *dvbnet)
+{
+}
+
+static inline int dvb_net_init(struct dvb_adapter *adap,
+ struct dvb_net *dvbnet, struct dmx_demux *dmx)
+{
+ return 0;
+}
+
+#endif /* ifdef CONFIG_DVB_NET */
+
+#endif
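Bridge drivers normally call these two functions while bringing the adapter up and tearing it down, after the demux exists and before it goes away. A hypothetical sketch (the mycard container and its member names are assumptions):

static int mycard_net_setup(struct mycard *card)
{
	/* Registers the network device nodes on this adapter. */
	return dvb_net_init(&card->adapter, &card->dvbnet, &card->demux.dmx);
}

static void mycard_net_teardown(struct mycard *card)
{
	/* Must run before the demux and the adapter are unregistered. */
	dvb_net_release(&card->dvbnet);
}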
diff --git a/include/media/dvb_ringbuffer.h b/include/media/dvb_ringbuffer.h
new file mode 100644
index 000000000000..8ed6bcc3a56e
--- /dev/null
+++ b/include/media/dvb_ringbuffer.h
@@ -0,0 +1,280 @@
+/*
+ *
+ * dvb_ringbuffer.h: ring buffer implementation for the dvb driver
+ *
+ * Copyright (C) 2003 Oliver Endriss
+ * Copyright (C) 2004 Andrew de Quincey
+ *
+ * based on code originally found in av7110.c & dvb_ci.c:
+ * Copyright (C) 1999-2003 Ralph Metzler & Marcus Metzler
+ * for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef _DVB_RINGBUFFER_H_
+#define _DVB_RINGBUFFER_H_
+
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+/**
+ * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework
+ *
+ * @data: Area where the ringbuffer data is written
+ * @size: size of the ringbuffer
+ * @pread: next position to read
+ * @pwrite: next position to write
+ * @error: used by ringbuffer clients to indicate that an error happened.
+ * @queue: Wait queue used by ringbuffer clients to indicate when buffer
+ * was filled
+ * @lock: Spinlock used to protect the ringbuffer
+ */
+struct dvb_ringbuffer {
+ u8 *data;
+ ssize_t size;
+ ssize_t pread;
+ ssize_t pwrite;
+ int error;
+
+ wait_queue_head_t queue;
+ spinlock_t lock;
+};
+
+#define DVB_RINGBUFFER_PKTHDRSIZE 3
+
+/**
+ * dvb_ringbuffer_init - initialize ring buffer, lock and queue
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @data: pointer to the buffer where the data will be stored
+ * @len: size of the buffer, in bytes
+ */
+extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data,
+ size_t len);
+
+/**
+ * dvb_ringbuffer_empty - test whether buffer is empty
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
+extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf);
+
+/**
+ * dvb_ringbuffer_free - returns the number of free bytes in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Return: number of free bytes in the buffer
+ */
+extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf);
+
+/**
+ * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Return: number of bytes waiting in the buffer
+ */
+extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf);
+
+/**
+ * dvb_ringbuffer_reset - resets the ringbuffer to initial state
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Resets the read and write pointers to zero and flushes the buffer.
+ *
+ * This counts as a read and write operation
+ */
+extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf);
+
+/*
+ * read routines & macros
+ */
+
+/**
+ * dvb_ringbuffer_flush - flush buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
+extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf);
+
+/**
+ * dvb_ringbuffer_flush_spinlock_wakeup - flush buffer protected by spinlock
+ * and wake-up waiting task(s)
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
+extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
+
+/**
+ * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @offs: offset inside the ringbuffer
+ */
+#define DVB_RINGBUFFER_PEEK(rbuf, offs) \
+ ((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size])
+
+/**
+ * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @num: number of bytes to advance
+ */
+#define DVB_RINGBUFFER_SKIP(rbuf, num) {\
+ (rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\
+}
+
+/**
+ * dvb_ringbuffer_read_user - Reads a buffer into a user pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be stored
+ * @len: number of bytes to read from the ring buffer into @buf
+ *
+ * This variant assumes that the buffer is in userspace memory. So,
+ * it will internally call copy_to_user().
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
+extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf,
+ u8 __user *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_read - Reads a buffer into a pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be stored
+ * @len: number of bytes to read from the ring buffer into @buf
+ *
+ * This variant assumes that the buffer is in Kernel space memory. Unlike
+ * dvb_ringbuffer_read_user(), it doesn't return a value, as reads from
+ * Kernel memory can't fail.
+ */
+extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
+ u8 *buf, size_t len);
+
+/*
+ * write routines & macros
+ */
+
+/**
+ * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @byte: byte to write
+ */
+#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte) \
+ { (rbuf)->data[(rbuf)->pwrite] = (byte); \
+ (rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; }
+
+/**
+ * dvb_ringbuffer_write - Writes a buffer into the ringbuffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be read from
+ * @len: number of bytes to write from @buf into the ring buffer
+ *
+ * This variant assumes that the buffer is in Kernel space memory.
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
+extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
+ size_t len);
+
+/**
+ * dvb_ringbuffer_write_user - Writes a buffer received via a user pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be read from
+ * @len: number of bytes to write from @buf into the ring buffer
+ *
+ * This variant assumes that the buffer is in userspace memory. So,
+ * it will internally call copy_from_user().
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
+extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
+ const u8 __user *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer.
+ *
+ * @rbuf: Ringbuffer to write to.
+ * @buf: Buffer to write.
+ * @len: Length of buffer (currently limited to 65535 bytes max).
+ *
+ * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL.
+ */
+extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf,
+ size_t len);
+
+/**
+ * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
+ * @offset: Offset into packet to read from.
+ * @buf: Destination buffer for data.
+ * @len: Size of destination buffer.
+ *
+ * Return: Number of bytes read, or -EFAULT.
+ *
+ * .. note::
+ *
+ * unlike dvb_ringbuffer_read(), this does **NOT** update the read pointer
+ * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a
+ * packet as no longer required.
+ */
+extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf,
+ size_t idx,
+ int offset, u8 __user *buf,
+ size_t len);
+
+/**
+ * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer.
+ * Note: unlike dvb_ringbuffer_read_user(), this DOES update the read pointer
+ * in the ringbuffer.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
+ * @offset: Offset into packet to read from.
+ * @buf: Destination buffer for data.
+ * @len: Size of destination buffer.
+ *
+ * Return: Number of bytes read, or -EFAULT.
+ */
+extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
+ int offset, u8 *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer.
+ *
+ * @rbuf: Ring buffer concerned.
+ * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
+ */
+extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx);
+
+/**
+ * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Previous packet index, or -1 to return the first packet index.
+ * @pktlen: On success, will be updated to contain the length of the packet
+ * in bytes.
+ * Return: Packet index (if >= 0), or -1 if no packets are available.
+ */
+extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf,
+ size_t idx, size_t *pktlen);
+
+#endif /* _DVB_RINGBUFFER_H_ */
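A typical producer/consumer arrangement is a feed callback that writes kernel data into the ring and wakes the queue, plus a read() handler that drains it to userspace. The sketch below is illustrative only and omits the locking with the @lock spinlock that real users normally add:

static u8 demo_storage[8192];
static struct dvb_ringbuffer demo_ring;

static void demo_ring_setup(void)
{
	dvb_ringbuffer_init(&demo_ring, demo_storage, sizeof(demo_storage));
}

/* Producer side, e.g. called from a demux feed callback. */
static void demo_ring_feed(const u8 *data, size_t len)
{
	if (dvb_ringbuffer_free(&demo_ring) < (ssize_t)len)
		return;		/* drop data if the reader fell behind */
	dvb_ringbuffer_write(&demo_ring, data, len);
	wake_up(&demo_ring.queue);
}

/* Consumer side, e.g. called from a read() file operation. */
static ssize_t demo_ring_drain(u8 __user *buf, size_t count)
{
	ssize_t avail = dvb_ringbuffer_avail(&demo_ring);

	if (avail <= 0)
		return 0;
	if ((size_t)avail > count)
		avail = count;
	return dvb_ringbuffer_read_user(&demo_ring, buf, avail);
}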
diff --git a/include/media/dvb_vb2.h b/include/media/dvb_vb2.h
new file mode 100644
index 000000000000..8cb88452cd6c
--- /dev/null
+++ b/include/media/dvb_vb2.h
@@ -0,0 +1,280 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * dvb-vb2.h - DVB driver helper framework for streaming I/O
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ *
+ * Author: jh1009.sung@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _DVB_VB2_H
+#define _DVB_VB2_H
+
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/dvb/dmx.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+
+/**
+ * enum dvb_buf_type - types of Digital TV memory-mapped buffers
+ *
+ * @DVB_BUF_TYPE_CAPTURE: buffer is filled by the Kernel,
+ * with a received Digital TV stream
+ */
+enum dvb_buf_type {
+ DVB_BUF_TYPE_CAPTURE = 1,
+};
+
+/**
+ * enum dvb_vb2_states - states to control VB2 state machine
+ * @DVB_VB2_STATE_NONE:
+ * VB2 engine not initialized yet, init failed or VB2 was released.
+ * @DVB_VB2_STATE_INIT:
+ * VB2 engine initialized.
+ * @DVB_VB2_STATE_REQBUFS:
+ * Buffers were requested
+ * @DVB_VB2_STATE_STREAMON:
+ * VB2 is streaming. Callers should not check it directly. Instead,
+ * they should use dvb_vb2_is_streaming().
+ *
+ * Note:
+ *
+ * Callers should not touch the state machine directly. This
+ * is handled inside dvb_vb2.c.
+ */
+enum dvb_vb2_states {
+ DVB_VB2_STATE_NONE = 0x0,
+ DVB_VB2_STATE_INIT = 0x1,
+ DVB_VB2_STATE_REQBUFS = 0x2,
+ DVB_VB2_STATE_STREAMON = 0x4,
+};
+
+#define DVB_VB2_NAME_MAX (20)
+
+/**
+ * struct dvb_buffer - video buffer information for v4l2.
+ *
+ * @vb: embedded struct &vb2_buffer.
+ * @list: list of &struct dvb_buffer.
+ */
+struct dvb_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+/**
+ * struct dvb_vb2_ctx - control struct for VB2 handler
+ * @vb_q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @mutex: mutex to serialize vb2 operations. Used by
+ * vb2 core %wait_prepare and %wait_finish operations.
+ * @slock: spin lock used to protect buffer filling at dvb_vb2.c.
+ * @dvb_q: List of buffers that are not filled yet.
+ * @buf: Pointer to the buffer that is currently being filled.
+ * @offset: index of the next position in @buf to be filled.
+ * @remain: How many bytes are left to be filled in @buf.
+ * @state: bitmask of buffer states as defined by &enum dvb_vb2_states.
+ * @buf_siz: size of each VB2 buffer.
+ * @buf_cnt: number of VB2 buffers.
+ * @nonblocking:
+ * If different than zero, device is operating in non-blocking
+ * mode.
+ * @flags: buffer flags as defined by &enum dmx_buffer_flags.
+ * Filled only at &DMX_DQBUF. &DMX_QBUF should zero this field.
+ * @count: monotonic counter for filled buffers. Helps to identify
+ * data stream losses. Filled only at &DMX_DQBUF. &DMX_QBUF should
+ * zero this field.
+ *
+ * @name: name of the device type. Currently, it can either be
+ * "dvr" or "demux_filter".
+ */
+struct dvb_vb2_ctx {
+ struct vb2_queue vb_q;
+ struct mutex mutex;
+ spinlock_t slock;
+ struct list_head dvb_q;
+ struct dvb_buffer *buf;
+ int offset;
+ int remain;
+ int state;
+ int buf_siz;
+ int buf_cnt;
+ int nonblocking;
+
+ enum dmx_buffer_flags flags;
+ u32 count;
+
+ char name[DVB_VB2_NAME_MAX + 1];
+};
+
+#ifndef CONFIG_DVB_MMAP
+static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx,
+ const char *name, int non_blocking)
+{
+ return 0;
+};
+static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx)
+{
+ return 0;
+};
+#define dvb_vb2_is_streaming(ctx) (0)
+#define dvb_vb2_fill_buffer(ctx, file, wait, flags) (0)
+
+static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx,
+ struct file *file,
+ poll_table *wait)
+{
+ return 0;
+}
+#else
+/**
+ * dvb_vb2_init - initializes VB2 handler
+ *
+ * @ctx: control struct for VB2 handler
+ * @name: name for the VB2 handler
+ * @non_blocking:
+ * if not zero, it means that the device is in non-blocking mode
+ */
+int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int non_blocking);
+
+/**
+ * dvb_vb2_release - Releases the VB2 handler allocated resources and
+ * puts @ctx in the DVB_VB2_STATE_NONE state.
+ * @ctx: control struct for VB2 handler
+ */
+int dvb_vb2_release(struct dvb_vb2_ctx *ctx);
+
+/**
+ * dvb_vb2_is_streaming - checks if the VB2 handler is streaming
+ * @ctx: control struct for VB2 handler
+ *
+ * Return: 0 if not streaming, 1 otherwise.
+ */
+int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx);
+
+/**
+ * dvb_vb2_fill_buffer - fills a VB2 buffer
+ * @ctx: control struct for VB2 handler
+ * @src: place where the data is stored
+ * @len: number of bytes to be copied from @src
+ * @buffer_flags:
+ * pointer to buffer flags as defined by &enum dmx_buffer_flags.
+ * can be NULL.
+ */
+int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
+ const unsigned char *src, int len,
+ enum dmx_buffer_flags *buffer_flags);
+
+/**
+ * dvb_vb2_poll - Wrapper to vb2_core_poll() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @file: &struct file argument passed to the poll
+ * file operation handler.
+ * @wait: &poll_table wait argument passed to the poll
+ * file operation handler.
+ *
+ * Implements the poll() syscall logic.
+ */
+__poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file,
+ poll_table *wait);
+#endif
+
+/**
+ * dvb_vb2_stream_on() - Wrapper to vb2_core_streamon() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ *
+ * Starts dvb streaming
+ */
+int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx);
+/**
+ * dvb_vb2_stream_off() - Wrapper to vb2_core_streamoff() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ *
+ * Stops dvb streaming
+ */
+int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx);
+
+/**
+ * dvb_vb2_reqbufs() - Wrapper to vb2_core_reqbufs() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @req: &struct dmx_requestbuffers passed from userspace in
+ * order to handle &DMX_REQBUFS.
+ *
+ * Initiate streaming by requesting a number of buffers. Also used to
+ * free previously requested buffers, if ``req->count`` is zero.
+ */
+int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req);
+
+/**
+ * dvb_vb2_querybuf() - Wrapper to vb2_core_querybuf() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @b: &struct dmx_buffer passed from userspace in
+ * order to handle &DMX_QUERYBUF.
+ *
+ * Fills @b with the properties of the buffer at index ``b->index``.
+ */
+int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b);
+
+/**
+ * dvb_vb2_expbuf() - Wrapper to vb2_core_expbuf() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @exp: &struct dmx_exportbuffer passed from userspace in
+ * order to handle &DMX_EXPBUF.
+ *
+ * Export a buffer as a file descriptor.
+ */
+int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp);
+
+/**
+ * dvb_vb2_qbuf() - Wrapper to vb2_core_qbuf() for Digital TV buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @b: &struct dmx_buffer passed from userspace in
+ * order to handle &DMX_QBUF.
+ *
+ * Queue a Digital TV buffer as requested by userspace
+ */
+int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b);
+
+/**
+ * dvb_vb2_dqbuf() - Wrapper to vb2_core_dqbuf() for Digital TV
+ * buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @b: &struct dmx_buffer passed from userspace in
+ * order to handle &DMX_DQBUF.
+ *
+ * Dequeue a Digital TV buffer to userspace
+ */
+int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b);
+
+/**
+ * dvb_vb2_mmap() - Wrapper to vb2_mmap() for Digital TV buffer handling.
+ *
+ * @ctx: control struct for VB2 handler
+ * @vma: pointer to &struct vm_area_struct with the vma passed
+ * to the mmap file operation handler in the driver.
+ *
+ * map Digital TV video buffers into application address space.
+ */
+int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma);
+
+#endif /* _DVB_VB2_H */
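These wrappers are meant to be called from the demux ioctl handler when CONFIG_DVB_MMAP is enabled; the stubs above let the same call sites compile to no-ops otherwise. A hypothetical dispatch sketch (argument unmarshalling is assumed to have been done already, e.g. by dvb_usercopy()):

static int demo_mmap_ioctl(struct dvb_vb2_ctx *ctx, unsigned int cmd,
			   void *parg)
{
	switch (cmd) {
	case DMX_REQBUFS:
		return dvb_vb2_reqbufs(ctx, parg);
	case DMX_QUERYBUF:
		return dvb_vb2_querybuf(ctx, parg);
	case DMX_EXPBUF:
		return dvb_vb2_expbuf(ctx, parg);
	case DMX_QBUF:
		return dvb_vb2_qbuf(ctx, parg);
	case DMX_DQBUF:
		return dvb_vb2_dqbuf(ctx, parg);
	default:
		return -ENOTTY;
	}
}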
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
new file mode 100644
index 000000000000..ee91516ad074
--- /dev/null
+++ b/include/media/dvbdev.h
@@ -0,0 +1,468 @@
+/*
+ * dvbdev.h
+ *
+ * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
+ * for convergence integrated media GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Lesser Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DVBDEV_H_
+#define _DVBDEV_H_
+
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <media/media-device.h>
+
+#define DVB_MAJOR 212
+
+#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0
+ #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS
+#else
+ #define DVB_MAX_ADAPTERS 16
+#endif
+
+#define DVB_UNSET (-1)
+
+/* List of DVB device types */
+
+/**
+ * enum dvb_device_type - type of the Digital TV device
+ *
+ * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI)
+ * @DVB_DEVICE_FRONTEND: Digital TV frontend.
+ * @DVB_DEVICE_DEMUX: Digital TV demux.
+ * @DVB_DEVICE_DVR: Digital TV digital video record (DVR).
+ * @DVB_DEVICE_CA: Digital TV Conditional Access (CA).
+ * @DVB_DEVICE_NET: Digital TV network.
+ *
+ * @DVB_DEVICE_VIDEO: Digital TV video decoder.
+ * Deprecated. Used only on av7110-av.
+ * @DVB_DEVICE_AUDIO: Digital TV audio decoder.
+ * Deprecated. Used only on av7110-av.
+ * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD).
+ * Deprecated. Used only on av7110.
+ */
+enum dvb_device_type {
+ DVB_DEVICE_SEC,
+ DVB_DEVICE_FRONTEND,
+ DVB_DEVICE_DEMUX,
+ DVB_DEVICE_DVR,
+ DVB_DEVICE_CA,
+ DVB_DEVICE_NET,
+
+ DVB_DEVICE_VIDEO,
+ DVB_DEVICE_AUDIO,
+ DVB_DEVICE_OSD,
+};
+
+#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \
+ static short adapter_nr[] = \
+ {[0 ... (DVB_MAX_ADAPTERS - 1)] = DVB_UNSET }; \
+ module_param_array(adapter_nr, short, NULL, 0444); \
+ MODULE_PARM_DESC(adapter_nr, "DVB adapter numbers")
+
+struct dvb_frontend;
+
+/**
+ * struct dvb_adapter - represents a Digital TV adapter using Linux DVB API
+ *
+ * @num: Number of the adapter
+ * @list_head: List with the DVB adapters
+ * @device_list: List with the DVB devices
+ * @name: Name of the adapter
+ * @proposed_mac: proposed MAC address for the adapter
+ * @priv: private data
+ * @device: pointer to struct device
+ * @module: pointer to struct module
+ * @mfe_shared: indicates mutually exclusive frontends.
+ * The usage of this flag is currently deprecated.
+ * @mfe_dvbdev: Frontend device in use, in the case of MFE
+ * @mfe_lock: Lock to prevent using the other frontends when MFE is
+ * used.
+ * @mdev: pointer to struct media_device, used when the media
+ * controller is used.
+ * @conn: RF connector. Used only if the device has no separate
+ * tuner.
+ * @conn_pads: pointer to struct media_pad associated with @conn;
+ */
+struct dvb_adapter {
+ int num;
+ struct list_head list_head;
+ struct list_head device_list;
+ const char *name;
+ u8 proposed_mac [6];
+ void* priv;
+
+ struct device *device;
+
+ struct module *module;
+
+ int mfe_shared; /* indicates mutually exclusive frontends */
+ struct dvb_device *mfe_dvbdev; /* frontend device in use */
+ struct mutex mfe_lock; /* access lock for thread creation */
+
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ struct media_device *mdev;
+ struct media_entity *conn;
+ struct media_pad *conn_pads;
+#endif
+};
+
+/**
+ * struct dvb_device - represents a DVB device node
+ *
+ * @list_head: List head with all DVB devices
+ * @fops: pointer to struct file_operations
+ * @adapter: pointer to the adapter that holds this device node
+ * @type: type of the device, as defined by &enum dvb_device_type.
+ * @minor: devnode minor number. Major number is always DVB_MAJOR.
+ * @id: device ID number, inside the adapter
+ * @readers: Initialized by the caller. Each call to open() in Read Only mode
+ * decreases this counter by one.
+ * @writers: Initialized by the caller. Each call to open() in Read/Write
+ * mode decreases this counter by one.
+ * @users: Initialized by the caller. Each call to open() in any mode
+ * decreases this counter by one.
+ * @wait_queue: wait queue, used to wait for certain events inside one of
+ * the DVB API callers
+ * @kernel_ioctl: callback function used to handle ioctl calls from userspace.
+ * @name: Name to be used for the device at the Media Controller
+ * @entity: pointer to struct media_entity associated with the device node
+ * @pads: pointer to struct media_pad associated with @entity;
+ * @priv: private data
+ * @intf_devnode: Pointer to media_intf_devnode. Used by the dvbdev core to
+ * store the MC device node interface
+ * @tsout_num_entities: Number of Transport Stream output entities
+ * @tsout_entity: array with MC entities associated to each TS output node
+ * @tsout_pads: array with the source pads for each @tsout_entity
+ *
+ * This structure is used by the DVB core (frontend, CA, net, demux) in
+ * order to create the device nodes. Usually, drivers should not initialize
+ * this struct directly.
+ */
+struct dvb_device {
+ struct list_head list_head;
+ const struct file_operations *fops;
+ struct dvb_adapter *adapter;
+ enum dvb_device_type type;
+ int minor;
+ u32 id;
+
+ /* in theory, 'users' can vanish now,
+ but I don't want to change too much now... */
+ int readers;
+ int writers;
+ int users;
+
+ wait_queue_head_t wait_queue;
+ /* don't really need those !? -- FIXME: use video_usercopy */
+ int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg);
+
+ /* Needed for media controller register/unregister */
+#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
+ const char *name;
+
+ /* Allocated and filled inside dvbdev.c */
+ struct media_intf_devnode *intf_devnode;
+
+ unsigned tsout_num_entities;
+ struct media_entity *entity, *tsout_entity;
+ struct media_pad *pads, *tsout_pads;
+#endif
+
+ void *priv;
+};
+
+/**
+ * dvb_register_adapter - Registers a new DVB adapter
+ *
+ * @adap: pointer to struct dvb_adapter
+ * @name: Adapter's name
+ * @module: initialized with THIS_MODULE at the caller
+ * @device: pointer to struct device that corresponds to the device driver
+ * @adapter_nums: Array with a list of adapter numbers for
+ * dvb_register_adapter() to select among. Typically, initialized with:
+ * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums)
+ */
+int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
+ struct module *module, struct device *device,
+ short *adapter_nums);
+
+/**
+ * dvb_unregister_adapter - Unregisters a DVB adapter
+ *
+ * @adap: pointer to struct dvb_adapter
+ */
+int dvb_unregister_adapter(struct dvb_adapter *adap);
+
+/**
+ * dvb_register_device - Registers a new DVB device
+ *
+ * @adap: pointer to struct dvb_adapter
+ * @pdvbdev: pointer to the place where the new struct dvb_device will be
+ * stored
+ * @template: Template used to create &pdvbdev;
+ * @priv: private data
+ * @type: type of the device, as defined by &enum dvb_device_type.
+ * @demux_sink_pads: Number of demux outputs, to be used to create the TS
+ * outputs via the Media Controller.
+ */
+int dvb_register_device(struct dvb_adapter *adap,
+ struct dvb_device **pdvbdev,
+ const struct dvb_device *template,
+ void *priv,
+ enum dvb_device_type type,
+ int demux_sink_pads);
+
+/**
+ * dvb_remove_device - Remove a registered DVB device
+ *
+ * This does not free memory. To do that, call dvb_free_device().
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_remove_device(struct dvb_device *dvbdev);
+
+/**
+ * dvb_free_device - Free memory occupied by a DVB device.
+ *
+ * Call dvb_unregister_device() before calling this function.
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_free_device(struct dvb_device *dvbdev);
+
+/**
+ * dvb_unregister_device - Unregisters a DVB device
+ *
+ * This is a combination of dvb_remove_device() and dvb_free_device().
+ * Using this function is usually a mistake, and is often an indicator
+ * for a use-after-free bug (when a userspace process keeps a file
+ * handle to a detached device).
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_unregister_device(struct dvb_device *dvbdev);
+
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+/**
+ * dvb_create_media_graph - Creates media graph for the Digital TV part of the
+ * device.
+ *
+ * @adap: pointer to &struct dvb_adapter
+ * @create_rf_connector: if true, it creates the RF connector too
+ *
+ * This function checks all DVB-related functions at the media controller
+ * entities and creates the needed links for the media graph. It is
+ * capable of working with multiple tuners or multiple frontends, but it
+ * won't create links if the device has multiple tuners and multiple frontends
+ * or if the device has multiple muxes. In such case, the caller driver should
+ * manually create the remaining links.
+ */
+__must_check int dvb_create_media_graph(struct dvb_adapter *adap,
+ bool create_rf_connector);
+
+/**
+ * dvb_register_media_controller - registers a media controller at DVB adapter
+ *
+ * @adap: pointer to &struct dvb_adapter
+ * @mdev: pointer to &struct media_device
+ */
+static inline void dvb_register_media_controller(struct dvb_adapter *adap,
+ struct media_device *mdev)
+{
+ adap->mdev = mdev;
+}
+
+/**
+ * dvb_get_media_controller - gets the associated media controller
+ *
+ * @adap: pointer to &struct dvb_adapter
+ */
+static inline struct media_device
+*dvb_get_media_controller(struct dvb_adapter *adap)
+{
+ return adap->mdev;
+}
+#else
+static inline
+int dvb_create_media_graph(struct dvb_adapter *adap,
+ bool create_rf_connector)
+{
+ return 0;
+};
+#define dvb_register_media_controller(a, b) {}
+#define dvb_get_media_controller(a) NULL
+#endif
+
+/**
+ * dvb_generic_open - Digital TV open function, used by DVB devices
+ *
+ * @inode: pointer to &struct inode.
+ * @file: pointer to &struct file.
+ *
+ * Checks if a DVB devnode is still valid, and if the permissions are
+ * OK, and increments the negative use count.
+ */
+int dvb_generic_open(struct inode *inode, struct file *file);
+
+/**
+ * dvb_generic_release - Digital TV close function, used by DVB devices
+ *
+ * @inode: pointer to &struct inode.
+ * @file: pointer to &struct file.
+ *
+ * Checks if a DVB devnode is still valid, and if the permissions are
+ * OK, and decrements the negative use count.
+ */
+int dvb_generic_release(struct inode *inode, struct file *file);
+
+/**
+ * dvb_generic_ioctl - Digital TV ioctl function, used by DVB devices
+ *
+ * @file: pointer to &struct file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ *
+ * Checks if a DVB devnode and &struct dvb_device.kernel_ioctl are still valid.
+ * If so, calls dvb_usercopy().
+ */
+long dvb_generic_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+/**
+ * dvb_usercopy - copies data from/to userspace memory when an ioctl is
+ * issued.
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ * @func: function that will actually handle the ioctl
+ *
+ * Ancillary function that uses ioctl direction and size to copy from
+ * userspace. Then, it calls @func, and, if needed, data is copied back
+ * to userspace.
+ */
+int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+ int (*func)(struct file *file, unsigned int cmd, void *arg));
+
+#if IS_ENABLED(CONFIG_I2C)
+
+struct i2c_adapter;
+struct i2c_client;
+/**
+ * dvb_module_probe - helper routine to probe an I2C module
+ *
+ * @module_name:
+ * Name of the I2C module to be probed
+ * @name:
+ * Optional name for the I2C module. Used for debug purposes.
+ * If %NULL, defaults to @module_name.
+ * @adap:
+ * pointer to &struct i2c_adapter that describes the I2C adapter where
+ * the module will be bound.
+ * @addr:
+ * I2C address of the I2C device, in 7-bit notation.
+ * @platform_data:
+ * Platform data to be passed to the I2C module probed.
+ *
+ * This function binds an I2C device into the DVB core. Should be used by
+ * all drivers that use I2C bus to control the hardware. A module bound
+ * with dvb_module_probe() should use dvb_module_release() to unbind.
+ *
+ * Return:
+ * On success, returns an &struct i2c_client, pointing to the bound
+ * I2C device. %NULL otherwise.
+ *
+ * .. note::
+ *
+ * In the past, DVB modules (mainly, frontends) were bound via dvb_attach()
+ * macro, which does an ugly hack, using I2C low level functions. Such
+ * usage is deprecated and will be removed soon. Instead, use this routine.
+ */
+struct i2c_client *dvb_module_probe(const char *module_name,
+ const char *name,
+ struct i2c_adapter *adap,
+ unsigned char addr,
+ void *platform_data);
+
+/**
+ * dvb_module_release - releases an I2C device allocated with
+ * dvb_module_probe().
+ *
+ * @client: pointer to &struct i2c_client with the I2C client to be released.
+ * can be %NULL.
+ *
+ * This function should be used to free all resources reserved by
+ * dvb_module_probe() and to unbind the I2C device.
+ */
+void dvb_module_release(struct i2c_client *client);
+
+#endif /* CONFIG_I2C */
+
+/* Legacy generic DVB attach function. */
+#ifdef CONFIG_MEDIA_ATTACH
+
+/**
+ * dvb_attach - attaches a DVB frontend into the DVB core.
+ *
+ * @FUNCTION: function on a frontend module to be called.
+ * @ARGS...: @FUNCTION arguments.
+ *
+ * This ancillary function loads a frontend module at runtime and runs
+ * the @FUNCTION function there, with @ARGS.
+ * As it increments the symbol usage count, dvb_detach()
+ * should be called at unregister time.
+ *
+ * .. note::
+ *
+ * In the past, DVB modules (mainly, frontends) were bound via dvb_attach()
+ * macro, which does an ugly hack, using I2C low level functions. Such
+ * usage is deprecated and will be removed soon. Instead, you should use
+ * dvb_module_probe().
+ */
+#define dvb_attach(FUNCTION, ARGS...) ({ \
+ void *__r = NULL; \
+ typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+ if (__a) { \
+ __r = (void *) __a(ARGS); \
+ if (__r == NULL) \
+ symbol_put(FUNCTION); \
+ } else { \
+ printk(KERN_ERR "DVB: Unable to find symbol "#FUNCTION"()\n"); \
+ } \
+ __r; \
+})
+
+/**
+ * dvb_detach - detaches a DVB frontend loaded via dvb_attach()
+ *
+ * @FUNC: attach function
+ *
+ * Decrements usage count for a function previously called via dvb_attach().
+ */
+
+#define dvb_detach(FUNC) symbol_put_addr(FUNC)
+
+#else
+#define dvb_attach(FUNCTION, ARGS...) ({ \
+ FUNCTION(ARGS); \
+})
+
+#define dvb_detach(FUNC) {}
+
+#endif /* CONFIG_MEDIA_ATTACH */
+
+#endif /* #ifndef _DVBDEV_H_ */
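Putting the adapter registration and module-probe helpers together, a bridge driver's init and exit paths could look like the sketch below. The mycard container, its member names, the "mydemod" module name and the 0x68 I2C address are all assumptions for illustration:

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static int mycard_dvb_init(struct mycard *card)
{
	int ret;

	ret = dvb_register_adapter(&card->adapter, "mycard", THIS_MODULE,
				   &card->pci_dev->dev, adapter_nr);
	if (ret < 0)
		return ret;

	/* Bind the demod driver; replaces the legacy dvb_attach() pattern. */
	card->demod_client = dvb_module_probe("mydemod", NULL, card->i2c_adap,
					      0x68, &card->demod_pdata);
	if (!card->demod_client) {
		dvb_unregister_adapter(&card->adapter);
		return -ENODEV;
	}
	return 0;
}

static void mycard_dvb_exit(struct mycard *card)
{
	dvb_module_release(card->demod_client);
	dvb_unregister_adapter(&card->adapter);
}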
diff --git a/include/media/i2c-addr.h b/include/media/i2c-addr.h
deleted file mode 100644
index 1b6872f5e970..000000000000
--- a/include/media/i2c-addr.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * V4L I2C address list
- *
- *
- * Copyright (C) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
- * Based on a previous mapping by
- * Ralph Metzler (rjkm@thp.uni-koeln.de)
- * Gerd Knorr <kraxel@goldbach.in-berlin.de>
- *
- */
-
-/* bttv address list */
-#define I2C_ADDR_TSA5522 0xc2
-#define I2C_ADDR_TDA7432 0x8a
-#define I2C_ADDR_TDA8425 0x82
-#define I2C_ADDR_TDA9840 0x84
-#define I2C_ADDR_TDA9850 0xb6 /* also used by 9855,9873 */
-#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */
-#define I2C_ADDR_TDA9875 0xb0
-#define I2C_ADDR_HAUPEE 0xa0
-#define I2C_ADDR_STBEE 0xae
-#define I2C_ADDR_VHX 0xc0
-#define I2C_ADDR_MSP3400 0x80
-#define I2C_ADDR_MSP3400_ALT 0x88
-#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */
-#define I2C_ADDR_DPL3518 0x84
-#define I2C_ADDR_TDA9887 0x86
-
-/*
- * i2c bus addresses for the chips supported by tvaudio.c
- */
-
-#define I2C_ADDR_TDA8425 0x82
-#define I2C_ADDR_TDA9840 0x84 /* also used by TA8874Z */
-#define I2C_ADDR_TDA985x_L 0xb4 /* also used by 9873 */
-#define I2C_ADDR_TDA985x_H 0xb6
-#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */
-
-#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */
-#define I2C_ADDR_TEA6420 0x98
-
-#define I2C_ADDR_PIC16C54 0x96 /* PV951 */
diff --git a/include/media/i2c/ad9389b.h b/include/media/i2c/ad9389b.h
index 5ba9af869b8b..30f9ea9a1273 100644
--- a/include/media/i2c/ad9389b.h
+++ b/include/media/i2c/ad9389b.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Analog Devices AD9389B/AD9889B video encoder driver header
*
* Copyright 2012 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef AD9389B_H
diff --git a/include/media/i2c/adv7511.h b/include/media/i2c/adv7511.h
index 61c3d711cc69..1874c05f486f 100644
--- a/include/media/i2c/adv7511.h
+++ b/include/media/i2c/adv7511.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Analog Devices ADV7511 HDMI Transmitter Device Driver
*
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef ADV7511_H
diff --git a/include/media/i2c/adv7604.h b/include/media/i2c/adv7604.h
index 2e6857dee0cc..77a9799128b6 100644
--- a/include/media/i2c/adv7604.h
+++ b/include/media/i2c/adv7604.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* adv7604 - Analog Devices ADV7604 video decoder driver
*
* Copyright 2012 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ADV7604_
diff --git a/include/media/i2c/adv7842.h b/include/media/i2c/adv7842.h
index 7f53ada9bdf1..05e01f0dd3c2 100644
--- a/include/media/i2c/adv7842.h
+++ b/include/media/i2c/adv7842.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* adv7842 - Analog Devices ADV7842 video decoder driver
*
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ADV7842_
diff --git a/include/media/i2c/as3645a.h b/include/media/i2c/as3645a.h
deleted file mode 100644
index fffd4b563f5a..000000000000
--- a/include/media/i2c/as3645a.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * include/media/i2c/as3645a.h
- *
- * Copyright (C) 2008-2011 Nokia Corporation
- *
- * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-
-#ifndef __AS3645A_H__
-#define __AS3645A_H__
-
-#include <media/v4l2-subdev.h>
-
-#define AS3645A_NAME "as3645a"
-#define AS3645A_I2C_ADDR (0x60 >> 1) /* W:0x60, R:0x61 */
-
-#define AS3645A_FLASH_TIMEOUT_MIN 100000 /* us */
-#define AS3645A_FLASH_TIMEOUT_MAX 850000
-#define AS3645A_FLASH_TIMEOUT_STEP 50000
-
-#define AS3645A_FLASH_INTENSITY_MIN 200 /* mA */
-#define AS3645A_FLASH_INTENSITY_MAX_1LED 500
-#define AS3645A_FLASH_INTENSITY_MAX_2LEDS 400
-#define AS3645A_FLASH_INTENSITY_STEP 20
-
-#define AS3645A_TORCH_INTENSITY_MIN 20 /* mA */
-#define AS3645A_TORCH_INTENSITY_MAX 160
-#define AS3645A_TORCH_INTENSITY_STEP 20
-
-#define AS3645A_INDICATOR_INTENSITY_MIN 0 /* uA */
-#define AS3645A_INDICATOR_INTENSITY_MAX 10000
-#define AS3645A_INDICATOR_INTENSITY_STEP 2500
-
-/*
- * as3645a_platform_data - Flash controller platform data
- * @set_power: Set power callback
- * @vref: VREF offset (0=0V, 1=+0.3V, 2=-0.3V, 3=+0.6V)
- * @peak: Inductor peak current limit (0=1.25A, 1=1.5A, 2=1.75A, 3=2.0A)
- * @ext_strobe: True if external flash strobe can be used
- * @flash_max_current: Max flash current (mA, <= AS3645A_FLASH_INTENSITY_MAX)
- * @torch_max_current: Max torch current (mA, >= AS3645A_TORCH_INTENSITY_MAX)
- * @timeout_max: Max flash timeout (us, <= AS3645A_FLASH_TIMEOUT_MAX)
- */
-struct as3645a_platform_data {
- int (*set_power)(struct v4l2_subdev *subdev, int on);
- unsigned int vref;
- unsigned int peak;
- bool ext_strobe;
-
- /* Flash and torch currents and timeout limits */
- unsigned int flash_max_current;
- unsigned int torch_max_current;
- unsigned int timeout_max;
-};
-
-#endif /* __AS3645A_H__ */
diff --git a/include/media/i2c/bt819.h b/include/media/i2c/bt819.h
index 8025f4bc2bb6..1bcf0dbeb516 100644
--- a/include/media/i2c/bt819.h
+++ b/include/media/i2c/bt819.h
@@ -30,7 +30,7 @@
   Note: these ioctls are internal to the kernel and are never called
   from userspace. */
-#define BT819_FIFO_RESET_LOW _IO('b', 0)
-#define BT819_FIFO_RESET_HIGH _IO('b', 1)
+#define BT819_FIFO_RESET_LOW _IO('b', 0)
+#define BT819_FIFO_RESET_HIGH _IO('b', 1)
#endif
diff --git a/include/media/i2c/ir-kbd-i2c.h b/include/media/i2c/ir-kbd-i2c.h
index 76491c62c254..9f47d6a48cff 100644
--- a/include/media/i2c/ir-kbd-i2c.h
+++ b/include/media/i2c/ir-kbd-i2c.h
@@ -19,11 +19,15 @@ struct IR_i2c {
u32 polling_interval; /* in ms */
struct delayed_work work;
- char name[32];
char phys[32];
int (*get_key)(struct IR_i2c *ir,
enum rc_proto *protocol,
u32 *scancode, u8 *toggle);
+ /* tx */
+ struct i2c_client *tx_c;
+ struct mutex lock; /* do not poll Rx during Tx */
+ unsigned int carrier;
+ unsigned int duty_cycle;
};
enum ir_kbd_get_key_fn {
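The new tx members pair an extra i2c_client for the transmitter with a mutex whose comment says Rx must not be polled during Tx. A minimal sketch of a driver honouring that rule follows; ir_example_send and the buffer handling are hypothetical, only the tx_c and lock fields come from the header, and the polling work is assumed to take the same mutex.

/* Hypothetical Tx path: hold ir->lock for the whole transfer so the Rx
 * polling work (assumed to take the same lock) cannot run concurrently. */
static int ir_example_send(struct IR_i2c *ir, const char *buf, int count)
{
	int ret;

	mutex_lock(&ir->lock);
	ret = i2c_master_send(ir->tx_c, buf, count);
	mutex_unlock(&ir->lock);

	return ret < 0 ? ret : 0;
}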
diff --git a/include/media/i2c/m52790.h b/include/media/i2c/m52790.h
index 7ddffae31a67..8d9db3cf6fab 100644
--- a/include/media/i2c/m52790.h
+++ b/include/media/i2c/m52790.h
@@ -23,57 +23,57 @@
/* Input routing switch 1 */
-#define M52790_SW1_IN_MASK 0x0003
-#define M52790_SW1_IN_TUNER 0x0000
-#define M52790_SW1_IN_V2 0x0001
-#define M52790_SW1_IN_V3 0x0002
-#define M52790_SW1_IN_V4 0x0003
+#define M52790_SW1_IN_MASK 0x0003
+#define M52790_SW1_IN_TUNER 0x0000
+#define M52790_SW1_IN_V2 0x0001
+#define M52790_SW1_IN_V3 0x0002
+#define M52790_SW1_IN_V4 0x0003
/* Selects component input instead of composite */
-#define M52790_SW1_YCMIX 0x0004
+#define M52790_SW1_YCMIX 0x0004
/* Input routing switch 2 */
-#define M52790_SW2_IN_MASK 0x0300
-#define M52790_SW2_IN_TUNER 0x0000
-#define M52790_SW2_IN_V2 0x0100
-#define M52790_SW2_IN_V3 0x0200
-#define M52790_SW2_IN_V4 0x0300
+#define M52790_SW2_IN_MASK 0x0300
+#define M52790_SW2_IN_TUNER 0x0000
+#define M52790_SW2_IN_V2 0x0100
+#define M52790_SW2_IN_V3 0x0200
+#define M52790_SW2_IN_V4 0x0300
/* Selects component input instead of composite */
-#define M52790_SW2_YCMIX 0x0400
+#define M52790_SW2_YCMIX 0x0400
/* Output routing switch 1 */
/* Enable 6dB amplifier for composite out */
-#define M52790_SW1_V_AMP 0x0008
+#define M52790_SW1_V_AMP 0x0008
/* Enable 6dB amplifier for component out */
-#define M52790_SW1_YC_AMP 0x0010
+#define M52790_SW1_YC_AMP 0x0010
/* Audio output mode */
-#define M52790_SW1_AUDIO_MASK 0x00c0
-#define M52790_SW1_AUDIO_MUTE 0x0000
-#define M52790_SW1_AUDIO_R 0x0040
-#define M52790_SW1_AUDIO_L 0x0080
+#define M52790_SW1_AUDIO_MASK 0x00c0
+#define M52790_SW1_AUDIO_MUTE 0x0000
+#define M52790_SW1_AUDIO_R 0x0040
+#define M52790_SW1_AUDIO_L 0x0080
#define M52790_SW1_AUDIO_STEREO 0x00c0
/* Output routing switch 2 */
/* Enable 6dB amplifier for composite out */
-#define M52790_SW2_V_AMP 0x0800
+#define M52790_SW2_V_AMP 0x0800
/* Enable 6dB amplifier for component out */
-#define M52790_SW2_YC_AMP 0x1000
+#define M52790_SW2_YC_AMP 0x1000
/* Audio output mode */
-#define M52790_SW2_AUDIO_MASK 0xc000
-#define M52790_SW2_AUDIO_MUTE 0x0000
-#define M52790_SW2_AUDIO_R 0x4000
-#define M52790_SW2_AUDIO_L 0x8000
+#define M52790_SW2_AUDIO_MASK 0xc000
+#define M52790_SW2_AUDIO_MUTE 0x0000
+#define M52790_SW2_AUDIO_R 0x4000
+#define M52790_SW2_AUDIO_L 0x8000
#define M52790_SW2_AUDIO_STEREO 0xc000
@@ -83,9 +83,9 @@
#define M52790_IN_V3 (M52790_SW1_IN_V3 | M52790_SW2_IN_V3)
#define M52790_IN_V4 (M52790_SW1_IN_V4 | M52790_SW2_IN_V4)
-#define M52790_OUT_STEREO (M52790_SW1_AUDIO_STEREO | \
+#define M52790_OUT_STEREO (M52790_SW1_AUDIO_STEREO | \
M52790_SW2_AUDIO_STEREO)
-#define M52790_OUT_AMP_STEREO (M52790_SW1_AUDIO_STEREO | \
+#define M52790_OUT_AMP_STEREO (M52790_SW1_AUDIO_STEREO | \
M52790_SW1_V_AMP | \
M52790_SW2_AUDIO_STEREO | \
M52790_SW2_V_AMP)
diff --git a/include/media/i2c/mt9t112.h b/include/media/i2c/mt9t112.h
index a43c74ab05ec..cc80d5cc2104 100644
--- a/include/media/i2c/mt9t112.h
+++ b/include/media/i2c/mt9t112.h
@@ -1,28 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* mt9t112 Camera
*
* Copyright (C) 2009 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MT9T112_H__
#define __MT9T112_H__
-#define MT9T112_FLAG_PCLK_RISING_EDGE (1 << 0)
-#define MT9T112_FLAG_DATAWIDTH_8 (1 << 1) /* default width is 10 */
-
struct mt9t112_pll_divider {
u8 m, n;
u8 p1, p2, p3, p4, p5, p6, p7;
};
-/*
- * mt9t112 camera info
+/**
+ * mt9t112_platform_data - mt9t112 driver interface
+ * @flags: Sensor media bus configuration.
+ * @divider: Sensor PLL configuration
*/
-struct mt9t112_camera_info {
+struct mt9t112_platform_data {
+#define MT9T112_FLAG_PCLK_RISING_EDGE BIT(0)
u32 flags;
struct mt9t112_pll_divider divider;
};
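For reference, a board file would now instantiate the renamed structure roughly as sketched below; every divider value is a made-up placeholder, only the field and flag names come from this header.

static struct mt9t112_platform_data example_mt9t112_pdata = {
	.flags = MT9T112_FLAG_PCLK_RISING_EDGE,
	.divider = {
		/* placeholder PLL settings, not taken from any real board */
		.m = 24, .n = 1,
		.p1 = 0, .p2 = 8, .p3 = 0, .p4 = 11, .p5 = 11, .p6 = 8, .p7 = 0,
	},
};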
diff --git a/include/media/i2c/ov772x.h b/include/media/i2c/ov772x.h
index 00dbb7c4feae..27d087baffc5 100644
--- a/include/media/i2c/ov772x.h
+++ b/include/media/i2c/ov772x.h
@@ -48,8 +48,10 @@ struct ov772x_edge_ctrl {
.threshold = (t & OV772X_EDGE_THRESHOLD_MASK), \
}
-/*
- * ov772x camera info
+/**
+ * ov772x_camera_info - ov772x driver interface structure
+ * @flags: Sensor configuration flags
+ * @edgectrl: Sensor edge control
*/
struct ov772x_camera_info {
unsigned long flags;
diff --git a/include/media/i2c/saa6588.h b/include/media/i2c/saa6588.h
index b5ec1aa60ed5..a0825f532f71 100644
--- a/include/media/i2c/saa6588.h
+++ b/include/media/i2c/saa6588.h
@@ -32,6 +32,7 @@ struct saa6588_command {
unsigned char __user *buffer;
struct file *instance;
poll_table *event_list;
+ __poll_t poll_mask;
};
/* These ioctls are internal to the kernel */
diff --git a/include/media/i2c/saa7115.h b/include/media/i2c/saa7115.h
index 53954c90e7f6..a0cda423509d 100644
--- a/include/media/i2c/saa7115.h
+++ b/include/media/i2c/saa7115.h
@@ -36,15 +36,15 @@
#define SAA7115_SVIDEO3 9
/* outputs */
-#define SAA7115_IPORT_ON 1
-#define SAA7115_IPORT_OFF 0
+#define SAA7115_IPORT_ON 1
+#define SAA7115_IPORT_OFF 0
/* SAA7111 specific outputs. */
-#define SAA7111_VBI_BYPASS 2
+#define SAA7111_VBI_BYPASS 2
#define SAA7111_FMT_YUV422 0x00
-#define SAA7111_FMT_RGB 0x40
-#define SAA7111_FMT_CCIR 0x80
-#define SAA7111_FMT_YUV411 0xc0
+#define SAA7111_FMT_RGB 0x40
+#define SAA7111_FMT_CCIR 0x80
+#define SAA7111_FMT_YUV411 0xc0
/* config flags */
/*
diff --git a/include/media/i2c/tc358743.h b/include/media/i2c/tc358743.h
index 4513f2f9cfbc..b343650c2948 100644
--- a/include/media/i2c/tc358743.h
+++ b/include/media/i2c/tc358743.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tc358743 - Toshiba HDMI to CSI-2 bridge
*
- * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
- * reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
+ * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
/*
diff --git a/include/media/i2c/tda1997x.h b/include/media/i2c/tda1997x.h
new file mode 100644
index 000000000000..c6c2a8ae413d
--- /dev/null
+++ b/include/media/i2c/tda1997x.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tda1997x - NXP HDMI receiver
+ *
+ * Copyright 2017 Tim Harvey <tharvey@gateworks.com>
+ *
+ */
+
+#ifndef _TDA1997X_
+#define _TDA1997X_
+
+/* Platform Data */
+struct tda1997x_platform_data {
+ enum v4l2_mbus_type vidout_bus_type;
+ u32 vidout_bus_width;
+ u8 vidout_port_cfg[9];
+ /* pin polarity (1=invert) */
+ bool vidout_inv_de;
+ bool vidout_inv_hs;
+ bool vidout_inv_vs;
+ bool vidout_inv_pclk;
+ /* clock delays (0=-8, 1=-7 ... 15=+7 pixels) */
+ u8 vidout_delay_hs;
+ u8 vidout_delay_vs;
+ u8 vidout_delay_de;
+ u8 vidout_delay_pclk;
+ /* sync selections (controls how sync pins are derived) */
+ u8 vidout_sel_hs;
+ u8 vidout_sel_vs;
+ u8 vidout_sel_de;
+
+ /* Audio Port Output */
+ int audout_format;
+ u32 audout_mclk_fs; /* clock multiplier */
+ u32 audout_width; /* 13 or 32 bit */
+ u32 audout_layout; /* layout0=AP0 layout1=AP0,AP1,AP2,AP3 */
+ bool audout_layoutauto; /* audio layout dictated by pkt header */
+ bool audout_invert_clk; /* data valid on rising edge of BCLK */
+ bool audio_auto_mute; /* enable hardware audio auto-mute */
+};
+
+#endif
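A hedged sketch of how a board might fill the new platform data: the values below are illustrative placeholders, V4L2_MBUS_PARALLEL (from media/v4l2-mediabus.h) is assumed to be the bus type used by the board, and only the field names come from this header.

static struct tda1997x_platform_data example_tda1997x_pdata = {
	.vidout_bus_type  = V4L2_MBUS_PARALLEL,
	.vidout_bus_width = 16,
	.vidout_inv_pclk  = true,	/* latch video data on the falling edge */
	.audout_format    = 0,		/* placeholder audio format code */
	.audout_mclk_fs   = 128,	/* MCLK = 128 * fs */
	.audout_width     = 32,
	.audout_layout    = 0,		/* AP0 only */
};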
diff --git a/include/media/i2c/ths7303.h b/include/media/i2c/ths7303.h
index 834e2f95b630..95492d12786d 100644
--- a/include/media/i2c/ths7303.h
+++ b/include/media/i2c/ths7303.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Texas Instruments Inc
*
@@ -7,15 +8,6 @@
* Hans Verkuil <hans.verkuil@cisco.com>
* Lad, Prabhakar <prabhakar.lad@ti.com>
* Martin Bugge <marbugge@cisco.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef THS7353_H
diff --git a/include/media/i2c/tvaudio.h b/include/media/i2c/tvaudio.h
index 1ac8184693f8..f13e1a386364 100644
--- a/include/media/i2c/tvaudio.h
+++ b/include/media/i2c/tvaudio.h
@@ -21,7 +21,22 @@
#ifndef _TVAUDIO_H
#define _TVAUDIO_H
-#include <media/i2c-addr.h>
+/*
+ * i2c bus addresses for the chips supported by tvaudio.c
+ */
+
+#define I2C_ADDR_TDA8425	0x82
+#define I2C_ADDR_TDA9840	0x84	/* also used by TA8874Z */
+#define I2C_ADDR_TDA985x_L	0xb4	/* also used by 9873 */
+#define I2C_ADDR_TDA985x_H	0xb6
+#define I2C_ADDR_TDA9874	0xb0	/* also used by 9875 */
+#define I2C_ADDR_TDA9875	0xb0
+#define I2C_ADDR_TEA6300	0x80	/* also used by 6320 */
+#define I2C_ADDR_TEA6420	0x98
+#define I2C_ADDR_PIC16C54	0x96	/* PV951 */
/* The tvaudio module accepts the following inputs: */
#define TVAUDIO_INPUT_TUNER 0
diff --git a/include/media/i2c/tw9910.h b/include/media/i2c/tw9910.h
index 90bcf1fa5421..bec8f7bce745 100644
--- a/include/media/i2c/tw9910.h
+++ b/include/media/i2c/tw9910.h
@@ -18,6 +18,9 @@
#include <media/soc_camera.h>
+/**
+ * tw9910_mpout_pin - MPOUT (multi-purpose output) pin functions
+ */
enum tw9910_mpout_pin {
TW9910_MPO_VLOSS,
TW9910_MPO_HLOCK,
@@ -29,6 +32,12 @@ enum tw9910_mpout_pin {
TW9910_MPO_RTCO,
};
+/**
+ * tw9910_video_info - tw9910 driver interface structure
+ * @buswidth: Parallel data bus width (8 or 16).
+ * @mpout: Selected function of MPOUT (multi-purpose output) pin.
+ * See &enum tw9910_mpout_pin
+ */
struct tw9910_video_info {
unsigned long buswidth;
enum tw9910_mpout_pin mpout;
diff --git a/include/media/i2c/uda1342.h b/include/media/i2c/uda1342.h
index cd156403a368..cb412d4c1088 100644
--- a/include/media/i2c/uda1342.h
+++ b/include/media/i2c/uda1342.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* uda1342.h - definition for uda1342 inputs
*
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _UDA1342_H_
diff --git a/include/media/i2c/upd64031a.h b/include/media/i2c/upd64031a.h
index 48ec03c4ef23..1eba24dfee48 100644
--- a/include/media/i2c/upd64031a.h
+++ b/include/media/i2c/upd64031a.h
@@ -18,9 +18,9 @@
#define _UPD64031A_H_
/* Ghost reduction modes */
-#define UPD64031A_GR_ON 0
-#define UPD64031A_GR_OFF 1
-#define UPD64031A_GR_THROUGH 3
+#define UPD64031A_GR_ON 0
+#define UPD64031A_GR_OFF 1
+#define UPD64031A_GR_THROUGH 3
/* Direct 3D/YCS Connection */
#define UPD64031A_3DYCS_DISABLE (0 << 2)
diff --git a/include/media/lirc.h b/include/media/lirc.h
deleted file mode 100644
index 554988c860c1..000000000000
--- a/include/media/lirc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <uapi/linux/lirc.h>
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
deleted file mode 100644
index 857da67bd931..000000000000
--- a/include/media/lirc_dev.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * LIRC base driver
- *
- * by Artur Lipowski <alipowski@interia.pl>
- * This code is licensed under GNU GPL
- *
- */
-
-#ifndef _LINUX_LIRC_DEV_H
-#define _LINUX_LIRC_DEV_H
-
-#define BUFLEN 16
-
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/ioctl.h>
-#include <linux/poll.h>
-#include <linux/kfifo.h>
-#include <media/lirc.h>
-#include <linux/device.h>
-#include <linux/cdev.h>
-
-struct lirc_buffer {
- wait_queue_head_t wait_poll;
- spinlock_t fifo_lock;
- unsigned int chunk_size;
- unsigned int size; /* in chunks */
- /* Using chunks instead of bytes pretends to simplify boundary checking
- * And should allow for some performance fine tunning later */
- struct kfifo fifo;
-};
-
-static inline void lirc_buffer_clear(struct lirc_buffer *buf)
-{
- unsigned long flags;
-
- if (kfifo_initialized(&buf->fifo)) {
- spin_lock_irqsave(&buf->fifo_lock, flags);
- kfifo_reset(&buf->fifo);
- spin_unlock_irqrestore(&buf->fifo_lock, flags);
- } else
- WARN(1, "calling %s on an uninitialized lirc_buffer\n",
- __func__);
-}
-
-static inline int lirc_buffer_init(struct lirc_buffer *buf,
- unsigned int chunk_size,
- unsigned int size)
-{
- int ret;
-
- init_waitqueue_head(&buf->wait_poll);
- spin_lock_init(&buf->fifo_lock);
- buf->chunk_size = chunk_size;
- buf->size = size;
- ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL);
-
- return ret;
-}
-
-static inline void lirc_buffer_free(struct lirc_buffer *buf)
-{
- if (kfifo_initialized(&buf->fifo)) {
- kfifo_free(&buf->fifo);
- } else
- WARN(1, "calling %s on an uninitialized lirc_buffer\n",
- __func__);
-}
-
-static inline int lirc_buffer_len(struct lirc_buffer *buf)
-{
- int len;
- unsigned long flags;
-
- spin_lock_irqsave(&buf->fifo_lock, flags);
- len = kfifo_len(&buf->fifo);
- spin_unlock_irqrestore(&buf->fifo_lock, flags);
-
- return len;
-}
-
-static inline int lirc_buffer_full(struct lirc_buffer *buf)
-{
- return lirc_buffer_len(buf) == buf->size * buf->chunk_size;
-}
-
-static inline int lirc_buffer_empty(struct lirc_buffer *buf)
-{
- return !lirc_buffer_len(buf);
-}
-
-static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf,
- unsigned char *dest)
-{
- unsigned int ret = 0;
-
- if (lirc_buffer_len(buf) >= buf->chunk_size)
- ret = kfifo_out_locked(&buf->fifo, dest, buf->chunk_size,
- &buf->fifo_lock);
- return ret;
-
-}
-
-static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf,
- unsigned char *orig)
-{
- unsigned int ret;
-
- ret = kfifo_in_locked(&buf->fifo, orig, buf->chunk_size,
- &buf->fifo_lock);
-
- return ret;
-}
-
-/**
- * struct lirc_dev - represents a LIRC device
- *
- * @name: used for logging
- * @minor: the minor device (/dev/lircX) number for the device
- * @code_length: length of a remote control key code expressed in bits
- * @features: lirc compatible hardware features, like LIRC_MODE_RAW,
- * LIRC_CAN\_\*, as defined at include/media/lirc.h.
- * @buffer_size: Number of FIFO buffers with @chunk_size size.
- * Only used if @rbuf is NULL.
- * @chunk_size: Size of each FIFO buffer.
- * Only used if @rbuf is NULL.
- * @data: private per-driver data
- * @buf: if %NULL, lirc_dev will allocate and manage the buffer,
- * otherwise allocated by the caller which will
- * have to write to the buffer by other means, like irq's
- * (see also lirc_serial.c).
- * @buf_internal: whether lirc_dev has allocated the read buffer or not
- * @rdev: &struct rc_dev associated with the device
- * @fops: &struct file_operations for the device
- * @owner: the module owning this struct
- * @attached: if the device is still live
- * @open: open count for the device's chardev
- * @mutex: serialises file_operations calls
- * @dev: &struct device assigned to the device
- * @cdev: &struct cdev assigned to the device
- */
-struct lirc_dev {
- char name[40];
- unsigned int minor;
- __u32 code_length;
- __u32 features;
-
- unsigned int buffer_size; /* in chunks holding one code each */
- unsigned int chunk_size;
- struct lirc_buffer *buf;
- bool buf_internal;
-
- void *data;
- struct rc_dev *rdev;
- const struct file_operations *fops;
- struct module *owner;
-
- bool attached;
- int open;
-
- struct mutex mutex; /* protect from simultaneous accesses */
-
- struct device dev;
- struct cdev cdev;
-};
-
-struct lirc_dev *lirc_allocate_device(void);
-
-void lirc_free_device(struct lirc_dev *d);
-
-int lirc_register_device(struct lirc_dev *d);
-
-void lirc_unregister_device(struct lirc_dev *d);
-
-/* Must be called in the open fop before lirc_get_pdata() can be used */
-void lirc_init_pdata(struct inode *inode, struct file *file);
-
-/* Returns the private data stored in the lirc_dev
- * associated with the given device file pointer.
- */
-void *lirc_get_pdata(struct file *file);
-
-/* default file operations
- * used by drivers if they override only some operations
- */
-int lirc_dev_fop_open(struct inode *inode, struct file *file);
-int lirc_dev_fop_close(struct inode *inode, struct file *file);
-unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait);
-long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length,
- loff_t *ppos);
-#endif
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 511615d3bf6f..dc2f64e1b08f 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -56,7 +56,7 @@ struct media_file_operations {
struct module *owner;
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*open) (struct file *);
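The poll file operation now returns the bit-typed __poll_t. A minimal handler matching the new prototype could look like the sketch below, where example_wq and example_data_ready are hypothetical driver state.

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_data_ready;

static __poll_t example_poll(struct file *file, struct poll_table_struct *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &example_wq, wait);
	if (example_data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}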
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 222d379960b7..a732af1dbba0 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -88,6 +88,8 @@ struct media_entity_enum {
* @stack: Graph traversal stack; the stack contains information
* on the path the media entities to be walked and the
* links through which they were reached.
+ * @stack.entity: pointer to &struct media_entity at the graph.
+ * @stack.link: pointer to &struct list_head.
* @ent_enum: Visited entities
* @top: The top of the stack
*/
@@ -247,6 +249,9 @@ enum media_entity_type {
* @pipe: Pipeline this entity belongs to.
* @info: Union with devnode information. Kept just for backward
* compatibility.
+ * @info.dev: Contains device major and minor info.
+ * @info.dev.major: device node major, if the device is a devnode.
+ * @info.dev.minor: device node minor, if the device is a devnode.
* @major: Devnode major number (zero if not applicable). Kept just
* for backward compatibility.
* @minor: Devnode minor number (zero if not applicable). Kept just
@@ -629,7 +634,11 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
* This function must be called during the cleanup phase after unregistering
* the entity (currently, it does nothing).
*/
-static inline void media_entity_cleanup(struct media_entity *entity) {};
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+static inline void media_entity_cleanup(struct media_entity *entity) {}
+#else
+#define media_entity_cleanup(entity) do { } while (false)
+#endif
/**
* media_create_pad_link() - creates a link between two entities.
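With this change the stub is a static inline only when CONFIG_MEDIA_CONTROLLER is enabled; otherwise the do-while macro discards its argument, so a driver can keep an unconditional cleanup call even when its entity member is itself compiled out. A hedged sketch (sd is a hypothetical sub-device):

static void example_subdev_cleanup(struct v4l2_subdev *sd)
{
	/* Compiles with or without CONFIG_MEDIA_CONTROLLER: the macro form
	 * never evaluates &sd->entity when the media controller is off. */
	media_entity_cleanup(&sd->entity);
}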
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 314a1edb6189..6742fd86ff65 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -17,22 +17,16 @@
#define _RC_CORE
#include <linux/spinlock.h>
+#include <linux/cdev.h>
#include <linux/kfifo.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <media/rc-map.h>
-extern int rc_core_debug;
-#define IR_dprintk(level, fmt, ...) \
-do { \
- if (rc_core_debug >= level) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-
/**
- * enum rc_driver_type - type of the RC output
+ * enum rc_driver_type - type of the RC driver.
*
- * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode
+ * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode.
* @RC_DRIVER_IR_RAW: Driver or hardware generates pulse/space sequences.
 *                    It needs an Infra-Red pulse/space decoder
* @RC_DRIVER_IR_RAW_TX: Device transmitter only,
@@ -68,6 +62,33 @@ enum rc_filter_type {
};
/**
+ * struct lirc_fh - represents an open lirc file
+ * @list: list of open file handles
+ * @rc: rcdev for this lirc chardev
+ * @carrier_low: when setting the carrier range, first the low end must be
+ * set with an ioctl and then the high end with another ioctl
+ * @send_timeout_reports: report timeouts in lirc raw IR.
+ * @rawir: queue for incoming raw IR
+ * @scancodes: queue for incoming decoded scancodes
+ * @wait_poll: poll struct for lirc device
+ * @send_mode: lirc mode for sending, either LIRC_MODE_SCANCODE or
+ * LIRC_MODE_PULSE
+ * @rec_mode: lirc mode for receiving, either LIRC_MODE_SCANCODE or
+ * LIRC_MODE_MODE2
+ */
+struct lirc_fh {
+ struct list_head list;
+ struct rc_dev *rc;
+ int carrier_low;
+ bool send_timeout_reports;
+ DECLARE_KFIFO_PTR(rawir, unsigned int);
+ DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode);
+ wait_queue_head_t wait_poll;
+ u8 send_mode;
+ u8 rec_mode;
+};
+
+/**
* struct rc_dev - represents a remote control device
* @dev: driver model's view of this device
* @managed_alloc: devm_rc_allocate_device was used to create rc_dev
@@ -106,6 +127,8 @@ enum rc_filter_type {
* @keypressed: whether a key is currently pressed
* @keyup_jiffies: time (in jiffies) when the current keypress should be released
* @timer_keyup: timer for releasing a keypress
+ * @timer_repeat: timer for autorepeat events. This is needed for CEC, which
+ * has non-standard repeats.
* @last_keycode: keycode of last keypress
* @last_protocol: protocol of last keypress
* @last_scancode: scancode of last keypress
@@ -115,6 +138,15 @@ enum rc_filter_type {
* @max_timeout: maximum timeout supported by device
* @rx_resolution : resolution (in ns) of input sampler
* @tx_resolution: resolution (in ns) of output sampler
+ * @lirc_dev: lirc device
+ * @lirc_cdev: lirc char cdev
+ * @gap_start: time when gap starts
+ * @gap_duration: duration of initial gap
+ * @gap: true if we're in a gap
+ * @lirc_fh_lock: protects lirc_fh list
+ * @lirc_fh: list of open files
+ * @registered: set to true by rc_register_device(), false by
+ * rc_unregister_device
* @change_protocol: allow changing the protocol used on hardware decoders
* @open: callback to allow drivers to enable polling/irq when IR input device
* is opened.
@@ -165,6 +197,7 @@ struct rc_dev {
bool keypressed;
unsigned long keyup_jiffies;
struct timer_list timer_keyup;
+ struct timer_list timer_repeat;
u32 last_keycode;
enum rc_proto last_protocol;
u32 last_scancode;
@@ -174,6 +207,16 @@ struct rc_dev {
u32 max_timeout;
u32 rx_resolution;
u32 tx_resolution;
+#ifdef CONFIG_LIRC
+ struct device lirc_dev;
+ struct cdev lirc_cdev;
+ ktime_t gap_start;
+ u64 gap_duration;
+ bool gap;
+ spinlock_t lirc_fh_lock;
+ struct list_head lirc_fh;
+#endif
+ bool registered;
int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto);
int (*open)(struct rc_dev *dev);
void (*close)(struct rc_dev *dev);
@@ -248,20 +291,6 @@ int devm_rc_register_device(struct device *parent, struct rc_dev *dev);
*/
void rc_unregister_device(struct rc_dev *dev);
-/**
- * rc_open - Opens a RC device
- *
- * @rdev: pointer to struct rc_dev.
- */
-int rc_open(struct rc_dev *rdev);
-
-/**
- * rc_close - Closes a RC device
- *
- * @rdev: pointer to struct rc_dev.
- */
-void rc_close(struct rc_dev *rdev);
-
void rc_repeat(struct rc_dev *dev);
void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
u8 toggle);
@@ -305,10 +334,13 @@ void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse);
int ir_raw_event_store_with_filter(struct rc_dev *dev,
- struct ir_raw_event *ev);
+ struct ir_raw_event *ev);
+int ir_raw_event_store_with_timeout(struct rc_dev *dev,
+ struct ir_raw_event *ev);
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
struct ir_raw_event *events, unsigned int max);
+int ir_raw_encode_carrier(enum rc_proto protocol);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{
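For context, a raw IR driver feeds measurements to rc-core roughly as sketched below; rcdev and the 900 us duration are hypothetical, and durations are assumed to be expressed in nanoseconds in this kernel version.

static void example_push_pulse(struct rc_dev *rcdev)
{
	struct ir_raw_event ev = {};

	ev.pulse = true;
	ev.duration = 900 * 1000;	/* 900 us, in ns (assumed unit) */

	ir_raw_event_store(rcdev, &ev);
	ir_raw_event_handle(rcdev);	/* kick the decoder thread */
}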
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 72197cb43781..bfa3017cecba 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -10,59 +10,7 @@
*/
#include <linux/input.h>
-
-/**
- * enum rc_proto - the Remote Controller protocol
- *
- * @RC_PROTO_UNKNOWN: Protocol not known
- * @RC_PROTO_OTHER: Protocol known but proprietary
- * @RC_PROTO_RC5: Philips RC5 protocol
- * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol
- * @RC_PROTO_RC5_SZ: StreamZap variant of RC5
- * @RC_PROTO_JVC: JVC protocol
- * @RC_PROTO_SONY12: Sony 12 bit protocol
- * @RC_PROTO_SONY15: Sony 15 bit protocol
- * @RC_PROTO_SONY20: Sony 20 bit protocol
- * @RC_PROTO_NEC: NEC protocol
- * @RC_PROTO_NECX: Extended NEC protocol
- * @RC_PROTO_NEC32: NEC 32 bit protocol
- * @RC_PROTO_SANYO: Sanyo protocol
- * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard
- * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse
- * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol
- * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol
- * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol
- * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol
- * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol
- * @RC_PROTO_SHARP: Sharp protocol
- * @RC_PROTO_XMP: XMP protocol
- * @RC_PROTO_CEC: CEC protocol
- */
-enum rc_proto {
- RC_PROTO_UNKNOWN = 0,
- RC_PROTO_OTHER = 1,
- RC_PROTO_RC5 = 2,
- RC_PROTO_RC5X_20 = 3,
- RC_PROTO_RC5_SZ = 4,
- RC_PROTO_JVC = 5,
- RC_PROTO_SONY12 = 6,
- RC_PROTO_SONY15 = 7,
- RC_PROTO_SONY20 = 8,
- RC_PROTO_NEC = 9,
- RC_PROTO_NECX = 10,
- RC_PROTO_NEC32 = 11,
- RC_PROTO_SANYO = 12,
- RC_PROTO_MCIR2_KBD = 13,
- RC_PROTO_MCIR2_MSE = 14,
- RC_PROTO_RC6_0 = 15,
- RC_PROTO_RC6_6A_20 = 16,
- RC_PROTO_RC6_6A_24 = 17,
- RC_PROTO_RC6_6A_32 = 18,
- RC_PROTO_RC6_MCE = 19,
- RC_PROTO_SHARP = 20,
- RC_PROTO_XMP = 21,
- RC_PROTO_CEC = 22,
-};
+#include <uapi/linux/lirc.h>
#define RC_PROTO_BIT_NONE 0ULL
#define RC_PROTO_BIT_UNKNOWN BIT_ULL(RC_PROTO_UNKNOWN)
@@ -88,6 +36,7 @@ enum rc_proto {
#define RC_PROTO_BIT_SHARP BIT_ULL(RC_PROTO_SHARP)
#define RC_PROTO_BIT_XMP BIT_ULL(RC_PROTO_XMP)
#define RC_PROTO_BIT_CEC BIT_ULL(RC_PROTO_CEC)
+#define RC_PROTO_BIT_IMON BIT_ULL(RC_PROTO_IMON)
#define RC_PROTO_BIT_ALL \
(RC_PROTO_BIT_UNKNOWN | RC_PROTO_BIT_OTHER | \
@@ -101,7 +50,8 @@ enum rc_proto {
RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \
RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \
RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_SHARP | \
- RC_PROTO_BIT_XMP | RC_PROTO_BIT_CEC)
+ RC_PROTO_BIT_XMP | RC_PROTO_BIT_CEC | \
+ RC_PROTO_BIT_IMON)
/* All rc protocols for which we have decoders */
#define RC_PROTO_BIT_ALL_IR_DECODER \
(RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \
@@ -114,7 +64,7 @@ enum rc_proto {
RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \
RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \
RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_SHARP | \
- RC_PROTO_BIT_XMP)
+ RC_PROTO_BIT_XMP | RC_PROTO_BIT_IMON)
#define RC_PROTO_BIT_ALL_IR_ENCODER \
(RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \
@@ -127,7 +77,7 @@ enum rc_proto {
RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \
RC_PROTO_BIT_RC6_6A_24 | \
RC_PROTO_BIT_RC6_6A_32 | RC_PROTO_BIT_RC6_MCE | \
- RC_PROTO_BIT_SHARP)
+ RC_PROTO_BIT_SHARP | RC_PROTO_BIT_IMON)
#define RC_SCANCODE_UNKNOWN(x) (x)
#define RC_SCANCODE_OTHER(x) (x)
@@ -263,6 +213,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_HISI_TV_DEMO "rc-hisi-tv-demo"
#define RC_MAP_IMON_MCE "rc-imon-mce"
#define RC_MAP_IMON_PAD "rc-imon-pad"
+#define RC_MAP_IMON_RSC "rc-imon-rsc"
#define RC_MAP_IODATA_BCTV7E "rc-iodata-bctv7e"
#define RC_MAP_IT913X_V1 "rc-it913x-v1"
#define RC_MAP_IT913X_V2 "rc-it913x-v2"
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 4d8cb0796bc6..b7e42a1b0910 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -117,7 +117,7 @@ struct soc_camera_host_ops {
int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
- unsigned int (*poll)(struct file *, poll_table *);
+ __poll_t (*poll)(struct file *, poll_table *);
};
#define SOCAM_SENSOR_INVERT_PCLK (1 << 0)
diff --git a/include/media/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h
index 13e49d85cae3..eb191e85d363 100644
--- a/include/media/v4l2-tpg.h
+++ b/include/media/tpg/v4l2-tpg.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* v4l2-tpg.h - Test Pattern Generator
*
* Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _V4L2_TPG_H_
@@ -26,8 +14,51 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
-#include <media/v4l2-tpg-colors.h>
+struct tpg_rbg_color8 {
+ unsigned char r, g, b;
+};
+
+struct tpg_rbg_color16 {
+ __u16 r, g, b;
+};
+
+enum tpg_color {
+ TPG_COLOR_CSC_WHITE,
+ TPG_COLOR_CSC_YELLOW,
+ TPG_COLOR_CSC_CYAN,
+ TPG_COLOR_CSC_GREEN,
+ TPG_COLOR_CSC_MAGENTA,
+ TPG_COLOR_CSC_RED,
+ TPG_COLOR_CSC_BLUE,
+ TPG_COLOR_CSC_BLACK,
+ TPG_COLOR_75_YELLOW,
+ TPG_COLOR_75_CYAN,
+ TPG_COLOR_75_GREEN,
+ TPG_COLOR_75_MAGENTA,
+ TPG_COLOR_75_RED,
+ TPG_COLOR_75_BLUE,
+ TPG_COLOR_100_WHITE,
+ TPG_COLOR_100_YELLOW,
+ TPG_COLOR_100_CYAN,
+ TPG_COLOR_100_GREEN,
+ TPG_COLOR_100_MAGENTA,
+ TPG_COLOR_100_RED,
+ TPG_COLOR_100_BLUE,
+ TPG_COLOR_100_BLACK,
+ TPG_COLOR_TEXTFG,
+ TPG_COLOR_TEXTBG,
+ TPG_COLOR_RANDOM,
+ TPG_COLOR_RAMP,
+ TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
+};
+
+extern const struct tpg_rbg_color8 tpg_colors[TPG_COLOR_MAX];
+extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
+extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
+extern const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
+ [V4L2_XFER_FUNC_SMPTE2084 + 1]
+ [TPG_COLOR_CSC_BLACK + 1];
enum tpg_pattern {
TPG_PAT_75_COLORBAR,
TPG_PAT_100_COLORBAR,
diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h
index 78f0654d9c3d..df76ac8e658c 100644
--- a/include/media/tuner-types.h
+++ b/include/media/tuner-types.h
@@ -171,6 +171,21 @@ struct tuner_params {
struct tuner_range *ranges;
};
+/**
+ * struct tunertype - describes the known tuners.
+ *
+ * @name: string with the tuner's name.
+ * @count: size of &struct tuner_params array.
+ * @params: pointer to &struct tuner_params array.
+ *
+ * @min: minimal tuner frequency, in 62.5 kHz steps
+ *       (divide the value by 16 to get the frequency in MHz).
+ * @max: maximal tuner frequency, in 62.5 kHz steps
+ *       (divide the value by 16 to get the frequency in MHz).
+ * @stepsize: frequency step, in Hz.
+ * @initdata: optional byte sequence to initialize the tuner.
+ * @sleepdata: optional byte sequence to power down the tuner.
+ */
struct tunertype {
char *name;
unsigned int count;
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 6152434cbe82..1592d323c577 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -28,7 +28,7 @@ struct v4l2_async_notifier;
* in order to identify a match
*
* @V4L2_ASYNC_MATCH_CUSTOM: Match will use the logic provided by &struct
- * v4l2_async_subdev.match ops
+ * v4l2_async_subdev.match ops
* @V4L2_ASYNC_MATCH_DEVNAME: Match will use the device name
* @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address
* @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node
@@ -48,6 +48,31 @@ enum v4l2_async_match_type {
*
* @match_type: type of match that will be used
* @match: union of per-bus type matching data sets
+ * @match.fwnode:
+ * pointer to &struct fwnode_handle to be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE.
+ * @match.device_name:
+ * string containing the device name to be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_DEVNAME.
+ * @match.i2c: embedded struct with I2C parameters to be matched.
+ * Both @match.i2c.adapter_id and @match.i2c.address
+ * should be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
+ * @match.i2c.adapter_id:
+ * I2C adapter ID to be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
+ * @match.i2c.address:
+ * I2C address to be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
+ * @match.custom:
+ * Driver-specific match criteria.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_CUSTOM.
+ * @match.custom.match:
+ * Driver-specific match function to be used if
+ * %V4L2_ASYNC_MATCH_CUSTOM.
+ * @match.custom.priv:
+ * Driver-specific private struct with match parameters
+ * to be used if %V4L2_ASYNC_MATCH_CUSTOM.
* @list: used to link struct v4l2_async_subdev objects, waiting to be
* probed, to a notifier->waiting list
*
@@ -58,12 +83,8 @@ enum v4l2_async_match_type {
struct v4l2_async_subdev {
enum v4l2_async_match_type match_type;
union {
- struct {
- struct fwnode_handle *fwnode;
- } fwnode;
- struct {
- const char *name;
- } device_name;
+ struct fwnode_handle *fwnode;
+ const char *device_name;
struct {
int adapter_id;
unsigned short address;
@@ -167,7 +188,7 @@ void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier);
/**
* v4l2_async_register_subdev - registers a sub-device to the asynchronous
- * subdevice framework
+ * subdevice framework
*
* @sd: pointer to &struct v4l2_subdev
*/
@@ -197,7 +218,7 @@ int __must_check v4l2_async_register_subdev_sensor_common(
/**
* v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous
- * subdevice framework
+ * subdevice framework
*
* @sd: pointer to &struct v4l2_subdev
*/
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index aac8b7b6e691..160bca96d524 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -28,7 +28,7 @@
#include <media/v4l2-dev.h>
-/* Common printk constucts for v4l-i2c drivers. These macros create a unique
+/* Common printk constructs for v4l-i2c drivers. These macros create a unique
prefix consisting of the driver name, the adapter number and the i2c
address. */
#define v4l_printk(level, name, adapter, addr, fmt, arg...) \
@@ -50,7 +50,7 @@
/* These three macros assume that the debug level is set with a module
parameter called 'debug'. */
#define v4l_dbg(level, debug, client, fmt, arg...) \
- do { \
+ do { \
if (debug >= (level)) \
v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \
} while (0)
@@ -80,9 +80,9 @@
/* These three macros assume that the debug level is set with a module
parameter called 'debug'. */
#define v4l2_dbg(level, debug, dev, fmt, arg...) \
- do { \
+ do { \
if (debug >= (level)) \
- v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \
+ v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \
} while (0)
/**
@@ -127,7 +127,7 @@ struct v4l2_subdev_ops;
* @client_type: name of the chip that's on the adapter.
* @addr: I2C address. If zero, it will use @probe_addrs
 * @probe_addrs: array with a list of addresses. The last entry of the
- * array should be %I2C_CLIENT_END.
+ * array should be %I2C_CLIENT_END.
*
* returns a &struct v4l2_subdev pointer.
*/
@@ -146,7 +146,7 @@ struct i2c_board_info;
* @info: pointer to struct i2c_board_info used to replace the irq,
* platform_data and addr arguments.
 * @probe_addrs: array with a list of addresses. The last entry of the
- * array should be %I2C_CLIENT_END.
+ * array should be %I2C_CLIENT_END.
*
* returns a &struct v4l2_subdev pointer.
*/
@@ -174,17 +174,43 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
*/
unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd);
+/**
+ * enum v4l2_i2c_tuner_type - specifies the range of tuner address that
+ * should be used when seeking for I2C devices.
+ *
+ * @ADDRS_RADIO: Radio tuner addresses.
+ * Represent the following I2C addresses:
+ * 0x10 (if compiled with tea5761 support)
+ * and 0x60.
+ * @ADDRS_DEMOD: Demod tuner addresses.
+ * Represent the following I2C addresses:
+ * 0x42, 0x43, 0x4a and 0x4b.
+ * @ADDRS_TV: TV tuner addresses.
+ * Represent the following I2C addresses:
+ * 0x42, 0x43, 0x4a, 0x4b, 0x60, 0x61, 0x62,
+ * 0x63 and 0x64.
+ * @ADDRS_TV_WITH_DEMOD: TV tuner addresses if demod is present, this
+ * excludes addresses used by the demodulator
+ * from the list of candidates.
+ * Represent the following I2C addresses:
+ * 0x60, 0x61, 0x62, 0x63 and 0x64.
+ *
+ * NOTE: All I2C addresses above use the 7-bit notation.
+ */
enum v4l2_i2c_tuner_type {
- ADDRS_RADIO, /* Radio tuner addresses */
- ADDRS_DEMOD, /* Demod tuner addresses */
- ADDRS_TV, /* TV tuner addresses */
- /* TV tuner addresses if demod is present, this excludes
- addresses used by the demodulator from the list of
- candidates. */
+ ADDRS_RADIO,
+ ADDRS_DEMOD,
+ ADDRS_TV,
ADDRS_TV_WITH_DEMOD,
};
-/* Return a list of I2C tuner addresses to probe. Use only if the tuner
- addresses are unknown. */
+/**
+ * v4l2_i2c_tuner_addrs - Return a list of I2C tuner addresses to probe.
+ *
+ * @type: type of the tuner to seek, as defined by
+ * &enum v4l2_i2c_tuner_type.
+ *
+ * NOTE: Use only if the tuner addresses are unknown.
+ */
const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type);
/* ------------------------------------------------------------------------- */
@@ -224,10 +250,14 @@ void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
/* ------------------------------------------------------------------------- */
-/* Note: these remaining ioctls/structs should be removed as well, but they are
- still used in tuner-simple.c (TUNER_SET_CONFIG), cx18/ivtv (RESET) and
- v4l2-int-device.h (v4l2_routing). To remove these ioctls some more cleanup
- is needed in those modules. */
+/*
+ * FIXME: these remaining ioctls/structs should be removed as well, but they
+ * are still used in tuner-simple.c (TUNER_SET_CONFIG) and cx18/ivtv (RESET).
+ * To remove these ioctls some more cleanup is needed in those modules.
+ *
+ * It doesn't make much sense on documenting them, as what we really want is
+ * to get rid of them.
+ */
/* s_config */
struct v4l2_priv_tun_config {
@@ -236,32 +266,122 @@ struct v4l2_priv_tun_config {
};
#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config)
-#define VIDIOC_INT_RESET _IOW ('d', 102, u32)
-
-struct v4l2_routing {
- u32 input;
- u32 output;
-};
+#define VIDIOC_INT_RESET _IOW ('d', 102, u32)
/* ------------------------------------------------------------------------- */
/* Miscellaneous helper functions */
-void v4l_bound_align_image(unsigned int *w, unsigned int wmin,
+/**
+ * v4l_bound_align_image - adjust video dimensions according to
+ * the given constraints.
+ *
+ * @width: pointer to width that will be adjusted if needed.
+ * @wmin: minimum width.
+ * @wmax: maximum width.
+ * @walign: least significant bit on width.
+ * @height: pointer to height that will be adjusted if needed.
+ * @hmin: minimum height.
+ * @hmax: maximum height.
+ * @halign: least significant bit on height.
+ * @salign: least significant bit for the image size (e. g.
+ * :math:`width * height`).
+ *
+ * Clip an image to have @width between @wmin and @wmax, and @height between
+ * @hmin and @hmax, inclusive.
+ *
+ * Additionally, the @width will be a multiple of :math:`2^{walign}`,
+ * the @height will be a multiple of :math:`2^{halign}`, and the overall
+ * size :math:`width * height` will be a multiple of :math:`2^{salign}`.
+ *
+ * .. note::
+ *
+ * #. The clipping rectangle may be shrunk or enlarged to fit the alignment
+ * constraints.
+ * #. @wmax must not be smaller than @wmin.
+ * #. @hmax must not be smaller than @hmin.
+ * #. The alignments must not be so high there are no possible image
+ * sizes within the allowed bounds.
+ * #. @wmin and @hmin must be at least 1 (don't use 0).
+ * #. For @walign, @halign and @salign, if you don't care about a certain
+ * alignment, specify ``0``, as :math:`2^0 = 1` and one byte alignment
+ * is equivalent to no alignment.
+ * #. If you only want to adjust downward, specify a maximum that's the
+ * same as the initial value.
+ */
+void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
unsigned int wmax, unsigned int walign,
- unsigned int *h, unsigned int hmin,
+ unsigned int *height, unsigned int hmin,
unsigned int hmax, unsigned int halign,
unsigned int salign);
-struct v4l2_discrete_probe {
- const struct v4l2_frmsize_discrete *sizes;
- int num_sizes;
-};
-
-const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
- const struct v4l2_discrete_probe *probe,
- s32 width, s32 height);
+/**
+ * v4l2_find_nearest_size - Find the nearest size among a discrete
+ * set of resolutions contained in an array of a driver specific struct.
+ *
+ * @array: a driver specific array of image sizes
+ * @array_size: the length of the driver specific array of image sizes
+ * @width_field: the name of the width field in the driver specific struct
+ * @height_field: the name of the height field in the driver specific struct
+ * @width: desired width.
+ * @height: desired height.
+ *
+ * Finds the closest resolution to minimize the width and height differences
+ * between the requested and the supported resolutions. The width and height
+ * fields of the driver-specific struct must be of type u32, i.e. four bytes
+ * in size.
+ *
+ * Returns the best match or NULL if the length of the array is zero.
+ */
+#define v4l2_find_nearest_size(array, array_size, width_field, height_field, \
+ width, height) \
+ ({ \
+ BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \
+ sizeof((array)->height_field) != sizeof(u32)); \
+ (typeof(&(*(array))))__v4l2_find_nearest_size( \
+ (array), array_size, sizeof(*(array)), \
+ offsetof(typeof(*(array)), width_field), \
+ offsetof(typeof(*(array)), height_field), \
+ width, height); \
+ })
+const void *
+__v4l2_find_nearest_size(const void *array, size_t array_size,
+ size_t entry_size, size_t width_offset,
+ size_t height_offset, s32 width, s32 height);
+/**
+ * v4l2_get_timestamp - helper routine to get a timestamp to be used when
+ * filling streaming metadata. Internally, it uses ktime_get_ts(),
+ * which is the recommended way to get it.
+ *
+ * @tv: pointer to &struct timeval to be filled.
+ */
void v4l2_get_timestamp(struct timeval *tv);
+/**
+ * v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by
+ * calling the g_frame_interval op of the given subdev. It only works
+ * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
+ * function name.
+ *
+ * @vdev: the struct video_device pointer. Used to determine the device caps.
+ * @sd: the sub-device pointer.
+ * @a: the VIDIOC_G_PARM argument.
+ */
+int v4l2_g_parm_cap(struct video_device *vdev,
+ struct v4l2_subdev *sd, struct v4l2_streamparm *a);
+
+/**
+ * v4l2_s_parm_cap - helper routine for vidioc_s_parm to fill this in by
+ * calling the s_frame_interval op of the given subdev. It only works
+ * for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
+ * function name.
+ *
+ * @vdev: the struct video_device pointer. Used to determine the device caps.
+ * @sd: the sub-device pointer.
+ * @a: the VIDIOC_S_PARM argument.
+ */
+int v4l2_s_parm_cap(struct video_device *vdev,
+ struct v4l2_subdev *sd, struct v4l2_streamparm *a);
+
#endif /* V4L2_COMMON_H_ */
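A usage sketch for the new v4l2_find_nearest_size() helper; the example_frmsize table and its entries are invented for illustration, only the helper's argument order comes from the header above.

struct example_frmsize {
	u32 width;
	u32 height;
};

static const struct example_frmsize example_sizes[] = {
	{  640,  480 },
	{ 1280,  720 },
	{ 1920, 1080 },
};

static void example_try_fmt(struct v4l2_format *f)
{
	const struct example_frmsize *best;

	best = v4l2_find_nearest_size(example_sizes, ARRAY_SIZE(example_sizes),
				      width, height,
				      f->fmt.pix.width, f->fmt.pix.height);
	if (best) {
		f->fmt.pix.width = best->width;
		f->fmt.pix.height = best->height;
	}
}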
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index dacfe54057f8..5b445b5654f7 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -166,8 +166,15 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* empty strings ("") correspond to non-existing menu items (this
* is in addition to the menu_skip_mask above). The last entry
* must be NULL.
+ * Used only if the @type is %V4L2_CTRL_TYPE_MENU.
+ * @qmenu_int: A 64-bit integer array for menus with integer menu items.
+ * The size of array must be equal to the menu size, e. g.:
+ * :math:`ceil(\frac{maximum - minimum}{step}) + 1`.
+ * Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU.
* @flags: The control's flags.
- * @cur: The control's current value.
+ * @cur: Structure to store the current value.
+ * @cur.val: The control's current value, if the @type is represented via
+ * a u32 integer (see &enum v4l2_ctrl_type).
* @val: The control's new s32 value.
* @priv: The control's private pointer. For use by the driver. It is
* untouched by the control framework. Note that this pointer is
@@ -754,8 +761,8 @@ void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
* An error is returned if one of the range arguments is invalid for this
* control type.
*
- * This function assumes that the control handler is not locked and will
- * take the lock itself.
+ * The caller is responsible for acquiring the control handler mutex before
+ * calling __v4l2_ctrl_modify_range() and for releasing it afterwards.
*/
int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
s64 min, s64 max, u64 step, s64 def);
@@ -1037,7 +1044,7 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
* @file: pointer to struct file
* @wait: pointer to struct poll_table_struct
*/
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
/* Helpers for ioctl_ops */
@@ -1139,7 +1146,7 @@ int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
/**
* v4l2_ctrl_subdev_subscribe_event - Helper function to implement
- * as a &struct v4l2_subdev_core_ops subscribe_event function
+ * as a &struct v4l2_subdev_core_ops subscribe_event function
* that just subscribes control events.
*
* @sd: pointer to &struct v4l2_subdev
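In line with the reworded locking comment above, the double-underscore variant is called with the handler lock already held; a sketch (ctrl and the new maximum are hypothetical):

static int example_update_max(struct v4l2_ctrl *ctrl, s64 new_max)
{
	int ret;

	v4l2_ctrl_lock(ctrl);
	ret = __v4l2_ctrl_modify_range(ctrl, 0, new_max, 1, 0);
	v4l2_ctrl_unlock(ctrl);

	return ret;
}

Drivers that do not already hold the lock would normally just use v4l2_ctrl_modify_range(), which takes it internally.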
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 28a686eb7d09..f60cf9cf3b9c 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -21,31 +21,63 @@
#define VIDEO_MAJOR 81
-#define VFL_TYPE_GRABBER 0
-#define VFL_TYPE_VBI 1
-#define VFL_TYPE_RADIO 2
-#define VFL_TYPE_SUBDEV 3
-#define VFL_TYPE_SDR 4
-#define VFL_TYPE_TOUCH 5
-#define VFL_TYPE_MAX 6
-
-/* Is this a receiver, transmitter or mem-to-mem? */
-/* Ignored for VFL_TYPE_SUBDEV. */
-#define VFL_DIR_RX 0
-#define VFL_DIR_TX 1
-#define VFL_DIR_M2M 2
+/**
+ * enum vfl_devnode_type - type of V4L2 device node
+ *
+ * @VFL_TYPE_GRABBER: for video input/output devices
+ * @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext)
+ * @VFL_TYPE_RADIO: for radio tuners
+ * @VFL_TYPE_SUBDEV: for V4L2 subdevices
+ * @VFL_TYPE_SDR: for Software Defined Radio tuners
+ * @VFL_TYPE_TOUCH: for touch sensors
+ */
+enum vfl_devnode_type {
+ VFL_TYPE_GRABBER = 0,
+ VFL_TYPE_VBI,
+ VFL_TYPE_RADIO,
+ VFL_TYPE_SUBDEV,
+ VFL_TYPE_SDR,
+ VFL_TYPE_TOUCH,
+ VFL_TYPE_MAX /* Shall be the last one */
+};
+
+/**
+ * enum vfl_devnode_direction - Identifies if a &struct video_device corresponds
+ * to a receiver, a transmitter or a mem-to-mem device.
+ *
+ * @VFL_DIR_RX: device is a receiver.
+ * @VFL_DIR_TX: device is a transmitter.
+ * @VFL_DIR_M2M: device is a memory to memory device.
+ *
+ * Note: Ignored if &enum vfl_devnode_type is %VFL_TYPE_SUBDEV.
+ */
+enum vfl_devnode_direction {
+ VFL_DIR_RX,
+ VFL_DIR_TX,
+ VFL_DIR_M2M,
+};
struct v4l2_ioctl_callbacks;
struct video_device;
struct v4l2_device;
struct v4l2_ctrl_handler;
-/* Flag to mark the video_device struct as registered.
- Drivers can clear this flag if they want to block all future
- device access. It is cleared by video_unregister_device. */
-#define V4L2_FL_REGISTERED (0)
-/* file->private_data points to struct v4l2_fh */
-#define V4L2_FL_USES_V4L2_FH (1)
+/**
+ * enum v4l2_video_device_flags - Flags used by &struct video_device
+ *
+ * @V4L2_FL_REGISTERED:
+ * indicates that a &struct video_device is registered.
+ * Drivers can clear this flag if they want to block all future
+ * device access. It is cleared by video_unregister_device.
+ * @V4L2_FL_USES_V4L2_FH:
+ * indicates that file->private_data points to &struct v4l2_fh.
+ * This flag is set by the core when v4l2_fh_init() is called.
+ * All new drivers should use it.
+ */
+enum v4l2_video_device_flags {
+ V4L2_FL_REGISTERED = 0,
+ V4L2_FL_USES_V4L2_FH = 1,
+};
/* Priority helper functions */
@@ -152,7 +184,7 @@ struct v4l2_file_operations {
struct module *owner;
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
@@ -166,7 +198,7 @@ struct v4l2_file_operations {
/*
* Newer version of video_device, handled by videodev2.c
- * This version moves redundant code from video device code to
+ * This version moves redundant code from video device code to
* the common handler
*/
@@ -189,11 +221,12 @@ struct v4l2_file_operations {
* @prio: pointer to &struct v4l2_prio_state with device's Priority state.
* If NULL, then v4l2_dev->prio will be used.
* @name: video device name
- * @vfl_type: V4L device type
+ * @vfl_type: V4L device type, as defined by &enum vfl_devnode_type
* @vfl_dir: V4L receiver, transmitter or m2m
* @minor: device node 'minor'. It is set to -1 if the registration failed
* @num: number of the video device node
- * @flags: video device flags. Use bitops to set/clear/test flags
+ * @flags: video device flags. Use bitops to set/clear/test flags.
+ * Contains a set of &enum v4l2_video_device_flags.
* @index: attribute to differentiate multiple indices on one physical device
* @fh_lock: Lock for all v4l2_fhs
* @fh_list: List of &struct v4l2_fh
@@ -237,8 +270,8 @@ struct video_device
/* device info */
char name[32];
- int vfl_type;
- int vfl_dir;
+ enum vfl_devnode_type vfl_type;
+ enum vfl_devnode_direction vfl_dir;
int minor;
u16 num;
unsigned long flags;
@@ -261,18 +294,30 @@ struct video_device
struct mutex *lock;
};
-#define media_entity_to_video_device(__e) \
- container_of(__e, struct video_device, entity)
-/* dev to video-device */
+/**
+ * media_entity_to_video_device - Returns a &struct video_device from
+ * the &struct media_entity embedded on it.
+ *
+ * @__entity: pointer to &struct media_entity
+ */
+#define media_entity_to_video_device(__entity) \
+ container_of(__entity, struct video_device, entity)
+
+/**
+ * to_video_device - Returns a &struct video_device from the
+ * &struct device embedded on it.
+ *
+ * @cd: pointer to &struct device
+ */
#define to_video_device(cd) container_of(cd, struct video_device, dev)
/**
* __video_register_device - register video4linux devices
*
* @vdev: struct video_device to register
- * @type: type of device to register
+ * @type: type of device to register, as defined by &enum vfl_devnode_type
* @nr: which device node number is desired:
- * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
+ * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
* @warn_if_nr_in_use: warn if the desired device node number
* was already in use and another number was chosen instead.
* @owner: module that owns the video device node
@@ -289,43 +334,37 @@ struct video_device
*
* Returns 0 on success.
*
- * Valid values for @type are:
- *
- * - %VFL_TYPE_GRABBER - A frame grabber
- * - %VFL_TYPE_VBI - Vertical blank data (undecoded)
- * - %VFL_TYPE_RADIO - A radio card
- * - %VFL_TYPE_SUBDEV - A subdevice
- * - %VFL_TYPE_SDR - Software Defined Radio
- * - %VFL_TYPE_TOUCH - A touch sensor
- *
* .. note::
*
* This function is meant to be used only inside the V4L2 core.
* Drivers should use video_register_device() or
* video_register_device_no_warn().
*/
-int __must_check __video_register_device(struct video_device *vdev, int type,
- int nr, int warn_if_nr_in_use, struct module *owner);
+int __must_check __video_register_device(struct video_device *vdev,
+ enum vfl_devnode_type type,
+ int nr, int warn_if_nr_in_use,
+ struct module *owner);
/**
* video_register_device - register video4linux devices
*
* @vdev: struct video_device to register
- * @type: type of device to register
+ * @type: type of device to register, as defined by &enum vfl_devnode_type
* @nr: which device node number is desired:
- * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
+ * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
*
* Internally, it calls __video_register_device(). Please see its
* documentation for more details.
*
* .. note::
- * if video_register_device fails, the release() callback of
+ * if video_register_device fails, the release() callback of
* &struct video_device structure is *not* called, so the caller
* is responsible for freeing any data. Usually that means that
 * you should call video_device_release() on failure.
*/
static inline int __must_check video_register_device(struct video_device *vdev,
- int type, int nr)
+ enum vfl_devnode_type type,
+ int nr)
{
return __video_register_device(vdev, type, nr, 1, vdev->fops->owner);
}
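
For illustration only, a minimal registration sequence in a hypothetical driver probe path might look as follows; priv, my_fops and ret are placeholders, not part of this patch:

    /* sketch: somewhere in a hypothetical driver's probe() */
    vdev->v4l2_dev = &priv->v4l2_dev;          /* priv: made-up driver state */
    vdev->fops     = &my_fops;                 /* driver's v4l2_file_operations */
    vdev->release  = video_device_release;
    ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1 /* first free node */);
    if (ret)
        /* release() is *not* called on failure, so free it here */
        video_device_release(vdev);
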
@@ -334,9 +373,9 @@ static inline int __must_check video_register_device(struct video_device *vdev,
* video_register_device_no_warn - register video4linux devices
*
* @vdev: struct video_device to register
- * @type: type of device to register
+ * @type: type of device to register, as defined by &enum vfl_devnode_type
* @nr: which device node number is desired:
- * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
+ * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
*
* This function is identical to video_register_device() except that no
* warning is issued if the desired device node number was already in use.
@@ -345,13 +384,14 @@ static inline int __must_check video_register_device(struct video_device *vdev,
* documentation for more details.
*
* .. note::
- * if video_register_device fails, the release() callback of
+ * if video_register_device fails, the release() callback of
* &struct video_device structure is *not* called, so the caller
* is responsible for freeing any data. Usually that means that
 * you should call video_device_release() on failure.
*/
-static inline int __must_check video_register_device_no_warn(
- struct video_device *vdev, int type, int nr)
+static inline int __must_check
+video_register_device_no_warn(struct video_device *vdev,
+ enum vfl_devnode_type type, int nr)
{
return __video_register_device(vdev, type, nr, 0, vdev->fops->owner);
}
@@ -383,7 +423,7 @@ void video_device_release(struct video_device *vdev);
/**
* video_device_release_empty - helper function to implement the
- * video_device->release\(\) callback.
+ * video_device->release\(\) callback.
*
* @vdev: pointer to &struct video_device
*
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 8ffa94009d1a..0c9e4da55499 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -38,7 +38,7 @@ struct v4l2_ctrl_handler;
* @lock: lock this struct; can be used by the driver as well
* if this struct is embedded into a larger struct.
* @name: unique device name, by default the driver name + bus ID
- * @notify: notify callback called by some sub-devices.
+ * @notify: notify operation called by some sub-devices.
* @ctrl_handler: The control handler. May be %NULL.
* @prio: Device's priority state
* @ref: Keep track of the references to this struct.
@@ -56,7 +56,6 @@ struct v4l2_ctrl_handler;
* #) @dev->driver_data points to this struct.
* #) @dev might be %NULL if there is no parent device
*/
-
struct v4l2_device {
struct device *dev;
#if defined(CONFIG_MEDIA_CONTROLLER)
@@ -166,7 +165,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev);
* v4l2_device_register_subdev - Registers a subdev with a v4l2 device.
*
* @v4l2_dev: pointer to struct &v4l2_device
- * @sd: pointer to struct &v4l2_subdev
+ * @sd: pointer to &struct v4l2_subdev
*
* While registered, the subdev module is marked as in-use.
*
@@ -179,7 +178,7 @@ int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
/**
* v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device.
*
- * @sd: pointer to struct &v4l2_subdev
+ * @sd: pointer to &struct v4l2_subdev
*
* .. note ::
*
@@ -201,7 +200,7 @@ v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev);
/**
* v4l2_subdev_notify - Sends a notification to v4l2_device.
*
- * @sd: pointer to struct &v4l2_subdev
+ * @sd: pointer to &struct v4l2_subdev
* @notification: type of notification. Please notice that the notification
* type is driver-specific.
* @arg: arguments for the notification. Those are specific to each
@@ -214,13 +213,43 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
sd->v4l2_dev->notify(sd, notification, arg);
}
-/* Iterate over all subdevs. */
+/* Helper macros to iterate over all subdevs. */
+
+/**
+ * v4l2_device_for_each_subdev - Helper macro that iterates over all
+ * sub-devices of a given &v4l2_device.
+ *
+ * @sd: pointer that will be filled by the macro with the
+ * &struct v4l2_subdev pointer used as an iterator by the loop.
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ *
+ * This macro iterates over all sub-devices owned by the @v4l2_dev device.
+ * It acts as a for loop iterator and executes the next statement with
+ * the @sd variable pointing to each sub-device in turn.
+ */
#define v4l2_device_for_each_subdev(sd, v4l2_dev) \
list_for_each_entry(sd, &(v4l2_dev)->subdevs, list)
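
A rough usage sketch; the loop body is only illustrative:

    struct v4l2_subdev *sd;

    v4l2_device_for_each_subdev(sd, v4l2_dev)
        dev_info(v4l2_dev->dev, "registered subdev %s\n", sd->name);
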
-/* Call the specified callback for all subdevs matching the condition.
- Ignore any errors. Note that you cannot add or delete a subdev
- while walking the subdevs list. */
+/**
+ * __v4l2_device_call_subdevs_p - Calls the specified operation for
+ * all subdevs matching the condition.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @sd: pointer that will be filled by the macro with the
+ * &struct v4l2_subdev pointer used as an iterator by the loop.
+ * @cond: condition to be matched
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function that will be called if @cond matches.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
+ *
+ * Ignore any errors.
+ *
+ * Note: subdevs cannot be added or deleted while walking
+ * the subdevs list.
+ */
#define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) \
do { \
list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \
@@ -228,6 +257,24 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
(sd)->ops->o->f((sd) , ##args); \
} while (0)
+/**
+ * __v4l2_device_call_subdevs - Calls the specified operation for
+ * all subdevs matching the condition.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @cond: condition to be matched
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function that will be called if @cond matches.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
+ *
+ * Ignore any errors.
+ *
+ * Note: subdevs cannot be added or deleted while walking
+ * the subdevs list.
+ */
#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \
do { \
struct v4l2_subdev *__sd; \
@@ -236,10 +283,30 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
f , ##args); \
} while (0)
-/* Call the specified callback for all subdevs matching the condition.
- If the callback returns an error other than 0 or -ENOIOCTLCMD, then
- return with that error code. Note that you cannot add or delete a
- subdev while walking the subdevs list. */
+/**
+ * __v4l2_device_call_subdevs_until_err_p - Calls the specified operation for
+ * all subdevs matching the condition.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @sd: pointer used by the macro as an iterator over the
+ * &struct v4l2_subdev sub-devices associated with @v4l2_dev.
+ * @cond: condition to be matched
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function that will be called if @cond matches.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
+ *
+ * Return:
+ *
+ * If the operation returns an error other than 0 or ``-ENOIOCTLCMD``
+ * for any subdevice, then abort and return with that error code, zero
+ * otherwise.
+ *
+ * Note: subdevs cannot be added or deleted while walking
+ * the subdevs list.
+ */
#define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \
({ \
long __err = 0; \
@@ -253,6 +320,28 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
(__err == -ENOIOCTLCMD) ? 0 : __err; \
})
+/**
+ * __v4l2_device_call_subdevs_until_err - Calls the specified operation for
+ * all subdevs matching the condition.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @cond: condition to be matched
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function that will be called if @cond matches.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
+ *
+ * Return:
+ *
+ * If the operation returns an error other than 0 or ``-ENOIOCTLCMD``
+ * for any subdevice, then abort and return with that error code,
+ * zero otherwise.
+ *
+ * Note: subdevs cannot be added or deleted while walking
+ * the subdevs list.
+ */
#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \
({ \
struct v4l2_subdev *__sd; \
@@ -260,9 +349,26 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
f , ##args); \
})
-/* Call the specified callback for all subdevs matching grp_id (if 0, then
- match them all). Ignore any errors. Note that you cannot add or delete
- a subdev while walking the subdevs list. */
+/**
+ * v4l2_device_call_all - Calls the specified operation for
+ * all subdevs matching the &v4l2_subdev.grp_id, as assigned
+ * by the bridge driver.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @grpid: &struct v4l2_subdev->grp_id group ID to match.
+ * Use 0 to match them all.
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function that will be called for each matching subdev.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
+ *
+ * Ignore any errors.
+ *
+ * Note: subdevs cannot be added or deleted while walking
+ * the subdevs list.
+ */
#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \
do { \
struct v4l2_subdev *__sd; \
@@ -272,10 +378,30 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
##args); \
} while (0)
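
For example, a bridge driver could start streaming on every sub-device of the video group; group ID 0 matches all subdevs and any errors are ignored (sketch):

    v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 1);
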
-/* Call the specified callback for all subdevs matching grp_id (if 0, then
- match them all). If the callback returns an error other than 0 or
- -ENOIOCTLCMD, then return with that error code. Note that you cannot
- add or delete a subdev while walking the subdevs list. */
+/**
+ * v4l2_device_call_until_err - Calls the specified operation for
+ * all subdevs matching the &v4l2_subdev.grp_id, as assigned
+ * by the bridge driver, until an error occurs.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @grpid: &struct v4l2_subdev->grp_id group ID to match.
+ * Use 0 to match them all.
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function that will be called for each matching subdev.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
+ *
+ * Return:
+ *
+ * If the operation returns an error other than 0 or ``-ENOIOCTLCMD``
+ * for any subdevice, then abort and return with that error code,
+ * zero otherwise.
+ *
+ * Note: subdevs cannot be added or deleted while walking
+ * the subdevs list.
+ */
#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \
({ \
struct v4l2_subdev *__sd; \
@@ -284,10 +410,24 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
##args); \
})
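
A caller typically propagates the aggregated return value; std here is a placeholder v4l2_std_id chosen by the driver (sketch):

    int err = v4l2_device_call_until_err(v4l2_dev, 0, video, s_std, std);

    if (err)
        return err;
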
-/*
- * Call the specified callback for all subdevs where grp_id & grpmsk != 0
- * (if grpmsk == 0, then match them all). Ignore any errors. Note that you
- * cannot add or delete a subdev while walking the subdevs list.
+/**
+ * v4l2_device_mask_call_all - Calls the specified operation for
+ * all subdevices where a group ID matches a specified bitmask.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id
+ * group ID to be matched. Use 0 to match them all.
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function that will be called for each matching subdev.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
+ *
+ * Ignore any errors.
+ *
+ * Note: subdevs cannot be added or deleted while walking
+ * the subdevs list.
*/
#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) \
do { \
@@ -298,11 +438,28 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
##args); \
} while (0)
-/*
- * Call the specified callback for all subdevs where grp_id & grpmsk != 0
- * (if grpmsk == 0, then match them all). If the callback returns an error
- * other than 0 or %-ENOIOCTLCMD, then return with that error code. Note that
- * you cannot add or delete a subdev while walking the subdevs list.
+/**
+ * v4l2_device_mask_call_until_err - Calls the specified operation for
+ * all subdevices where a group ID matches a specified bitmask.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id
+ * group ID to be matched. Use 0 to match them all.
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function that will be called for each matching subdev.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
+ *
+ * Return:
+ *
+ * If the operation returns an error other than 0 or ``-ENOIOCTLCMD``
+ * for any subdevice, then abort and return with that error code,
+ * zero otherwise.
+ *
+ * Note: subdevs cannot be added or deleted while walking
+ * the subdevs list.
*/
#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \
({ \
@@ -312,9 +469,19 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
##args); \
})
-/*
- * Does any subdev with matching grpid (or all if grpid == 0) has the given
- * op?
+
+/**
+ * v4l2_device_has_op - checks if any subdev with matching grpid has a
+ * given op.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @grpid: &struct v4l2_subdev->grp_id group ID to match.
+ * Use 0 to match them all.
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operations functions.
+ * @f: operation function that will be called if @cond matches.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
*/
#define v4l2_device_has_op(v4l2_dev, grpid, o, f) \
({ \
@@ -331,9 +498,18 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
__result; \
})
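
One possible use is to disable an ioctl when no sub-device implements the matching operation; vdev is a placeholder video_device (sketch):

    if (!v4l2_device_has_op(v4l2_dev, 0, video, querystd))
        v4l2_disable_ioctl(vdev, VIDIOC_QUERYSTD);
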
-/*
- * Does any subdev with matching grpmsk (or all if grpmsk == 0) has the given
- * op?
+/**
+ * v4l2_device_mask_has_op - checks if any subdev with matching group
+ * mask has a given op.
+ *
+ * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
+ * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id
+ * group ID to be matched. Use 0 to match them all.
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of operation functions.
+ * @f: operation function whose existence is checked.
+ * The operation functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
*/
#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \
({ \
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 61a18893e004..17cb27df1b81 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* v4l2-dv-timings - Internal header with dv-timings helper functions
*
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef __V4L2_DV_TIMINGS_H
@@ -203,13 +190,15 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait);
*/
struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t);
-/*
- * reduce_fps - check if conditions for reduced fps are true.
- * bt - v4l2 timing structure
- * For different timings reduced fps is allowed if following conditions
- * are met -
- * For CVT timings: if reduced blanking v2 (vsync == 8) is true.
- * For CEA861 timings: if V4L2_DV_FL_CAN_REDUCE_FPS flag is true.
+/**
+ * can_reduce_fps - check if conditions for reduced fps are true.
+ * @bt: v4l2 timing structure
+ *
+ * For different timings reduced fps is allowed if the following conditions
+ * are met:
+ *
+ * - For CVT timings: if reduced blanking v2 (vsync == 8) is true.
+ * - For CEA861 timings: if %V4L2_DV_FL_CAN_REDUCE_FPS flag is true.
*/
static inline bool can_reduce_fps(struct v4l2_bt_timings *bt)
{
@@ -223,5 +212,26 @@ static inline bool can_reduce_fps(struct v4l2_bt_timings *bt)
return false;
}
+/**
+ * struct v4l2_hdmi_colorimetry - describes the HDMI colorimetry information
+ * @colorspace: enum v4l2_colorspace, the colorspace
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ */
+struct v4l2_hdmi_colorimetry {
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+};
+
+struct hdmi_avi_infoframe;
+struct hdmi_vendor_infoframe;
+
+struct v4l2_hdmi_colorimetry
+v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
+ const struct hdmi_vendor_infoframe *hdmi,
+ unsigned int height);
#endif
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 6741910c3a18..17833e886e11 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -24,40 +24,6 @@
#include <linux/videodev2.h>
#include <linux/wait.h>
-/*
- * Overview:
- *
- * Events are subscribed per-filehandle. An event specification consists of a
- * type and is optionally associated with an object identified through the
- * 'id' field. So an event is uniquely identified by the (type, id) tuple.
- *
- * The v4l2-fh struct has a list of subscribed events. The v4l2_subscribed_event
- * struct is added to that list, one for every subscribed event.
- *
- * Each v4l2_subscribed_event struct ends with an array of v4l2_kevent structs.
- * This array (ringbuffer, really) is used to store any events raised by the
- * driver. The v4l2_kevent struct links into the 'available' list of the
- * v4l2_fh struct so VIDIOC_DQEVENT will know which event to dequeue first.
- *
- * Finally, if the event subscription is associated with a particular object
- * such as a V4L2 control, then that object needs to know about that as well
- * so that an event can be raised by that object. So the 'node' field can
- * be used to link the v4l2_subscribed_event struct into a list of that
- * object.
- *
- * So to summarize:
- *
- * struct v4l2_fh has two lists: one of the subscribed events, and one of the
- * pending events.
- *
- * struct v4l2_subscribed_event has a ringbuffer of raised (pending) events of
- * that particular type.
- *
- * If struct v4l2_subscribed_event is associated with a specific object, then
- * that object will have an internal list of struct v4l2_subscribed_event so
- * it knows who subscribed an event to that object.
- */
-
struct v4l2_fh;
struct v4l2_subdev;
struct v4l2_subscribed_event;
@@ -218,7 +184,7 @@ int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd,
struct v4l2_event_subscription *sub);
/**
* v4l2_src_change_event_subscribe - helper function that calls
- * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE.
+ * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE.
*
* @fh: pointer to struct v4l2_fh
* @sub: pointer to &struct v4l2_event_subscription
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index 62633e7d2630..ea73fef8bdc0 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -22,6 +22,7 @@
#define V4L2_FH_H
#include <linux/fs.h>
+#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/videodev2.h>
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index 5c1d50f78e12..0a5e4518ca11 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -91,12 +91,24 @@ struct v4l2_flash {
struct v4l2_ctrl **ctrls;
};
+/**
+ * v4l2_subdev_to_v4l2_flash - Returns a &struct v4l2_flash from the
+ * &struct v4l2_subdev embedded on it.
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ */
static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash(
struct v4l2_subdev *sd)
{
return container_of(sd, struct v4l2_flash, sd);
}
+/**
+ * v4l2_ctrl_to_v4l2_flash - Returns a &struct v4l2_flash from the
+ * &struct v4l2_ctrl embedded on it.
+ *
+ * @c: pointer to &struct v4l2_ctrl
+ */
static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
{
return container_of(c->handler, struct v4l2_flash, hdl);
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index b5b465677d28..c228ec1c77cf 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -81,7 +81,17 @@ struct v4l2_fwnode_bus_mipi_csi1 {
* struct v4l2_fwnode_endpoint - the endpoint data structure
* @base: fwnode endpoint of the v4l2_fwnode
* @bus_type: bus type
- * @bus: bus configuration data structure
+ * @bus: union with bus configuration data structure
+ * @bus.parallel: embedded &struct v4l2_fwnode_bus_parallel.
+ * Used if the bus is parallel.
+ * @bus.mipi_csi1: embedded &struct v4l2_fwnode_bus_mipi_csi1.
+ * Used if the bus is MIPI Alliance's Camera Serial
+ * Interface version 1 (MIPI CSI1) or Standard
+ * Mobile Imaging Architecture's Compact Camera Port 2
+ * (SMIA CCP2).
+ * @bus.mipi_csi2: embedded &struct v4l2_fwnode_bus_mipi_csi2.
+ * Used if the bus is MIPI Alliance's Camera Serial
+ * Interface version 2 (MIPI CSI2).
* @link_frequencies: array of supported link frequencies
 * @nr_of_link_frequencies: number of elements in link_frequencies array
*/
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 93f8afcb7a22..4d8626c468bc 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -12,6 +12,8 @@
#define V4L2_MEDIABUS_H
#include <linux/v4l2-mediabus.h>
+#include <linux/bitops.h>
+
/* Parallel flags */
/*
@@ -20,44 +22,44 @@
* horizontal and vertical synchronisation. In "Slave mode" the host is
* providing these signals to the slave.
*/
-#define V4L2_MBUS_MASTER (1 << 0)
-#define V4L2_MBUS_SLAVE (1 << 1)
+#define V4L2_MBUS_MASTER BIT(0)
+#define V4L2_MBUS_SLAVE BIT(1)
/*
* Signal polarity flags
* Note: in BT.656 mode HSYNC, FIELD, and VSYNC are unused
* V4L2_MBUS_[HV]SYNC* flags should be also used for specifying
* configuration of hardware that uses [HV]REF signals
*/
-#define V4L2_MBUS_HSYNC_ACTIVE_HIGH (1 << 2)
-#define V4L2_MBUS_HSYNC_ACTIVE_LOW (1 << 3)
-#define V4L2_MBUS_VSYNC_ACTIVE_HIGH (1 << 4)
-#define V4L2_MBUS_VSYNC_ACTIVE_LOW (1 << 5)
-#define V4L2_MBUS_PCLK_SAMPLE_RISING (1 << 6)
-#define V4L2_MBUS_PCLK_SAMPLE_FALLING (1 << 7)
-#define V4L2_MBUS_DATA_ACTIVE_HIGH (1 << 8)
-#define V4L2_MBUS_DATA_ACTIVE_LOW (1 << 9)
+#define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2)
+#define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3)
+#define V4L2_MBUS_VSYNC_ACTIVE_HIGH BIT(4)
+#define V4L2_MBUS_VSYNC_ACTIVE_LOW BIT(5)
+#define V4L2_MBUS_PCLK_SAMPLE_RISING BIT(6)
+#define V4L2_MBUS_PCLK_SAMPLE_FALLING BIT(7)
+#define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(8)
+#define V4L2_MBUS_DATA_ACTIVE_LOW BIT(9)
/* FIELD = 0/1 - Field1 (odd)/Field2 (even) */
-#define V4L2_MBUS_FIELD_EVEN_HIGH (1 << 10)
+#define V4L2_MBUS_FIELD_EVEN_HIGH BIT(10)
/* FIELD = 1/0 - Field1 (odd)/Field2 (even) */
-#define V4L2_MBUS_FIELD_EVEN_LOW (1 << 11)
+#define V4L2_MBUS_FIELD_EVEN_LOW BIT(11)
/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */
-#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH (1 << 12)
-#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW (1 << 13)
+#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12)
+#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13)
/* Serial flags */
/* How many lanes the client can use */
-#define V4L2_MBUS_CSI2_1_LANE (1 << 0)
-#define V4L2_MBUS_CSI2_2_LANE (1 << 1)
-#define V4L2_MBUS_CSI2_3_LANE (1 << 2)
-#define V4L2_MBUS_CSI2_4_LANE (1 << 3)
+#define V4L2_MBUS_CSI2_1_LANE BIT(0)
+#define V4L2_MBUS_CSI2_2_LANE BIT(1)
+#define V4L2_MBUS_CSI2_3_LANE BIT(2)
+#define V4L2_MBUS_CSI2_4_LANE BIT(3)
/* On which channels it can send video data */
-#define V4L2_MBUS_CSI2_CHANNEL_0 (1 << 4)
-#define V4L2_MBUS_CSI2_CHANNEL_1 (1 << 5)
-#define V4L2_MBUS_CSI2_CHANNEL_2 (1 << 6)
-#define V4L2_MBUS_CSI2_CHANNEL_3 (1 << 7)
+#define V4L2_MBUS_CSI2_CHANNEL_0 BIT(4)
+#define V4L2_MBUS_CSI2_CHANNEL_1 BIT(5)
+#define V4L2_MBUS_CSI2_CHANNEL_2 BIT(6)
+#define V4L2_MBUS_CSI2_CHANNEL_3 BIT(7)
/* Does it support only continuous or also non-continuous clock mode */
-#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK (1 << 8)
-#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK (1 << 9)
+#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK BIT(8)
+#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(9)
#define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | V4L2_MBUS_CSI2_2_LANE | \
V4L2_MBUS_CSI2_3_LANE | V4L2_MBUS_CSI2_4_LANE)
@@ -91,6 +93,13 @@ struct v4l2_mbus_config {
unsigned int flags;
};
+/**
+ * v4l2_fill_pix_format - Ancillary routine that fills the fields of a
+ * &struct v4l2_pix_format from a &struct v4l2_mbus_framefmt.
+ *
+ * @pix_fmt: pointer to &struct v4l2_pix_format to be filled
+ * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model
+ */
static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
const struct v4l2_mbus_framefmt *mbus_fmt)
{
@@ -103,6 +112,15 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
pix_fmt->xfer_func = mbus_fmt->xfer_func;
}
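
A sketch of how a bridge driver might combine this helper with the active sub-device format; f (a struct v4l2_format) and the pixel format choice are assumptions, not part of this patch:

    struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE };
    struct v4l2_pix_format *pix = &f->fmt.pix;

    v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
    v4l2_fill_pix_format(pix, &fmt.format);
    pix->pixelformat  = V4L2_PIX_FMT_YUYV;           /* chosen by the driver */
    pix->bytesperline = pix->width * 2;
    pix->sizeimage    = pix->bytesperline * pix->height;
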
+/**
+ * v4l2_fill_mbus_format - Ancillary routine that fills a &struct
+ * v4l2_mbus_framefmt from a &struct v4l2_pix_format and a
+ * data format code.
+ *
+ * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled
+ * @pix_fmt: pointer to &struct v4l2_pix_format to be used as model
+ * @code: data format code (from &enum v4l2_mbus_pixelcode)
+ */
static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
const struct v4l2_pix_format *pix_fmt,
u32 code)
@@ -117,6 +135,13 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
mbus_fmt->code = code;
}
+/**
+ * v4l2_fill_pix_format_mplane - Ancillary routine that fills the fields of a
+ * &struct v4l2_pix_format_mplane from a media bus structure.
+ *
+ * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled
+ * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model
+ */
static inline void v4l2_fill_pix_format_mplane(
struct v4l2_pix_format_mplane *pix_mp_fmt,
const struct v4l2_mbus_framefmt *mbus_fmt)
@@ -130,6 +155,13 @@ static inline void v4l2_fill_pix_format_mplane(
pix_mp_fmt->xfer_func = mbus_fmt->xfer_func;
}
+/**
+ * v4l2_fill_mbus_format_mplane - Ancillary routine that fills a &struct
+ * v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane.
+ *
+ * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled
+ * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be used as model
+ */
static inline void v4l2_fill_mbus_format_mplane(
struct v4l2_mbus_framefmt *mbus_fmt,
const struct v4l2_pix_format_mplane *pix_mp_fmt)
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index e157d5c9b224..3d07ba3a8262 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -297,7 +297,7 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
* indicate that a non-blocking write can be performed, while read will be
* returned in case of the destination queue.
*/
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait);
/**
@@ -601,7 +601,7 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
-unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
#endif /* _MEDIA_V4L2_MEM2MEM_H */
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
index d2125f0cc7cd..595c3ba05f23 100644
--- a/include/media/v4l2-rect.h
+++ b/include/media/v4l2-rect.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* v4l2-rect.h - v4l2_rect helper functions
*
* Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _V4L2_RECT_H_
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index ec399c770301..9102d6ca566e 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -108,22 +108,31 @@ struct v4l2_decode_vbi_line {
* not yet implemented) since ops provide proper type-checking.
*/
-/* Subdevice external IO pin configuration */
-#define V4L2_SUBDEV_IO_PIN_DISABLE (1 << 0) /* ENABLE assumed */
-#define V4L2_SUBDEV_IO_PIN_OUTPUT (1 << 1)
-#define V4L2_SUBDEV_IO_PIN_INPUT (1 << 2)
-#define V4L2_SUBDEV_IO_PIN_SET_VALUE (1 << 3) /* Set output value */
-#define V4L2_SUBDEV_IO_PIN_ACTIVE_LOW (1 << 4) /* ACTIVE HIGH assumed */
+/**
+ * enum v4l2_subdev_io_pin_bits - Subdevice external IO pin configuration
+ * bits
+ *
+ * @V4L2_SUBDEV_IO_PIN_DISABLE: disables a pin config. ENABLE assumed.
+ * @V4L2_SUBDEV_IO_PIN_OUTPUT: set it if pin is an output.
+ * @V4L2_SUBDEV_IO_PIN_INPUT: set it if pin is an input.
+ * @V4L2_SUBDEV_IO_PIN_SET_VALUE: to set the output value via
+ * &struct v4l2_subdev_io_pin_config->value.
+ * @V4L2_SUBDEV_IO_PIN_ACTIVE_LOW: pin is active when its value is 0
+ * (active low). Otherwise, ACTIVE HIGH is assumed.
+ */
+enum v4l2_subdev_io_pin_bits {
+ V4L2_SUBDEV_IO_PIN_DISABLE = 0,
+ V4L2_SUBDEV_IO_PIN_OUTPUT = 1,
+ V4L2_SUBDEV_IO_PIN_INPUT = 2,
+ V4L2_SUBDEV_IO_PIN_SET_VALUE = 3,
+ V4L2_SUBDEV_IO_PIN_ACTIVE_LOW = 4,
+};
/**
* struct v4l2_subdev_io_pin_config - Subdevice external IO pin configuration
*
- * @flags: bitmask with flags for this pin's config:
- * %V4L2_SUBDEV_IO_PIN_DISABLE - disables a pin config,
- * %V4L2_SUBDEV_IO_PIN_OUTPUT - if pin is an output,
- * %V4L2_SUBDEV_IO_PIN_INPUT - if pin is an input,
- * %V4L2_SUBDEV_IO_PIN_SET_VALUE - to set the output value via @value
- * and %V4L2_SUBDEV_IO_PIN_ACTIVE_LOW - if active is 0.
+ * @flags: bitmask with flags for this pin's config, whose bits are defined by
+ * &enum v4l2_subdev_io_pin_bits.
* @pin: Chip external IO pin to configure
* @function: Internal signal pad/function to route to IO pin
* @value: Initial value for pin - e.g. GPIO output value
@@ -140,7 +149,7 @@ struct v4l2_subdev_io_pin_config {
/**
* struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs
*
- * @log_status: callback for %VIDIOC_LOG_STATUS ioctl handler code.
+ * @log_status: callback for VIDIOC_LOG_STATUS() ioctl handler code.
*
* @s_io_pin_config: configure one or more chip I/O pins for chips that
* multiplex different internal signal pads out to IO pins. This function
@@ -168,9 +177,9 @@ struct v4l2_subdev_io_pin_config {
* @compat_ioctl32: called when a 32 bits application uses a 64 bits Kernel,
* in order to fix data passed from/to userspace.
*
- * @g_register: callback for %VIDIOC_G_REGISTER ioctl handler code.
+ * @g_register: callback for VIDIOC_DBG_G_REGISTER() ioctl handler code.
*
- * @s_register: callback for %VIDIOC_G_REGISTER ioctl handler code.
+ * @s_register: callback for VIDIOC_DBG_S_REGISTER() ioctl handler code.
*
* @s_power: puts subdevice in power saving mode (on == 0) or normal operation
* mode (on == 1).
@@ -215,31 +224,54 @@ struct v4l2_subdev_core_ops {
* struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened
* in radio mode.
*
- * @s_radio: callback for %VIDIOC_S_RADIO ioctl handler code.
+ * @standby: puts the tuner in standby mode. It will be woken up
+ * automatically the next time it is used.
+ *
+ * @s_radio: callback that switches the tuner to radio mode.
+ * Drivers should explicitly call it before using any other tuner
+ * operation that requires the tuner to be in radio mode.
+ * Used on devices that have both AM/FM radio receiver and TV.
*
- * @s_frequency: callback for %VIDIOC_S_FREQUENCY ioctl handler code.
+ * @s_frequency: callback for VIDIOC_S_FREQUENCY() ioctl handler code.
*
- * @g_frequency: callback for %VIDIOC_G_FREQUENCY ioctl handler code.
+ * @g_frequency: callback for VIDIOC_G_FREQUENCY() ioctl handler code.
* freq->type must be filled in. Normally done by video_ioctl2()
* or the bridge driver.
*
- * @enum_freq_bands: callback for %VIDIOC_ENUM_FREQ_BANDS ioctl handler code.
+ * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS() ioctl handler code.
*
- * @g_tuner: callback for %VIDIOC_G_TUNER ioctl handler code.
+ * @g_tuner: callback for VIDIOC_G_TUNER() ioctl handler code.
*
- * @s_tuner: callback for %VIDIOC_S_TUNER ioctl handler code. @vt->type must be
+ * @s_tuner: callback for VIDIOC_S_TUNER() ioctl handler code. @vt->type must be
* filled in. Normally done by video_ioctl2 or the
* bridge driver.
*
- * @g_modulator: callback for %VIDIOC_G_MODULATOR ioctl handler code.
+ * @g_modulator: callback for VIDIOC_G_MODULATOR() ioctl handler code.
*
- * @s_modulator: callback for %VIDIOC_S_MODULATOR ioctl handler code.
+ * @s_modulator: callback for VIDIOC_S_MODULATOR() ioctl handler code.
*
* @s_type_addr: sets tuner type and its I2C addr.
*
* @s_config: sets tda9887 specific stuff, like port1, port2 and qss
+ *
+ * .. note::
+ *
+ * On devices that have both AM/FM and TV, it is up to the driver
+ * to explicitly call s_radio when the tuner should be switched to
+ * radio mode, before handling other &struct v4l2_subdev_tuner_ops
+ * that would require it. An example of such usage is::
+ *
+ * static void s_frequency(void *priv, const struct v4l2_frequency *f)
+ * {
+ * ...
+ * if (f->type == V4L2_TUNER_RADIO)
+ * v4l2_device_call_all(v4l2_dev, 0, tuner, s_radio);
+ * ...
+ * v4l2_device_call_all(v4l2_dev, 0, tuner, s_frequency);
+ * }
*/
struct v4l2_subdev_tuner_ops {
+ int (*standby)(struct v4l2_subdev *sd);
int (*s_radio)(struct v4l2_subdev *sd);
int (*s_frequency)(struct v4l2_subdev *sd, const struct v4l2_frequency *freq);
int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq);
@@ -285,25 +317,32 @@ struct v4l2_subdev_audio_ops {
int (*s_stream)(struct v4l2_subdev *sd, int enable);
};
-/* Indicates the @length field specifies maximum data length. */
-#define V4L2_MBUS_FRAME_DESC_FL_LEN_MAX (1U << 0)
-/*
- * Indicates that the format does not have line offsets, i.e. the
- * receiver should use 1D DMA.
+/**
+ * enum v4l2_mbus_frame_desc_flags - media bus frame description flags
+ *
+ * @V4L2_MBUS_FRAME_DESC_FL_LEN_MAX:
+ * Indicates that &struct v4l2_mbus_frame_desc_entry->length field
+ * specifies maximum data length.
+ * @V4L2_MBUS_FRAME_DESC_FL_BLOB:
+ * Indicates that the format does not have line offsets, i.e.
+ * the receiver should use 1D DMA.
*/
-#define V4L2_MBUS_FRAME_DESC_FL_BLOB (1U << 1)
+enum v4l2_mbus_frame_desc_flags {
+ V4L2_MBUS_FRAME_DESC_FL_LEN_MAX = BIT(0),
+ V4L2_MBUS_FRAME_DESC_FL_BLOB = BIT(1),
+};
/**
* struct v4l2_mbus_frame_desc_entry - media bus frame description structure
*
- * @flags: bitmask flags: %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX and
- * %V4L2_MBUS_FRAME_DESC_FL_BLOB.
- * @pixelcode: media bus pixel code, valid if FRAME_DESC_FL_BLOB is not set
- * @length: number of octets per frame, valid if V4L2_MBUS_FRAME_DESC_FL_BLOB
- * is set
+ * @flags: bitmask flags, as defined by &enum v4l2_mbus_frame_desc_flags.
+ * @pixelcode: media bus pixel code, valid if @flags
+ * %V4L2_MBUS_FRAME_DESC_FL_BLOB is not set.
+ * @length: number of octets per frame, valid if @flags
+ * %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX is set.
*/
struct v4l2_mbus_frame_desc_entry {
- u16 flags;
+ enum v4l2_mbus_frame_desc_flags flags;
u32 pixelcode;
u32 length;
};
@@ -332,9 +371,9 @@ struct v4l2_mbus_frame_desc {
* regarding clock frequency dividers, etc. If not used, then set flags
* to 0. If the frequency is not supported, then -EINVAL is returned.
*
- * @g_std: callback for %VIDIOC_G_STD ioctl handler code.
+ * @g_std: callback for VIDIOC_G_STD() ioctl handler code.
*
- * @s_std: callback for %VIDIOC_S_STD ioctl handler code.
+ * @s_std: callback for VIDIOC_S_STD() ioctl handler code.
*
* @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
* video input devices.
@@ -342,7 +381,7 @@ struct v4l2_mbus_frame_desc {
* @g_std_output: get current standard for video OUTPUT devices. This is ignored
* by video input devices.
*
- * @querystd: callback for %VIDIOC_QUERYSTD ioctl handler code.
+ * @querystd: callback for VIDIOC_QUERYSTD() ioctl handler code.
*
* @g_tvnorms: get &v4l2_std_id with all standards supported by the video
* CAPTURE device. This is ignored by video output devices.
@@ -358,13 +397,11 @@ struct v4l2_mbus_frame_desc {
*
* @g_pixelaspect: callback to return the pixelaspect ratio.
*
- * @g_parm: callback for %VIDIOC_G_PARM ioctl handler code.
+ * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL()
+ * ioctl handler code.
*
- * @s_parm: callback for %VIDIOC_S_PARM ioctl handler code.
- *
- * @g_frame_interval: callback for %VIDIOC_G_FRAMEINTERVAL ioctl handler code.
- *
- * @s_frame_interval: callback for %VIDIOC_S_FRAMEINTERVAL ioctl handler code.
+ * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL()
+ * ioctl handler code.
*
* @s_dv_timings: Set custom dv timings in the sub device. This is used
* when sub device is capable of setting detailed timing information
@@ -372,7 +409,7 @@ struct v4l2_mbus_frame_desc {
*
* @g_dv_timings: Get custom dv timings in the sub device.
*
- * @query_dv_timings: callback for %VIDIOC_QUERY_DV_TIMINGS ioctl handler code.
+ * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code.
*
* @g_mbus_config: get supported mediabus configurations
*
@@ -397,8 +434,6 @@ struct v4l2_subdev_video_ops {
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
- int (*g_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
- int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
int (*g_frame_interval)(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *interval);
int (*s_frame_interval)(struct v4l2_subdev *sd,
@@ -443,7 +478,8 @@ struct v4l2_subdev_video_ops {
* member (to determine whether CC data from the first or second field
* should be obtained).
*
- * @g_sliced_vbi_cap: callback for %VIDIOC_SLICED_VBI_CAP ioctl handler code.
+ * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP() ioctl handler
+ * code.
*
* @s_raw_fmt: setup the video encoder/decoder for raw VBI.
*
@@ -610,30 +646,30 @@ struct v4l2_subdev_pad_config {
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
*
* @init_cfg: initialize the pad config to default values
- * @enum_mbus_code: callback for %VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler
+ * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler
* code.
- * @enum_frame_size: callback for %VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler
+ * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler
* code.
*
- * @enum_frame_interval: callback for %VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL ioctl
+ * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL() ioctl
* handler code.
*
- * @get_fmt: callback for %VIDIOC_SUBDEV_G_FMT ioctl handler code.
+ * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT() ioctl handler code.
*
- * @set_fmt: callback for %VIDIOC_SUBDEV_S_FMT ioctl handler code.
+ * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT() ioctl handler code.
*
- * @get_selection: callback for %VIDIOC_SUBDEV_G_SELECTION ioctl handler code.
+ * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION() ioctl handler code.
*
- * @set_selection: callback for %VIDIOC_SUBDEV_S_SELECTION ioctl handler code.
+ * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code.
*
- * @get_edid: callback for %VIDIOC_SUBDEV_G_EDID ioctl handler code.
+ * @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code.
*
- * @set_edid: callback for %VIDIOC_SUBDEV_S_EDID ioctl handler code.
+ * @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code.
*
- * @dv_timings_cap: callback for %VIDIOC_SUBDEV_DV_TIMINGS_CAP ioctl handler
+ * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP() ioctl handler
* code.
*
- * @enum_dv_timings: callback for %VIDIOC_SUBDEV_ENUM_DV_TIMINGS ioctl handler
+ * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS() ioctl handler
* code.
*
* @link_validate: used by the media controller code to check if the links
@@ -766,7 +802,7 @@ struct v4l2_subdev_platform_data {
* @list: List of sub-devices
* @owner: The owner is the same as the driver's &struct device owner.
* @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev
- * ownner. Initialized by v4l2_device_register_subdev().
+ * owner. Initialized by v4l2_device_register_subdev().
* @flags: subdev flags. Can be:
* %V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is a i2c device;
* %V4L2_SUBDEV_FL_IS_SPI - Set this flag if this subdev is a spi device;
@@ -829,6 +865,13 @@ struct v4l2_subdev {
struct v4l2_subdev_platform_data *pdata;
};
+
+/**
+ * media_entity_to_v4l2_subdev - Returns a &struct v4l2_subdev from
+ * the &struct media_entity embedded in it.
+ *
+ * @ent: pointer to &struct media_entity.
+ */
#define media_entity_to_v4l2_subdev(ent) \
({ \
typeof(ent) __me_sd_ent = (ent); \
@@ -838,14 +881,20 @@ struct v4l2_subdev {
NULL; \
})
+/**
+ * vdev_to_v4l2_subdev - Returns a &struct v4l2_subdev from
+ * the &struct video_device embedded on it.
+ *
+ * @vdev: pointer to &struct video_device
+ */
#define vdev_to_v4l2_subdev(vdev) \
((struct v4l2_subdev *)video_get_drvdata(vdev))
/**
* struct v4l2_subdev_fh - Used for storing subdev information per file handle
*
- * @vfh: pointer to struct v4l2_fh
- * @pad: pointer to v4l2_subdev_pad_config
+ * @vfh: pointer to &struct v4l2_fh
+ * @pad: pointer to &struct v4l2_subdev_pad_config
*/
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
@@ -854,23 +903,70 @@ struct v4l2_subdev_fh {
#endif
};
+/**
+ * to_v4l2_subdev_fh - Returns a &struct v4l2_subdev_fh from
+ * the &struct v4l2_fh embedded on it.
+ *
+ * @fh: pointer to &struct v4l2_fh
+ */
#define to_v4l2_subdev_fh(fh) \
container_of(fh, struct v4l2_subdev_fh, vfh)
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
-#define __V4L2_SUBDEV_MK_GET_TRY(rtype, fun_name, field_name) \
- static inline struct rtype * \
- fun_name(struct v4l2_subdev *sd, \
- struct v4l2_subdev_pad_config *cfg, \
- unsigned int pad) \
- { \
- BUG_ON(pad >= sd->entity.num_pads); \
- return &cfg[pad].field_name; \
- }
-
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, v4l2_subdev_get_try_format, try_fmt)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, v4l2_subdev_get_try_crop, try_crop)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, v4l2_subdev_get_try_compose, try_compose)
+
+/**
+ * v4l2_subdev_get_try_format - ancillary routine to call
+ * &struct v4l2_subdev_pad_config->try_fmt
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @cfg: pointer to &struct v4l2_subdev_pad_config array.
+ * @pad: index of the pad in the @cfg array.
+ */
+static inline struct v4l2_mbus_framefmt
+*v4l2_subdev_get_try_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
+{
+ if (WARN_ON(pad >= sd->entity.num_pads))
+ pad = 0;
+ return &cfg[pad].try_fmt;
+}
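
As a sketch, a sub-device's pad get_fmt handler commonly hands back the TRY format stored in the pad config; my_get_fmt and the active-state comment are illustrative, not part of this patch:

    static int my_get_fmt(struct v4l2_subdev *sd,
                          struct v4l2_subdev_pad_config *cfg,
                          struct v4l2_subdev_format *format)
    {
        if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
            format->format = *v4l2_subdev_get_try_format(sd, cfg, format->pad);
            return 0;
        }
        /* otherwise fill format->format from the active hardware state */
        return 0;
    }
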
+
+/**
+ * v4l2_subdev_get_try_crop - ancillary routine to call
+ * &struct v4l2_subdev_pad_config->try_crop
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @cfg: pointer to &struct v4l2_subdev_pad_config array.
+ * @pad: index of the pad in the @cfg array.
+ */
+static inline struct v4l2_rect
+*v4l2_subdev_get_try_crop(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
+{
+ if (WARN_ON(pad >= sd->entity.num_pads))
+ pad = 0;
+ return &cfg[pad].try_crop;
+}
+
+/**
+ * v4l2_subdev_get_try_compose - ancillary routine to call
+ * &struct v4l2_subdev_pad_config->try_compose
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @cfg: pointer to &struct v4l2_subdev_pad_config array.
+ * @pad: index of the pad in the @cfg array.
+ */
+static inline struct v4l2_rect
+*v4l2_subdev_get_try_compose(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
+{
+ if (WARN_ON(pad >= sd->entity.num_pads))
+ pad = 0;
+ return &cfg[pad].try_compose;
+}
#endif
extern const struct v4l2_file_operations v4l2_subdev_fops;
@@ -978,9 +1074,16 @@ void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg);
void v4l2_subdev_init(struct v4l2_subdev *sd,
const struct v4l2_subdev_ops *ops);
-/*
- * Call an ops of a v4l2_subdev, doing the right checks against
- * NULL pointers.
+/**
+ * v4l2_subdev_call - call an operation of a v4l2_subdev.
+ *
+ * @sd: pointer to the &struct v4l2_subdev
+ * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
+ * Each element there groups a set of callback functions.
+ * @f: callback function to be called, if implemented by the sub-device.
+ * The callback functions are defined in groups, according to
+ * each element at &struct v4l2_subdev_ops.
+ * @args...: arguments for @f.
*
* Example: err = v4l2_subdev_call(sd, video, s_std, norm);
*/
@@ -996,6 +1099,14 @@ void v4l2_subdev_init(struct v4l2_subdev *sd,
__result; \
})
+/**
+ * v4l2_subdev_has_op - Checks if a subdev defines a certain operation.
+ *
+ * @sd: pointer to the &struct v4l2_subdev
+ * @o: The group of callback functions in &struct v4l2_subdev_ops
+ * which @f is a part of.
+ * @f: callback function to be checked for its existence.
+ */
#define v4l2_subdev_has_op(sd, o, f) \
((sd)->ops->o && (sd)->ops->o->f)
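
For example, an optional operation can be probed before it is relied upon; fse is a placeholder struct v4l2_subdev_frame_size_enum (sketch):

    if (v4l2_subdev_has_op(sd, pad, enum_frame_size))
        err = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
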
diff --git a/include/media/v4l2-tpg-colors.h b/include/media/v4l2-tpg-colors.h
deleted file mode 100644
index 2a88d1fae0cd..000000000000
--- a/include/media/v4l2-tpg-colors.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * v4l2-tpg-colors.h - Color definitions for the test pattern generator
- *
- * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _V4L2_TPG_COLORS_H_
-#define _V4L2_TPG_COLORS_H_
-
-struct color {
- unsigned char r, g, b;
-};
-
-struct color16 {
- int r, g, b;
-};
-
-enum tpg_color {
- TPG_COLOR_CSC_WHITE,
- TPG_COLOR_CSC_YELLOW,
- TPG_COLOR_CSC_CYAN,
- TPG_COLOR_CSC_GREEN,
- TPG_COLOR_CSC_MAGENTA,
- TPG_COLOR_CSC_RED,
- TPG_COLOR_CSC_BLUE,
- TPG_COLOR_CSC_BLACK,
- TPG_COLOR_75_YELLOW,
- TPG_COLOR_75_CYAN,
- TPG_COLOR_75_GREEN,
- TPG_COLOR_75_MAGENTA,
- TPG_COLOR_75_RED,
- TPG_COLOR_75_BLUE,
- TPG_COLOR_100_WHITE,
- TPG_COLOR_100_YELLOW,
- TPG_COLOR_100_CYAN,
- TPG_COLOR_100_GREEN,
- TPG_COLOR_100_MAGENTA,
- TPG_COLOR_100_RED,
- TPG_COLOR_100_BLUE,
- TPG_COLOR_100_BLACK,
- TPG_COLOR_TEXTFG,
- TPG_COLOR_TEXTBG,
- TPG_COLOR_RANDOM,
- TPG_COLOR_RAMP,
- TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
-};
-
-extern const struct color tpg_colors[TPG_COLOR_MAX];
-extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
-extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
-extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
- [V4L2_XFER_FUNC_SMPTE2084 + 1]
- [TPG_COLOR_CSC_BLACK + 1];
-
-#endif
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index d760aa73ebbb..0bda0adc744f 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -219,7 +219,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
ssize_t videobuf_read_one(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int nonblocking);
-unsigned int videobuf_poll_stream(struct file *file,
+__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait);
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index a14ac7711c92..c9c81990a56c 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <dvbdev.h>
-#include <dmxdev.h>
-#include <dvb_demux.h>
-#include <dvb_net.h>
-#include <dvb_frontend.h>
+#include <media/dvbdev.h>
+#include <media/dmxdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_net.h>
+#include <media/dvb_frontend.h>
#ifndef _VIDEOBUF_DVB_H_
#define _VIDEOBUF_DVB_H_
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ef9b64398c8c..f6818f732f34 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/dma-buf.h>
+#include <linux/bitops.h>
#define VB2_MAX_FRAME (32)
#define VB2_MAX_PLANES (8)
@@ -45,7 +46,7 @@ struct vb2_fileio_data;
struct vb2_threadio_data;
/**
- * struct vb2_mem_ops - memory handling/memory allocator operations
+ * struct vb2_mem_ops - memory handling/memory allocator operations.
* @alloc: allocate video memory and, optionally, allocator private data,
* return ERR_PTR() on failure or a pointer to allocator private,
* per-buffer data on success; the returned private structure
@@ -69,7 +70,7 @@ struct vb2_threadio_data;
* argument to other ops in this structure.
* @put_userptr: inform the allocator that a USERPTR buffer will no longer
* be used.
- * @attach_dmabuf: attach a shared struct dma_buf for a hardware operation;
+ * @attach_dmabuf: attach a shared &struct dma_buf for a hardware operation;
* used for DMABUF memory types; dev is the alloc device
* dbuf is the shared dma_buf; returns ERR_PTR() on failure;
* allocator private per-buffer structure on success;
@@ -145,28 +146,28 @@ struct vb2_mem_ops {
};
/**
- * struct vb2_plane - plane information
- * @mem_priv: private data with this plane
- * @dbuf: dma_buf - shared buffer object
+ * struct vb2_plane - plane information.
+ * @mem_priv: private data with this plane.
+ * @dbuf: dma_buf - shared buffer object.
* @dbuf_mapped: flag to show whether dbuf is mapped or not
- * @bytesused: number of bytes occupied by data in the plane (payload)
- * @length: size of this plane (NOT the payload) in bytes
+ * @bytesused: number of bytes occupied by data in the plane (payload).
+ * @length: size of this plane (NOT the payload) in bytes.
* @min_length: minimum required size of this plane (NOT the payload) in bytes.
* @length is always greater or equal to @min_length.
- * @offset: when memory in the associated struct vb2_buffer is
- * VB2_MEMORY_MMAP, equals the offset from the start of
+ * @m: Union with memtype-specific data.
+ * @m.offset: when memory in the associated struct vb2_buffer is
+ * %VB2_MEMORY_MMAP, equals the offset from the start of
* the device memory for this plane (or is a "cookie" that
- * should be passed to mmap() called on the video node)
- * @userptr: when memory is VB2_MEMORY_USERPTR, a userspace pointer
- * pointing to this plane
- * @fd: when memory is VB2_MEMORY_DMABUF, a userspace file
- * descriptor associated with this plane
- * @m: Union with memtype-specific data (@offset, @userptr or
- * @fd).
+ * should be passed to mmap() called on the video node).
+ * @m.userptr: when memory is %VB2_MEMORY_USERPTR, a userspace pointer
+ * pointing to this plane.
+ * @m.fd: when memory is %VB2_MEMORY_DMABUF, a userspace file
+ * descriptor associated with this plane.
* @data_offset: offset in the plane to the start of data; usually 0,
- * unless there is a header in front of the data
+ * unless there is a header in front of the data.
+ *
* Should contain enough information to be able to cover all the fields
- * of struct v4l2_plane at videodev2.h
+ * of &struct v4l2_plane at videodev2.h.
*/
struct vb2_plane {
void *mem_priv;
@@ -184,35 +185,35 @@ struct vb2_plane {
};
/**
- * enum vb2_io_modes - queue access methods
- * @VB2_MMAP: driver supports MMAP with streaming API
- * @VB2_USERPTR: driver supports USERPTR with streaming API
- * @VB2_READ: driver supports read() style access
- * @VB2_WRITE: driver supports write() style access
- * @VB2_DMABUF: driver supports DMABUF with streaming API
+ * enum vb2_io_modes - queue access methods.
+ * @VB2_MMAP: driver supports MMAP with streaming API.
+ * @VB2_USERPTR: driver supports USERPTR with streaming API.
+ * @VB2_READ: driver supports read() style access.
+ * @VB2_WRITE: driver supports write() style access.
+ * @VB2_DMABUF: driver supports DMABUF with streaming API.
*/
enum vb2_io_modes {
- VB2_MMAP = (1 << 0),
- VB2_USERPTR = (1 << 1),
- VB2_READ = (1 << 2),
- VB2_WRITE = (1 << 3),
- VB2_DMABUF = (1 << 4),
+ VB2_MMAP = BIT(0),
+ VB2_USERPTR = BIT(1),
+ VB2_READ = BIT(2),
+ VB2_WRITE = BIT(3),
+ VB2_DMABUF = BIT(4),
};
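
These values are OR'ed together when a driver sets up its vb2_queue; my_vb2_ops is a placeholder and the DMA-contig allocator is just one possible choice (sketch):

    q->type     = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
    q->ops      = &my_vb2_ops;
    q->mem_ops  = &vb2_dma_contig_memops;
    ret = vb2_queue_init(q);
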
/**
- * enum vb2_buffer_state - current video buffer state
- * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control
- * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
- * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
- * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
- * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
+ * enum vb2_buffer_state - current video buffer state.
+ * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control.
+ * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf.
+ * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver.
+ * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver.
+ * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver.
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
- * in a hardware operation
+ * in a hardware operation.
* @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
- * not yet dequeued to userspace
+ * not yet dequeued to userspace.
* @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer
* has ended with an error, which will be reported
- * to the userspace when it is dequeued
+ * to the userspace when it is dequeued.
*/
enum vb2_buffer_state {
VB2_BUF_STATE_DEQUEUED,
@@ -228,15 +229,15 @@ enum vb2_buffer_state {
struct vb2_queue;
/**
- * struct vb2_buffer - represents a video buffer
- * @vb2_queue: the queue to which this driver belongs
- * @index: id number of the buffer
- * @type: buffer type
- * @memory: the method, in which the actual data is passed
+ * struct vb2_buffer - represents a video buffer.
+ * @vb2_queue: pointer to &struct vb2_queue with the queue to
+ * which this driver belongs.
+ * @index: id number of the buffer.
+ * @type: buffer type.
+ * @memory: the method, in which the actual data is passed.
* @num_planes: number of planes in the buffer
- * on an internal driver queue
- * @planes: private per-plane information; do not change
- * @timestamp: frame timestamp in ns
+ * on an internal driver queue.
+ * @timestamp: frame timestamp in ns.
*/
struct vb2_buffer {
struct vb2_queue *vb2_queue;
@@ -244,7 +245,6 @@ struct vb2_buffer {
unsigned int type;
unsigned int memory;
unsigned int num_planes;
- struct vb2_plane planes[VB2_MAX_PLANES];
u64 timestamp;
/* private: internal use only
@@ -254,9 +254,11 @@ struct vb2_buffer {
* all buffers queued from userspace
* done_entry: entry on the list that stores all buffers ready
* to be dequeued to userspace
+ * planes: per-plane information; do not change
*/
enum vb2_buffer_state state;
+ struct vb2_plane planes[VB2_MAX_PLANES];
struct list_head queued_entry;
struct list_head done_entry;
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -292,7 +294,10 @@ struct vb2_buffer {
};
/**
- * struct vb2_ops - driver-specific callbacks
+ * struct vb2_ops - driver-specific callbacks.
+ *
+ * These operations are not called from interrupt context except where
+ * mentioned specifically.
*
* @queue_setup: called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS()
* handlers before memory allocation. It can be called
@@ -356,12 +361,12 @@ struct vb2_buffer {
* driver can return an error if hardware fails, in that
* case all buffers that have been already given by
* the @buf_queue callback are to be returned by the driver
- * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED.
- * If you need a minimum number of buffers before you can
- * start streaming, then set @min_buffers_needed in the
- * vb2_queue structure. If that is non-zero then
- * @start_streaming won't be called until at least that
- * many buffers have been queued up by userspace.
+ * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED
+ * or %VB2_BUF_STATE_REQUEUEING. If you need a minimum
+ * number of buffers before you can start streaming, then
+ * set &vb2_queue->min_buffers_needed. If that is non-zero
+ * then @start_streaming won't be called until at least
+ * that many buffers have been queued up by userspace.
* @stop_streaming: called when 'streaming' state must be disabled; driver
* should stop any DMA transactions or wait until they
* finish and give back all buffers it got from &buf_queue
@@ -396,18 +401,18 @@ struct vb2_ops {
};
/**
- * struct vb2_buf_ops - driver-specific callbacks
+ * struct vb2_buf_ops - driver-specific callbacks.
*
* @verify_planes_array: Verify that a given user space structure contains
* enough planes for the buffer. This is called
* for each dequeued buffer.
- * @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
- * For V4L2 this is a struct v4l2_buffer.
- * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
+ * @fill_user_buffer: given a &vb2_buffer fill in the userspace structure.
+ * For V4L2 this is a &struct v4l2_buffer.
+ * @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer.
* If the userspace structure is invalid, then this op
* will return an error.
* @copy_timestamp: copy the timestamp from a userspace structure to
- * the vb2_buffer struct.
+ * the &struct vb2_buffer.
*/
struct vb2_buf_ops {
int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
@@ -418,20 +423,21 @@ struct vb2_buf_ops {
};
/**
- * struct vb2_queue - a videobuf queue
+ * struct vb2_queue - a videobuf queue.
*
* @type: private buffer type whose content is defined by the vb2-core
* caller. For example, for V4L2, it should match
- * the types defined on enum &v4l2_buf_type
- * @io_modes: supported io methods (see vb2_io_modes enum)
+ * the types defined on &enum v4l2_buf_type.
+ * @io_modes: supported io methods (see &enum vb2_io_modes).
+ * @alloc_devs: &struct device memory type/allocator-specific per-plane device
* @dev: device to use for the default allocation context if the driver
* doesn't fill in the @alloc_devs array.
* @dma_attrs: DMA attributes to use for the DMA.
* @bidirectional: when this flag is set the DMA direction for the buffers of
- * this queue will be overridden with DMA_BIDIRECTIONAL direction.
+ * this queue will be overridden with %DMA_BIDIRECTIONAL direction.
* This is useful in cases where the hardware (firmware) writes to
- * a buffer which is mapped as read (DMA_TO_DEVICE), or reads from
- * buffer which is mapped for write (DMA_FROM_DEVICE) in order
+ * a buffer which is mapped as read (%DMA_TO_DEVICE), or reads from
+ * buffer which is mapped for write (%DMA_FROM_DEVICE) in order
* to satisfy some internal hardware restrictions or adds a padding
* needed by the processing algorithm. In case the DMA mapping is
* not bidirectional but the hardware (firmware) trying to access
@@ -440,10 +446,10 @@ struct vb2_buf_ops {
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
* @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
- * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
+ * @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF
* has not been called. This is a vb1 idiom that has been adopted
* also by vb2.
- * @lock: pointer to a mutex that protects the vb2_queue struct. The
+ * @lock: pointer to a mutex that protects the &struct vb2_queue. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
* itself, then this should be set to NULL. This lock is not used
@@ -454,17 +460,17 @@ struct vb2_buf_ops {
* drivers to easily associate an owner filehandle with the queue.
* @ops: driver-specific callbacks
* @mem_ops: memory allocator specific callbacks
- * @buf_ops: callbacks to deliver buffer information
- * between user-space and kernel-space
- * @drv_priv: driver private data
+ * @buf_ops: callbacks to deliver buffer information
+ * between user-space and kernel-space.
+ * @drv_priv: driver private data.
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
- * structure type. for example, sizeof(struct vb2_v4l2_buffer)
+ * structure type. For example, ``sizeof(struct vb2_v4l2_buffer)``
* will be used for v4l2.
- * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and
- * V4L2_BUF_FLAG_TSTAMP_SRC_*
+ * @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and
+ * ``V4L2_BUF_FLAG_TSTAMP_SRC_*``
* @gfp_flags: additional gfp flags used when allocating the buffers.
- * Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32
+ * Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32
* to force the buffer allocation to a specific memory zone.
* @min_buffers_needed: the minimum number of buffers needed before
* @start_streaming can be called. Used when a DMA engine
@@ -484,20 +490,19 @@ struct vb2_buf_ops {
* @done_list: list of buffers ready to be dequeued to userspace
* @done_lock: lock to protect done_list list
* @done_wq: waitqueue for processes waiting for buffers ready to be dequeued
- * @alloc_devs: memory type/allocator-specific per-plane device
* @streaming: current streaming state
* @start_streaming_called: @start_streaming was called successfully and we
* started streaming.
* @error: a fatal error occurred on the queue
* @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
* buffers. Only set for capture queues if qbuf has not yet been
- * called since poll() needs to return POLLERR in that situation.
+ * called since poll() needs to return %EPOLLERR in that situation.
* @is_multiplanar: set if buffer type is multiplanar
* @is_output: set if buffer type is output
* @copy_timestamp: set if vb2-core should set timestamps
* @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the
* last decoded buffer was already dequeued. Set for capture queues
- * when a buffer with the V4L2_BUF_FLAG_LAST is dequeued.
+ * when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued.
* @fileio: file io emulator internal data, used only if emulator is active
* @threadio: thread io internal data, used only if thread is active
*/
@@ -525,6 +530,8 @@ struct vb2_queue {
gfp_t gfp_flags;
u32 min_buffers_needed;
+ struct device *alloc_devs[VB2_MAX_PLANES];
+
/* private: internal use only */
struct mutex mmap_lock;
unsigned int memory;
@@ -540,8 +547,6 @@ struct vb2_queue {
spinlock_t done_lock;
wait_queue_head_t done_wq;
- struct device *alloc_devs[VB2_MAX_PLANES];
-
unsigned int streaming:1;
unsigned int start_streaming_called:1;
unsigned int error:1;
@@ -568,9 +573,10 @@ struct vb2_queue {
};
/**
- * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
- * @vb: vb2_buffer to which the plane in question belongs to
- * @plane_no: plane number for which the address is to be returned
+ * vb2_plane_vaddr() - Return a kernel virtual address of a given plane.
+ * @vb: pointer to &struct vb2_buffer to which the plane in
+ * question belongs to.
+ * @plane_no: plane number for which the address is to be returned.
*
* This function returns a kernel virtual address of a given plane if
* such a mapping exist, NULL otherwise.
@@ -578,9 +584,10 @@ struct vb2_queue {
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
/**
- * vb2_plane_cookie() - Return allocator specific cookie for the given plane
- * @vb: vb2_buffer to which the plane in question belongs to
- * @plane_no: plane number for which the cookie is to be returned
+ * vb2_plane_cookie() - Return allocator specific cookie for the given plane.
+ * @vb: pointer to &struct vb2_buffer to which the plane in
+ * question belongs to.
+ * @plane_no: plane number for which the cookie is to be returned.
*
* This function returns an allocator specific cookie for a given plane if
* available, NULL otherwise. The allocator should provide some simple static
@@ -591,14 +598,15 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
/**
- * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
- * @vb: vb2_buffer returned from the driver
- * @state: either %VB2_BUF_STATE_DONE if the operation finished
+ * vb2_buffer_done() - inform videobuf that an operation on a buffer
+ * is finished.
+ * @vb: pointer to &struct vb2_buffer to be used.
+ * @state: state of the buffer, as defined by &enum vb2_buffer_state.
+ * Either %VB2_BUF_STATE_DONE if the operation finished
* successfully, %VB2_BUF_STATE_ERROR if the operation finished
- * with an error or %VB2_BUF_STATE_QUEUED if the driver wants to
- * requeue buffers. If start_streaming fails then it should return
- * buffers with state %VB2_BUF_STATE_QUEUED to put them back into
- * the queue.
+ * with an error or any of %VB2_BUF_STATE_QUEUED or
+ * %VB2_BUF_STATE_REQUEUEING if the driver wants to
+ * requeue buffers (see below).
*
* This function should be called by the driver after a hardware operation on
* a buffer is finished and the buffer may be returned to userspace. The driver
@@ -607,15 +615,20 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
* to the driver by &vb2_ops->buf_queue can be passed to this function.
*
* While streaming a buffer can only be returned in state DONE or ERROR.
- * The start_streaming op can also return them in case the DMA engine cannot
- * be started for some reason. In that case the buffers should be returned with
- * state QUEUED.
+ * The &vb2_ops->start_streaming op can also return them in case the DMA engine
+ * cannot be started for some reason. In that case the buffers should be
+ * returned with state QUEUED or REQUEUEING to put them back into the queue.
+ *
+ * %VB2_BUF_STATE_REQUEUEING is like %VB2_BUF_STATE_QUEUED, but it also calls
+ * &vb2_ops->buf_queue to queue buffers back to the driver. Note that calling
+ * vb2_buffer_done(..., VB2_BUF_STATE_REQUEUEING) from interrupt context will
+ * result in &vb2_ops->buf_queue being called in interrupt context as well.
*/
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
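For illustration, a minimal sketch of a per-frame completion interrupt handler built on this helper. Everything prefixed my_ (struct my_dev, struct my_buf, the buf_list bookkeeping) is a hypothetical driver construct, not part of the vb2 API; only the vb2_buffer_done() call and the DONE/ERROR rule above come from this header.

#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <media/videobuf2-core.h>

/* Hypothetical driver bookkeeping, declared only to keep the sketch self-contained. */
struct my_buf {
        struct vb2_buffer vb;
        struct list_head list;
};

struct my_dev {
        spinlock_t slock;
        struct list_head buf_list;      /* buffers handed over via &vb2_ops->buf_queue */
};

static irqreturn_t my_frame_done_irq(int irq, void *priv)
{
        struct my_dev *dev = priv;
        struct my_buf *buf;

        spin_lock(&dev->slock);
        buf = list_first_entry_or_null(&dev->buf_list, struct my_buf, list);
        if (buf) {
                list_del(&buf->list);
                buf->vb.timestamp = ktime_get_ns();
                /* DONE (or ERROR) may be reported from interrupt context. */
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
        }
        spin_unlock(&dev->slock);

        return IRQ_HANDLED;
}

A failing &vb2_ops->start_streaming would instead walk its pending list and report each buffer with %VB2_BUF_STATE_QUEUED or %VB2_BUF_STATE_REQUEUEING, as described above.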
/**
- * vb2_discard_done() - discard all buffers marked as DONE
- * @q: videobuf2 queue
+ * vb2_discard_done() - discard all buffers marked as DONE.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* This function is intended to be used with suspend/resume operations. It
* discards all 'done' buffers as they would be too old to be requested after
@@ -628,74 +641,83 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
void vb2_discard_done(struct vb2_queue *q);
/**
- * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
- * @q: videobuf2 queue
+ * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* This function will wait until all buffers that have been given to the driver
* by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It
- * doesn't call wait_prepare()/wait_finish() pair. It is intended to be called
- * with all locks taken, for example from &vb2_ops->stop_streaming callback.
+ * doesn't call &vb2_ops->wait_prepare/&vb2_ops->wait_finish pair.
+ * It is intended to be called with all locks taken, for example from
+ * &vb2_ops->stop_streaming callback.
*/
int vb2_wait_for_all_buffers(struct vb2_queue *q);
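A hedged sketch of the intended call site, reusing the hypothetical my_dev/my_buf types from the previous sketch and assuming &vb2_queue->drv_priv points at the my_dev instance; my_stop_hw() is likewise illustrative.

static void my_stop_streaming(struct vb2_queue *q)
{
        struct my_dev *dev = vb2_get_drv_priv(q);
        struct my_buf *buf, *tmp;

        my_stop_hw(dev);        /* hypothetical: halt DMA so no new completions arrive */

        /* Return every buffer the driver still owns to vb2. */
        spin_lock_irq(&dev->slock);
        list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irq(&dev->slock);

        /* Any completion that raced with the loop above is waited for here. */
        vb2_wait_for_all_buffers(q);
}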
/**
- * vb2_core_querybuf() - query video buffer information
- * @q: videobuf queue
- * @index: id number of the buffer
- * @pb: buffer struct passed from userspace
+ * vb2_core_querybuf() - query video buffer information.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @index: id number of the buffer.
+ * @pb: buffer struct passed from userspace.
+ *
+ * Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called
+ * internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
*
- * Should be called from vidioc_querybuf ioctl handler in driver.
* The passed buffer should have been verified.
+ *
* This function fills the relevant information for the userspace.
*/
void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
/**
- * vb2_core_reqbufs() - Initiate streaming
- * @q: videobuf2 queue
- * @memory: memory type
- * @count: requested buffer count
+ * vb2_core_reqbufs() - Initiate streaming.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @memory: memory type, as defined by &enum vb2_memory.
+ * @count: requested buffer count.
*
- * Should be called from vidioc_reqbufs ioctl handler of a driver.
+ * Videobuf2 core helper to implement VIDIOC_REQBUFS() operation. It is called
+ * internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
*
* This function:
*
- * #) verifies streaming parameters passed from the userspace,
- * #) sets up the queue,
+ * #) verifies streaming parameters passed from the userspace;
+ * #) sets up the queue;
* #) negotiates number of buffers and planes per buffer with the driver
- * to be used during streaming,
- * #) allocates internal buffer structures (struct vb2_buffer), according to
- * the agreed parameters,
+ * to be used during streaming;
+ * #) allocates internal buffer structures (&struct vb2_buffer), according to
+ * the agreed parameters;
* #) for MMAP memory type, allocates actual video memory, using the
- * memory handling/allocation routines provided during queue initialization
+ * memory handling/allocation routines provided during queue initialization.
*
* If req->count is 0, all the memory will be freed instead.
- * If the queue has been allocated previously (by a previous vb2_reqbufs) call
- * and the queue is not busy, memory will be reallocated.
*
- * The return values from this function are intended to be directly returned
- * from vidioc_reqbufs handler in driver.
+ * If the queue has been allocated previously by a previous vb2_core_reqbufs()
+ * call and the queue is not busy, memory will be reallocated.
+ *
+ * Return: returns zero on success; an error code otherwise.
*/
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int *count);
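Step 3 of the list above is answered by the driver's &vb2_ops->queue_setup callback. A minimal single-plane sketch follows, assuming the queue_setup prototype of this vb2 version and a hypothetical MY_IMAGE_SIZE frame size; the CREATE_BUFS branch follows the usual vb2 convention that a non-zero *num_planes means the sizes were requested by userspace and only need validating.

static int my_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
                          unsigned int *num_planes, unsigned int sizes[],
                          struct device *alloc_devs[])
{
        if (*num_planes)
                /* VIDIOC_CREATE_BUFS() path: only validate the requested size. */
                return sizes[0] < MY_IMAGE_SIZE ? -EINVAL : 0;

        *num_planes = 1;
        sizes[0] = MY_IMAGE_SIZE;
        if (*num_buffers < 3)
                *num_buffers = 3;       /* ask vb2 for at least three buffers */

        return 0;
}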
/**
* vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
- * @q: videobuf2 queue
- * @memory: memory type
- * @count: requested buffer count
- * @requested_planes: number of planes requested
- * @requested_sizes: array with the size of the planes
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @memory: memory type, as defined by &enum vb2_memory.
+ * @count: requested buffer count.
+ * @requested_planes: number of planes requested.
+ * @requested_sizes: array with the size of the planes.
+ *
+ * Videobuf2 core helper to implement VIDIOC_CREATE_BUFS() operation. It is
+ * called internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
*
- * Should be called from VIDIOC_CREATE_BUFS() ioctl handler of a driver.
* This function:
*
- * #) verifies parameter sanity
- * #) calls the .queue_setup() queue operation
- * #) performs any necessary memory allocations
+ * #) verifies parameter sanity;
+ * #) calls the &vb2_ops->queue_setup queue operation;
+ * #) performs any necessary memory allocations.
*
- * Return: the return values from this function are intended to be directly
- * returned from VIDIOC_CREATE_BUFS() handler in driver.
+ * Return: returns zero on success; an error code otherwise.
*/
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int *count, unsigned int requested_planes,
@@ -703,57 +725,61 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
/**
* vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
- * to the kernel
- * @q: videobuf2 queue
- * @index: id number of the buffer
- * @pb: buffer structure passed from userspace to vidioc_prepare_buf
- * handler in driver
+ * to the kernel.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @index: id number of the buffer.
+ * @pb: buffer structure passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
+ *
+ * Videobuf2 core helper to implement VIDIOC_PREPARE_BUF() operation. It is
+ * called internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
*
- * Should be called from vidioc_prepare_buf ioctl handler of a driver.
* The passed buffer should have been verified.
- * This function calls buf_prepare callback in the driver (if provided),
- * in which driver-specific buffer initialization can be performed,
*
- * The return values from this function are intended to be directly returned
- * from vidioc_prepare_buf handler in driver.
+ * This function calls the &vb2_ops->buf_prepare callback in the driver
+ * (if provided), in which driver-specific buffer initialization can
+ * be performed.
+ *
+ * Return: returns zero on success; an error code otherwise.
*/
int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
/**
* vb2_core_qbuf() - Queue a buffer from userspace
*
- * @q: videobuf2 queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
* @index: id number of the buffer
- * @pb: buffer structure passed from userspace to vidioc_qbuf handler
- * in driver
+ * @pb: buffer structure passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_qbuf handler in driver
*
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * The passed buffer should have been verified.
+ * Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called
+ * internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
*
* This function:
*
- * #) if necessary, calls buf_prepare callback in the driver (if provided), in
- * which driver-specific buffer initialization can be performed,
+ * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
+ * (if provided), in which driver-specific buffer initialization can
+ * be performed;
* #) if streaming is on, queues the buffer in driver by the means of
* &vb2_ops->buf_queue callback for processing.
*
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
+ * Return: returns zero on success; an error code otherwise.
*/
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
/**
* vb2_core_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue
* @pindex: pointer to the buffer index. May be NULL
- * @pb: buffer structure passed from userspace to vidioc_dqbuf handler
- * in driver
+ * @pb: buffer structure passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_dqbuf handler in driver.
* @nonblocking: if true, this call will not sleep waiting for a buffer if no
* buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
+ * would be passing (file->f_flags & O_NONBLOCK) here.
*
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * The passed buffer should have been verified.
+ * Videobuf2 core helper to implement VIDIOC_DQBUF() operation. It is called
+ * internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
*
* This function:
*
@@ -763,73 +789,108 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
* #) the buffer struct members are filled with relevant information for
* the userspace.
*
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
+ * Return: returns zero on success; an error code otherwise.
*/
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
bool nonblocking);
+/**
+ * vb2_core_streamon() - Implements VB2 stream ON logic
+ *
+ * @q: pointer to &struct vb2_queue with videobuf2 queue
+ * @type: type of the queue to be started.
+ * For V4L2, this is defined by &enum v4l2_buf_type.
+ *
+ * Videobuf2 core helper to implement VIDIOC_STREAMON() operation. It is called
+ * internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
+ *
+ * Return: returns zero on success; an error code otherwise.
+ */
int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
+
+/**
+ * vb2_core_streamoff() - Implements VB2 stream OFF logic
+ *
+ * @q: pointer to &struct vb2_queue with videobuf2 queue
+ * @type: type of the queue to be stopped.
+ * For V4L2, this is defined by &enum v4l2_buf_type.
+ *
+ * Videobuf2 core helper to implement VIDIOC_STREAMOFF() operation. It is
+ * called internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
+ *
+ * Return: returns zero on success; an error code otherwise.
+ */
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
/**
- * vb2_core_expbuf() - Export a buffer as a file descriptor
- * @q: videobuf2 queue
- * @fd: file descriptor associated with DMABUF (set by driver) *
- * @type: buffer type
- * @index: id number of the buffer
+ * vb2_core_expbuf() - Export a buffer as a file descriptor.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @fd: pointer to the file descriptor associated with DMABUF
+ * (set by driver).
+ * @type: buffer type.
+ * @index: id number of the buffer.
* @plane: index of the plane to be exported, 0 for single plane queues
- * @flags: flags for newly created file, currently only O_CLOEXEC is
- * supported, refer to manual of open syscall for more details
+ * @flags: file flags for the newly created file, as defined at
+ * include/uapi/asm-generic/fcntl.h.
+ * Currently, the only supported flag is %O_CLOEXEC;
+ * refer to the manual of the open syscall for more details.
*
- * The return values from this function are intended to be directly returned
- * from vidioc_expbuf handler in driver.
+ * Videobuf2 core helper to implement VIDIOC_EXPBUF() operation. It is called
+ * internally by an API-specific handler, like the one provided by
+ * ``videobuf2-v4l2.h``.
+ *
+ * Return: returns zero on success; an error code otherwise.
*/
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
unsigned int index, unsigned int plane, unsigned int flags);
/**
* vb2_core_queue_init() - initialize a videobuf2 queue
- * @q: videobuf2 queue; this structure should be allocated in driver
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * This structure should be allocated in the driver.
*
- * The vb2_queue structure should be allocated by the driver. The driver is
+ * The &vb2_queue structure should be allocated by the driver. The driver is
* responsible for clearing its content and setting initial values for some
* required entries before calling this function.
- * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
- * to the struct vb2_queue description in include/media/videobuf2-core.h
- * for more information.
+ *
+ * .. note::
+ *
+ * The following fields at @q should be set before calling this function:
+ * &vb2_queue->ops, &vb2_queue->mem_ops, &vb2_queue->type and
+ * &vb2_queue->io_modes.
*/
int vb2_core_queue_init(struct vb2_queue *q);
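Putting the note above into practice, a minimal initialization sketch for a hypothetical driver. struct my_dev (with an embedded queue member), my_vb2_ops, struct my_buf and MY_QUEUE_TYPE are assumptions, and the vmalloc allocator is chosen arbitrarily.

#include <media/videobuf2-vmalloc.h>    /* vb2_vmalloc_memops */

static int my_init_queue(struct my_dev *dev)
{
        struct vb2_queue *q = &dev->queue;

        memset(q, 0, sizeof(*q));
        q->type = MY_QUEUE_TYPE;        /* e.g. V4L2_BUF_TYPE_VIDEO_CAPTURE */
        q->io_modes = VB2_MMAP | VB2_DMABUF;
        q->ops = &my_vb2_ops;           /* must provide queue_setup and buf_queue */
        q->mem_ops = &vb2_vmalloc_memops;
        q->drv_priv = dev;
        q->buf_struct_size = sizeof(struct my_buf);
        q->min_buffers_needed = 2;      /* defer start_streaming until 2 are queued */

        return vb2_core_queue_init(q);
}

V4L2 drivers normally go through the vb2_queue_init() wrapper from videobuf2-v4l2.h instead, which fills in the V4L2-specific &vb2_queue->buf_ops for them.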
/**
* vb2_core_queue_release() - stop streaming, release the queue and free memory
- * @q: videobuf2 queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* This function stops streaming and performs necessary clean ups, including
* freeing video buffer memory. The driver is responsible for freeing
- * the vb2_queue structure itself.
+ * the &struct vb2_queue itself.
*/
void vb2_core_queue_release(struct vb2_queue *q);
/**
* vb2_queue_error() - signal a fatal error on the queue
- * @q: videobuf2 queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* Flag that a fatal unrecoverable error has occurred and wake up all processes
- * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
- * buffers will return -EIO.
+ * waiting on the queue. Polling will now set %EPOLLERR and queuing and dequeuing
+ * buffers will return %-EIO.
*
- * The error flag will be cleared when cancelling the queue, either from
- * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
+ * The error flag will be cleared when canceling the queue, either from
+ * vb2_streamoff() or vb2_queue_release(). Drivers should thus not call this
* function before starting the stream, otherwise the error flag will remain set
* until the queue is released when closing the device node.
*/
void vb2_queue_error(struct vb2_queue *q);
/**
- * vb2_mmap() - map video buffers into application address space
- * @q: videobuf2 queue
- * @vma: vma passed to the mmap file operation handler in the driver
+ * vb2_mmap() - map video buffers into application address space.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @vma: pointer to &struct vm_area_struct with the vma passed
+ * to the mmap file operation handler in the driver.
*
* Should be called from mmap file operation handler of a driver.
* This function maps one plane of one of the available video buffers to
@@ -837,8 +898,10 @@ void vb2_queue_error(struct vb2_queue *q);
* has to be called once per each plane per each buffer previously allocated.
*
* When the userspace application calls mmap, it passes to it an offset returned
- * to it earlier by the means of vidioc_querybuf handler. That offset acts as
- * a "cookie", which is then used to identify the plane to be mapped.
+ * to it earlier by the means of &v4l2_ioctl_ops->vidioc_querybuf handler.
+ * That offset acts as a "cookie", which is then used to identify the plane
+ * to be mapped.
+ *
* This function finds a plane with a matching offset and a mapping is performed
* by the means of a provided memory operation.
*
@@ -848,6 +911,21 @@ void vb2_queue_error(struct vb2_queue *q);
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
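A minimal sketch of the corresponding file-operation glue for a hypothetical V4L2 driver (struct my_dev and its queue member are assumptions); many drivers simply point their .mmap at the stock vb2_fop_mmap() helper from videobuf2-v4l2.h instead.

static int my_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_dev *dev = video_drvdata(file);       /* helper from v4l2-dev.h */

        return vb2_mmap(&dev->queue, vma);
}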
#ifndef CONFIG_MMU
+/**
+ * vb2_get_unmapped_area() - map video buffers into application address space.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @addr: memory address.
+ * @len: buffer size.
+ * @pgoff: page offset.
+ * @flags: memory flags.
+ *
+ * This function is used on noMMU platforms to propose an address mapping
+ * for a given buffer. It's intended to be used as a handler for the
+ * &file_operations->get_unmapped_area operation.
+ *
+ * The mmap() syscall routines call it to get a proposed address for the
+ * mapping, when ``!CONFIG_MMU``.
+ */
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
unsigned long addr,
unsigned long len,
@@ -856,10 +934,12 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
#endif
/**
- * vb2_core_poll() - implements poll userspace operation
- * @q: videobuf2 queue
- * @file: file argument passed to the poll file operation handler
- * @wait: wait argument passed to the poll file operation handler
+ * vb2_core_poll() - implements poll() syscall logic.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @file: &struct file argument passed to the poll
+ * file operation handler.
+ * @wait: &poll_table wait argument passed to the poll
+ * file operation handler.
*
* This function implements poll file operation handler for a driver.
* For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
@@ -871,19 +951,35 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
-unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait);
+/**
+ * vb2_read() - implements read() syscall logic.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @data: pointer to the target userspace buffer
+ * @count: number of bytes to read
+ * @ppos: file handle position tracking pointer
+ * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
+ */
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
loff_t *ppos, int nonblock);
+/**
+ * vb2_write() - implements write() syscall logic.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @data: pointer to the source userspace buffer
+ * @count: number of bytes to write
+ * @ppos: file handle position tracking pointer
+ * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
+ */
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
loff_t *ppos, int nonblock);
/**
- * typedef vb2_thread_fnc - callback function for use with vb2_thread
+ * typedef vb2_thread_fnc - callback function for use with vb2_thread.
*
- * @vb: pointer to struct &vb2_buffer
- * @priv: pointer to a private pointer
+ * @vb: pointer to &struct vb2_buffer.
+ * @priv: pointer to private data.
*
* This is called whenever a buffer is dequeued in the thread.
*/
@@ -891,13 +987,13 @@ typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv);
/**
* vb2_thread_start() - start a thread for the given queue.
- * @q: videobuf queue
- * @fnc: callback function
- * @priv: priv pointer passed to the callback function
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @fnc: &vb2_thread_fnc callback function.
+ * @priv: priv pointer passed to the callback function.
* @thread_name: the name of the thread. This will be prefixed with "vb2-".
*
* This starts a thread that will queue and dequeue until an error occurs
- * or @vb2_thread_stop is called.
+ * or vb2_thread_stop() is called.
*
* .. attention::
*
@@ -910,13 +1006,13 @@ int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
/**
* vb2_thread_stop() - stop the thread for the given queue.
- * @q: videobuf queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*/
int vb2_thread_stop(struct vb2_queue *q);
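A sketch of the thread API in use for a hypothetical capture driver; my_dev, my_consume() and the "mydev" thread name are illustrative, and only plane 0 is drained.

static int my_thread_fn(struct vb2_buffer *vb, void *priv)
{
        struct my_dev *dev = priv;
        void *vaddr = vb2_plane_vaddr(vb, 0);

        if (vaddr)
                my_consume(dev, vaddr, vb2_get_plane_payload(vb, 0));

        return 0;       /* a non-zero return stops the thread */
}

static int my_start_read_thread(struct my_dev *dev)
{
        /* The name is prefixed, so the thread shows up as "vb2-mydev". */
        return vb2_thread_start(&dev->queue, my_thread_fn, dev, "mydev");
}

static void my_stop_read_thread(struct my_dev *dev)
{
        vb2_thread_stop(&dev->queue);
}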
/**
- * vb2_is_streaming() - return streaming status of the queue
- * @q: videobuf queue
+ * vb2_is_streaming() - return streaming status of the queue.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*/
static inline bool vb2_is_streaming(struct vb2_queue *q)
{
@@ -925,15 +1021,16 @@ static inline bool vb2_is_streaming(struct vb2_queue *q)
/**
* vb2_fileio_is_active() - return true if fileio is active.
- * @q: videobuf queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* This returns true if read() or write() is used to stream the data
* as opposed to stream I/O. This is almost never an important distinction,
* except in rare cases. One such case is that using read() or write() to
- * stream a format using V4L2_FIELD_ALTERNATE is not allowed since there
+ * stream a format using %V4L2_FIELD_ALTERNATE is not allowed since there
* is no way you can pass the field information of each buffer to/from
* userspace. A driver that supports this field format should check for
- * this in the queue_setup op and reject it if this function returns true.
+ * this in the &vb2_ops->queue_setup op and reject it if this function returns
+ * true.
*/
static inline bool vb2_fileio_is_active(struct vb2_queue *q)
{
@@ -941,8 +1038,8 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q)
}
/**
- * vb2_is_busy() - return busy status of the queue
- * @q: videobuf queue
+ * vb2_is_busy() - return busy status of the queue.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* This function checks if queue has any buffers allocated.
*/
@@ -952,8 +1049,8 @@ static inline bool vb2_is_busy(struct vb2_queue *q)
}
/**
- * vb2_get_drv_priv() - return driver private data associated with the queue
- * @q: videobuf queue
+ * vb2_get_drv_priv() - return driver private data associated with the queue.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*/
static inline void *vb2_get_drv_priv(struct vb2_queue *q)
{
@@ -961,10 +1058,11 @@ static inline void *vb2_get_drv_priv(struct vb2_queue *q)
}
/**
- * vb2_set_plane_payload() - set bytesused for the plane plane_no
- * @vb: buffer for which plane payload should be set
- * @plane_no: plane number for which payload should be set
- * @size: payload in bytes
+ * vb2_set_plane_payload() - set bytesused for the plane @plane_no.
+ * @vb: pointer to &struct vb2_buffer to which the plane in
+ * question belongs to.
+ * @plane_no: plane number for which payload should be set.
+ * @size: payload in bytes.
*/
static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
unsigned int plane_no, unsigned long size)
@@ -975,8 +1073,9 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
/**
* vb2_get_plane_payload() - get bytesused for the plane plane_no
- * @vb: buffer for which plane payload should be set
- * @plane_no: plane number for which payload should be set
+ * @vb: pointer to &struct vb2_buffer to which the plane in
+ * question belongs to.
+ * @plane_no: plane number for which payload should be returned.
*/
static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
unsigned int plane_no)
@@ -987,9 +1086,10 @@ static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
}
/**
- * vb2_plane_size() - return plane size in bytes
- * @vb: buffer for which plane size should be returned
- * @plane_no: plane number for which size should be returned
+ * vb2_plane_size() - return plane size in bytes.
+ * @vb: pointer to &struct vb2_buffer to which the plane in
+ * question belongs to.
+ * @plane_no: plane number for which size should be returned.
*/
static inline unsigned long
vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
@@ -1000,8 +1100,8 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
}
/**
- * vb2_start_streaming_called() - return streaming status of driver
- * @q: videobuf queue
+ * vb2_start_streaming_called() - return streaming status of driver.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*/
static inline bool vb2_start_streaming_called(struct vb2_queue *q)
{
@@ -1009,8 +1109,8 @@ static inline bool vb2_start_streaming_called(struct vb2_queue *q)
}
/**
- * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue
- * @q: videobuf queue
+ * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*/
static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
{
@@ -1024,10 +1124,10 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
/**
* vb2_buffer_in_use() - return true if the buffer is in use and
- * the queue cannot be freed (by the means of REQBUFS(0)) call
+ * the queue cannot be freed (by the means of VIDIOC_REQBUFS(0)) call.
*
- * @vb: buffer for which plane size should be returned
- * @q: videobuf queue
+ * @vb: buffer whose in-use state should be checked.
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*/
bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
@@ -1035,11 +1135,11 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
* vb2_verify_memory_type() - Check whether the memory type and buffer type
* passed to a buffer operation are compatible with the queue.
*
- * @q: videobuf queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
* @memory: memory model, as defined by enum &vb2_memory.
* @type: private buffer type whose content is defined by the vb2-core
* caller. For example, for V4L2, it should match
- * the types defined on enum &v4l2_buf_type
+ * the types defined on enum &v4l2_buf_type.
*/
int vb2_verify_memory_type(struct vb2_queue *q,
enum vb2_memory memory, unsigned int type);
diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h
index 5a31faa24f1a..8605366ec87c 100644
--- a/include/media/videobuf2-dvb.h
+++ b/include/media/videobuf2-dvb.h
@@ -2,12 +2,11 @@
#ifndef _VIDEOBUF2_DVB_H_
#define _VIDEOBUF2_DVB_H_
-#include <dvbdev.h>
-#include <dmxdev.h>
-#include <dvb_demux.h>
-#include <dvb_net.h>
-#include <dvb_frontend.h>
-
+#include <media/dvbdev.h>
+#include <media/dmxdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_net.h>
+#include <media/dvb_frontend.h>
#include <media/videobuf2-v4l2.h>
/* We don't actually need to include media-device.h here */
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index a6ed091b79ce..4b5b84f93538 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -19,11 +19,11 @@
#include <linux/refcount.h>
/**
- * struct vb2_vmarea_handler - common vma refcount tracking handler
+ * struct vb2_vmarea_handler - common vma refcount tracking handler.
*
- * @refcount: pointer to refcount entry in the buffer
- * @put: callback to function that decreases buffer refcount
- * @arg: argument for @put callback
+ * @refcount: pointer to &refcount_t entry in the buffer.
+ * @put: callback to function that decreases buffer refcount.
+ * @arg: argument for @put callback.
*/
struct vb2_vmarea_handler {
refcount_t *refcount;
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 036127c54bbf..3d5e2d739f05 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -24,16 +24,17 @@
#endif
/**
- * struct vb2_v4l2_buffer - video buffer information for v4l2
+ * struct vb2_v4l2_buffer - video buffer information for v4l2.
*
- * @vb2_buf: video buffer 2
- * @flags: buffer informational flags
- * @field: enum v4l2_field; field order of the image in the buffer
- * @timecode: frame timecode
- * @sequence: sequence count of this frame
+ * @vb2_buf: embedded &struct vb2_buffer.
+ * @flags: buffer informational flags.
+ * @field: field order of the image in the buffer, as defined by
+ * &enum v4l2_field.
+ * @timecode: frame timecode.
+ * @sequence: sequence count of this frame.
*
* Should contain enough information to be able to cover all the fields
- * of struct v4l2_buffer at videodev2.h
+ * of &struct v4l2_buffer at ``videodev2.h``.
*/
struct vb2_v4l2_buffer {
struct vb2_buffer vb2_buf;
@@ -56,9 +57,9 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
* vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies
* the memory and type values.
*
- * @q: videobuf2 queue
- * @req: struct passed from userspace to vidioc_reqbufs handler
- * in driver
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @req: &struct v4l2_requestbuffers passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_reqbufs handler in driver.
*/
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
@@ -66,94 +67,99 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
* vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies
* the memory and type values.
*
- * @q: videobuf2 queue
- * @create: creation parameters, passed from userspace to vidioc_create_bufs
- * handler in driver
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @create: creation parameters, passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_create_bufs handler in driver
*/
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
/**
* vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
*
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_prepare_buf
- * handler in driver
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @b: buffer structure passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver
+ *
+ * Should be called from &v4l2_ioctl_ops->vidioc_prepare_buf ioctl handler
+ * of a driver.
*
- * Should be called from vidioc_prepare_buf ioctl handler of a driver.
* This function:
*
* #) verifies the passed buffer,
- * #) calls buf_prepare callback in the driver (if provided), in which
- * driver-specific buffer initialization can be performed.
+ * #) calls &vb2_ops->buf_prepare callback in the driver (if provided),
+ * in which driver-specific buffer initialization can be performed.
*
* The return values from this function are intended to be directly returned
- * from vidioc_prepare_buf handler in driver.
+ * from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
*/
int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
/**
* vb2_qbuf() - Queue a buffer from userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to VIDIOC_QBUF() handler
- * in driver
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @b: buffer structure passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_qbuf handler in driver
*
- * Should be called from VIDIOC_QBUF() ioctl handler of a driver.
+ * Should be called from &v4l2_ioctl_ops->vidioc_qbuf handler of a driver.
*
* This function:
*
- * #) verifies the passed buffer,
- * #) if necessary, calls buf_prepare callback in the driver (if provided), in
- * which driver-specific buffer initialization can be performed,
- * #) if streaming is on, queues the buffer in driver by the means of buf_queue
- * callback for processing.
+ * #) verifies the passed buffer;
+ * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
+ * (if provided), in which driver-specific buffer initialization can
+ * be performed;
+ * #) if streaming is on, queues the buffer in driver by the means of
+ * &vb2_ops->buf_queue callback for processing.
*
* The return values from this function are intended to be directly returned
- * from VIDIOC_QBUF() handler in driver.
+ * from &v4l2_ioctl_ops->vidioc_qbuf handler in driver.
*/
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
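For illustration, a hypothetical &v4l2_ioctl_ops->vidioc_qbuf handler built on this wrapper (struct my_dev is an assumption; video_drvdata() is the usual v4l2-dev.h accessor); drivers without special needs usually point the op at the ready-made vb2_ioctl_qbuf() helper instead.

static int my_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
        struct my_dev *dev = video_drvdata(file);

        return vb2_qbuf(&dev->queue, b);
}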
/**
* vb2_expbuf() - Export a buffer as a file descriptor
- * @q: videobuf2 queue
- * @eb: export buffer structure passed from userspace to VIDIOC_EXPBUF()
- * handler in driver
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @eb: export buffer structure passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_expbuf handler in driver
*
* The return values from this function are intended to be directly returned
- * from VIDIOC_EXPBUF() handler in driver.
+ * from &v4l2_ioctl_ops->vidioc_expbuf handler in driver.
*/
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
/**
* vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to VIDIOC_DQBUF() handler
- * in driver
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @b: buffer structure passed from userspace to
+ * &v4l2_ioctl_ops->vidioc_dqbuf handler in driver
* @nonblocking: if true, this call will not sleep waiting for a buffer if no
* buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
+ * would be passing (&file->f_flags & %O_NONBLOCK) here
*
- * Should be called from VIDIOC_DQBUF() ioctl handler of a driver.
+ * Should be called from &v4l2_ioctl_ops->vidioc_dqbuf ioctl handler
+ * of a driver.
*
* This function:
*
- * #) verifies the passed buffer,
- * #) calls buf_finish callback in the driver (if provided), in which
+ * #) verifies the passed buffer;
+ * #) calls &vb2_ops->buf_finish callback in the driver (if provided), in which
* driver can perform any additional operations that may be required before
- * returning the buffer to userspace, such as cache sync,
+ * returning the buffer to userspace, such as cache sync;
* #) the buffer struct members are filled with relevant information for
* the userspace.
*
* The return values from this function are intended to be directly returned
- * from VIDIOC_DQBUF() handler in driver.
+ * from &v4l2_ioctl_ops->vidioc_dqbuf handler in driver.
*/
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
/**
* vb2_streamon - start streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamon handler
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @type: type argument passed from userspace to vidioc_streamon handler,
+ * as defined by &enum v4l2_buf_type.
*
- * Should be called from vidioc_streamon handler of a driver.
+ * Should be called from &v4l2_ioctl_ops->vidioc_streamon handler of a driver.
*
* This function:
*
@@ -161,13 +167,13 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
* 2) passes any previously queued buffers to the driver and starts streaming
*
* The return values from this function are intended to be directly returned
- * from vidioc_streamon handler in the driver.
+ * from &v4l2_ioctl_ops->vidioc_streamon handler in the driver.
*/
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
/**
* vb2_streamoff - stop streaming
- * @q: videobuf2 queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
* @type: type argument passed from userspace to vidioc_streamoff handler
*
* Should be called from vidioc_streamoff handler of a driver.
@@ -186,7 +192,7 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
/**
* vb2_queue_init() - initialize a videobuf2 queue
- * @q: videobuf2 queue; this structure should be allocated in driver
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* The vb2_queue structure should be allocated by the driver. The driver is
* responsible for clearing its content and setting initial values for some
@@ -199,7 +205,7 @@ int __must_check vb2_queue_init(struct vb2_queue *q);
/**
* vb2_queue_release() - stop streaming, release the queue and free memory
- * @q: videobuf2 queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
*
* This function stops streaming and performs necessary clean ups, including
* freeing video buffer memory. The driver is responsible for freeing
@@ -209,7 +215,7 @@ void vb2_queue_release(struct vb2_queue *q);
/**
* vb2_poll() - implements poll userspace operation
- * @q: videobuf2 queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
* @file: file argument passed to the poll file operation handler
* @wait: wait argument passed to the poll file operation handler
*
@@ -226,8 +232,7 @@ void vb2_queue_release(struct vb2_queue *q);
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file,
- poll_table *wait);
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
/*
* The following functions are not part of the vb2 core API, but are simple
@@ -262,7 +267,7 @@ ssize_t vb2_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
ssize_t vb2_fop_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait);
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait);
#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
@@ -271,7 +276,7 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
/**
* vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue
*
- * @vq: pointer to struct vb2_queue
+ * @vq: pointer to &struct vb2_queue
*
* .. note:: only use if vq->lock is non-NULL.
*/
@@ -280,7 +285,7 @@ void vb2_ops_wait_prepare(struct vb2_queue *vq);
/**
* vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue
*
- * @vq: pointer to struct vb2_queue
+ * @vq: pointer to &struct vb2_queue
*
* .. note:: only use if vq->lock is non-NULL.
*/
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 480d50a0b8ba..b712be544f8c 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -267,7 +267,7 @@ int cxl_fd_open(struct inode *inode, struct file *file);
int cxl_fd_release(struct inode *inode, struct file *file);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
-unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
loff_t *off);
diff --git a/include/misc/ocxl-config.h b/include/misc/ocxl-config.h
new file mode 100644
index 000000000000..3526fa996a22
--- /dev/null
+++ b/include/misc/ocxl-config.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2017 IBM Corp.
+#ifndef _OCXL_CONFIG_H_
+#define _OCXL_CONFIG_H_
+
+/*
+ * This file lists the various constants used to read the
+ * configuration space of an opencapi adapter.
+ *
+ * It follows the specification for opencapi 3.0
+ */
+
+#define OCXL_EXT_CAP_ID_DVSEC 0x23
+
+#define OCXL_DVSEC_VENDOR_OFFSET 0x4
+#define OCXL_DVSEC_ID_OFFSET 0x8
+#define OCXL_DVSEC_TL_ID 0xF000
+#define OCXL_DVSEC_TL_BACKOFF_TIMERS 0x10
+#define OCXL_DVSEC_TL_RECV_CAP 0x18
+#define OCXL_DVSEC_TL_SEND_CAP 0x20
+#define OCXL_DVSEC_TL_RECV_RATE 0x30
+#define OCXL_DVSEC_TL_SEND_RATE 0x50
+#define OCXL_DVSEC_FUNC_ID 0xF001
+#define OCXL_DVSEC_FUNC_OFF_INDEX 0x08
+#define OCXL_DVSEC_FUNC_OFF_ACTAG 0x0C
+#define OCXL_DVSEC_AFU_INFO_ID 0xF003
+#define OCXL_DVSEC_AFU_INFO_AFU_IDX 0x0A
+#define OCXL_DVSEC_AFU_INFO_OFF 0x0C
+#define OCXL_DVSEC_AFU_INFO_DATA 0x10
+#define OCXL_DVSEC_AFU_CTRL_ID 0xF004
+#define OCXL_DVSEC_AFU_CTRL_AFU_IDX 0x0A
+#define OCXL_DVSEC_AFU_CTRL_TERM_PASID 0x0C
+#define OCXL_DVSEC_AFU_CTRL_ENABLE 0x0F
+#define OCXL_DVSEC_AFU_CTRL_PASID_SUP 0x10
+#define OCXL_DVSEC_AFU_CTRL_PASID_EN 0x11
+#define OCXL_DVSEC_AFU_CTRL_PASID_BASE 0x14
+#define OCXL_DVSEC_AFU_CTRL_ACTAG_SUP 0x18
+#define OCXL_DVSEC_AFU_CTRL_ACTAG_EN 0x1A
+#define OCXL_DVSEC_AFU_CTRL_ACTAG_BASE 0x1C
+#define OCXL_DVSEC_VENDOR_ID 0xF0F0
+#define OCXL_DVSEC_VENDOR_CFG_VERS 0x0C
+#define OCXL_DVSEC_VENDOR_TLX_VERS 0x10
+#define OCXL_DVSEC_VENDOR_DLX_VERS 0x20
+
+#endif /* _OCXL_CONFIG_H_ */
diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h
new file mode 100644
index 000000000000..51ccf76db293
--- /dev/null
+++ b/include/misc/ocxl.h
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2017 IBM Corp.
+#ifndef _MISC_OCXL_H_
+#define _MISC_OCXL_H_
+
+#include <linux/pci.h>
+
+/*
+ * Opencapi drivers all need some common facilities, like parsing the
+ * device configuration space, adding a Process Element to the Shared
+ * Process Area, etc...
+ *
+ * The ocxl module provides a kernel API, to allow other drivers to
+ * reuse common code. A bit like an in-kernel library.
+ */
+
+#define OCXL_AFU_NAME_SZ (24+1) /* add 1 for NULL termination */
+
+/*
+ * The following 2 structures are a fairly generic way of representing
+ * the configuration data for a function and AFU, as read from the
+ * configuration space.
+ */
+struct ocxl_afu_config {
+ u8 idx;
+ int dvsec_afu_control_pos; /* offset of AFU control DVSEC */
+ char name[OCXL_AFU_NAME_SZ];
+ u8 version_major;
+ u8 version_minor;
+ u8 afuc_type;
+ u8 afum_type;
+ u8 profile;
+ u8 global_mmio_bar; /* global MMIO area */
+ u64 global_mmio_offset;
+ u32 global_mmio_size;
+ u8 pp_mmio_bar; /* per-process MMIO area */
+ u64 pp_mmio_offset;
+ u32 pp_mmio_stride;
+ u8 log_mem_size;
+ u8 pasid_supported_log;
+ u16 actag_supported;
+};
+
+struct ocxl_fn_config {
+ int dvsec_tl_pos; /* offset of the Transaction Layer DVSEC */
+ int dvsec_function_pos; /* offset of the Function DVSEC */
+ int dvsec_afu_info_pos; /* offset of the AFU information DVSEC */
+ s8 max_pasid_log;
+ s8 max_afu_index;
+};
+
+/*
+ * Read the configuration space of a function and fill in a
+ * ocxl_fn_config structure with all the function details
+ */
+extern int ocxl_config_read_function(struct pci_dev *dev,
+ struct ocxl_fn_config *fn);
+
+/*
+ * Check if an AFU index is valid for the given function.
+ *
+ * AFU indexes can be sparse, so a driver should check all indexes up
+ * to the maximum found in the function description
+ */
+extern int ocxl_config_check_afu_index(struct pci_dev *dev,
+ struct ocxl_fn_config *fn, int afu_idx);
+
+/*
+ * Read the configuration space of a function for the AFU specified by
+ * the index 'afu_idx'. Fills in a ocxl_afu_config structure
+ */
+extern int ocxl_config_read_afu(struct pci_dev *dev,
+ struct ocxl_fn_config *fn,
+ struct ocxl_afu_config *afu,
+ u8 afu_idx);
+
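As a sketch of how the two helpers above combine at probe time, assuming ocxl_config_read_function() already filled in @fn. my_setup_afu() is a hypothetical driver hook, and treating a zero return from ocxl_config_check_afu_index() as "no AFU at this index" is an assumption.

static int my_scan_afus(struct pci_dev *dev, struct ocxl_fn_config *fn)
{
        struct ocxl_afu_config afu;
        int i, rc;

        for (i = 0; i <= fn->max_afu_index; i++) {
                rc = ocxl_config_check_afu_index(dev, fn, i);
                if (rc < 0)
                        return rc;
                if (!rc)
                        continue;       /* assumption: 0 means index not populated */

                rc = ocxl_config_read_afu(dev, fn, &afu, i);
                if (rc)
                        return rc;

                my_setup_afu(dev, fn, &afu);    /* hypothetical driver hook */
        }

        return 0;
}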
+/*
+ * Get the max PASID value that can be used by the function
+ */
+extern int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count);
+
+/*
+ * Tell an AFU, by writing in the configuration space, the PASIDs that
+ * it can use. Range starts at 'pasid_base' and its size is a multiple
+ * of 2
+ *
+ * 'afu_control_offset' is the offset of the AFU control DVSEC which
+ * can be found in the function configuration
+ */
+extern void ocxl_config_set_afu_pasid(struct pci_dev *dev,
+ int afu_control_offset,
+ int pasid_base, u32 pasid_count_log);
+
+/*
+ * Get the actag configuration for the function:
+ * 'base' is the first actag value that can be used.
+ * 'enabled' is the number of actags available, starting from base.
+ * 'supported' is the total number of actags desired by all the AFUs
+ * of the function.
+ */
+extern int ocxl_config_get_actag_info(struct pci_dev *dev,
+ u16 *base, u16 *enabled, u16 *supported);
+
+/*
+ * Tell a function, by writing in the configuration space, the actags
+ * it can use.
+ *
+ * 'func_offset' is the offset of the Function DVSEC that can found in
+ * the function configuration
+ */
+extern void ocxl_config_set_actag(struct pci_dev *dev, int func_offset,
+ u32 actag_base, u32 actag_count);
+
+/*
+ * Tell an AFU, by writing in the configuration space, the actags it
+ * can use.
+ *
+ * 'afu_control_offset' is the offset of the AFU control DVSEC for the
+ * desired AFU. It can be found in the AFU configuration
+ */
+extern void ocxl_config_set_afu_actag(struct pci_dev *dev,
+ int afu_control_offset,
+ int actag_base, int actag_count);
+
+/*
+ * Enable/disable an AFU, by writing in the configuration space.
+ *
+ * 'afu_control_offset' is the offset of the AFU control DVSEC for the
+ * desired AFU. It can be found in the AFU configuration
+ */
+extern void ocxl_config_set_afu_state(struct pci_dev *dev,
+ int afu_control_offset, int enable);
+
+/*
+ * Set the Transaction Layer configuration in the configuration space.
+ * Only needed for function 0.
+ *
+ * It queries the host TL capabilities, finds some common ground
+ * between the host and device, and sets the Transaction Layer on both
+ * accordingly.
+ */
+extern int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec);
+
+/*
+ * Request an AFU to terminate a PASID.
+ * Returns once the AFU has acked the request, or returns an error in
+ * case of timeout.
+ *
+ * The hardware can only terminate one PASID at a time, so the caller
+ * must guarantee some kind of serialization.
+ *
+ * 'afu_control_offset' is the offset of the AFU control DVSEC for the
+ * desired AFU. It can be found in the AFU configuration
+ */
+extern int ocxl_config_terminate_pasid(struct pci_dev *dev,
+ int afu_control_offset, int pasid);
+
+/*
+ * Set up the OpenCAPI link for the function.
+ *
+ * When called for the first time for a link, it sets up the Shared
+ * Process Area for the link and the interrupt handler to process
+ * translation faults.
+ *
+ * Returns a 'link handle' that should be used for further calls for
+ * the link
+ */
+extern int ocxl_link_setup(struct pci_dev *dev, int PE_mask,
+ void **link_handle);
+
+/*
+ * Remove the association between the function and its link.
+ */
+extern void ocxl_link_release(struct pci_dev *dev, void *link_handle);
+
+/*
+ * Add a Process Element to the Shared Process Area for a link.
+ * The process is defined by its PASID, pid, tid and its mm_struct.
+ *
+ * 'xsl_err_cb' is an optional callback if the driver wants to be
+ * notified when the translation fault interrupt handler detects an
+ * address error.
+ * 'xsl_err_data' is an argument passed to the above callback, if
+ * defined
+ */
+extern int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
+ u64 amr, struct mm_struct *mm,
+ void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
+ void *xsl_err_data);
+
+/*
+ * Remove a Process Element from the Shared Process Area for a link
+ */
+extern int ocxl_link_remove_pe(void *link_handle, int pasid);
+
+/*
+ * Allocate an AFU interrupt associated to the link.
+ *
+ * 'hw_irq' is the hardware interrupt number
+ * 'obj_handle' is the 64-bit object handle to be passed to the AFU to
+ * trigger the interrupt.
+ * On P9, 'obj_handle' is an MMIO address which triggers the interrupt
+ * when written. It needs to be remapped (one page).
+ */
+extern int ocxl_link_irq_alloc(void *link_handle, int *hw_irq,
+ u64 *obj_handle);
+
+/*
+ * Free a previously allocated AFU interrupt
+ */
+extern void ocxl_link_free_irq(void *link_handle, int hw_irq);
+
+#endif /* _MISC_OCXL_H_ */
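
A minimal sketch of how a hypothetical driver probe might walk the AFUs of an
OpenCAPI function with the API above; the example_probe() name, the error
handling, and the assumption that ocxl_config_check_afu_index() returns a
positive value for a populated index are illustrative only:

#include <misc/ocxl.h>

static int example_probe(struct pci_dev *dev)
{
	struct ocxl_fn_config fn;
	struct ocxl_afu_config afu;
	int rc, i;

	rc = ocxl_config_read_function(dev, &fn);
	if (rc)
		return rc;

	/* AFU indexes can be sparse: check every index up to the max */
	for (i = 0; i <= fn.max_afu_index; i++) {
		rc = ocxl_config_check_afu_index(dev, &fn, i);
		if (rc <= 0)
			continue;	/* index not populated (or error) */

		rc = ocxl_config_read_afu(dev, &fn, &afu, i);
		if (rc)
			return rc;
		/* afu now describes name, MMIO areas, PASID/actag limits */
	}
	return 0;
}
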
diff --git a/include/net/Space.h b/include/net/Space.h
index 27fb5c937c4f..9cce0d80d37a 100644
--- a/include/net/Space.h
+++ b/include/net/Space.h
@@ -20,8 +20,6 @@ struct net_device *cs89x0_probe(int unit);
struct net_device *mvme147lance_probe(int unit);
struct net_device *tc515_probe(int unit);
struct net_device *lance_probe(int unit);
-struct net_device *mac8390_probe(int unit);
-struct net_device *mac89x0_probe(int unit);
struct net_device *cops_probe(int unit);
struct net_device *ltpc_probe(void);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index fd08df74c466..9e59ebfded62 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -86,14 +86,18 @@ struct tc_action_ops {
int (*act)(struct sk_buff *, const struct tc_action *,
struct tcf_result *);
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
- void (*cleanup)(struct tc_action *, int bind);
- int (*lookup)(struct net *, struct tc_action **, u32);
+ void (*cleanup)(struct tc_action *);
+ int (*lookup)(struct net *net, struct tc_action **a, u32 index,
+ struct netlink_ext_ack *extack);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
- int bind);
+ int bind, struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
- struct netlink_callback *, int, const struct tc_action_ops *);
+ struct netlink_callback *, int,
+ const struct tc_action_ops *,
+ struct netlink_ext_ack *);
void (*stats_update)(struct tc_action *, u64, u32, u64);
+ size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
};
@@ -120,24 +124,31 @@ int tc_action_net_init(struct tc_action_net *tn,
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
struct tcf_idrinfo *idrinfo);
-static inline void tc_action_net_exit(struct tc_action_net *tn)
+static inline void tc_action_net_exit(struct list_head *net_list,
+ unsigned int id)
{
+ struct net *net;
+
rtnl_lock();
- tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct tc_action_net *tn = net_generic(net, id);
+
+ tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+ kfree(tn->idrinfo);
+ }
rtnl_unlock();
- kfree(tn->idrinfo);
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
struct netlink_callback *cb, int type,
- const struct tc_action_ops *ops);
+ const struct tc_action_ops *ops,
+ struct netlink_ext_ack *extack);
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
int bind);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
int bind, bool cpustats);
-void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
@@ -155,10 +166,12 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, char *name, int ovr, int bind,
- struct list_head *actions);
+ struct list_head *actions, size_t *attr_size,
+ struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
- char *name, int ovr, int bind);
+ char *name, int ovr, int bind,
+ struct netlink_ext_ack *extack);
int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
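
A minimal sketch of how an action module's pernet operations might use the
batched tc_action_net_exit() above; the example_* names are hypothetical:

#include <net/act_api.h>
#include <net/net_namespace.h>

static unsigned int example_act_net_id;

static void __net_exit example_act_exit_net(struct list_head *net_list)
{
	/* One rtnl_lock/unlock for the whole batch of dying netns */
	tc_action_net_exit(net_list, example_act_net_id);
}

static struct pernet_operations example_act_net_ops = {
	.exit_batch	= example_act_exit_net,
	.id		= &example_act_net_id,
	.size		= sizeof(struct tc_action_net),
};
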
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index b623b65a79d1..378d601258be 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -69,8 +69,8 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg);
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
- const struct net_device *dev, int strict,
- u32 banned_flags);
+ const struct net_device *dev, bool skip_dev_check,
+ int strict, u32 banned_flags);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
@@ -180,7 +180,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
*/
int ipv6_addr_label_init(void);
void ipv6_addr_label_cleanup(void);
-void ipv6_addr_label_rtnl_register(void);
+int ipv6_addr_label_rtnl_register(void);
u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
int type, int ifindex);
@@ -231,6 +231,13 @@ struct ipv6_stub {
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;
+/* A stub used by bpf helpers. Similarly ugly to ipv6_stub */
+struct ipv6_bpf_stub {
+ int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ bool force_bind_address_no_port, bool with_lock);
+};
+extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
+
/*
* identify MLD packets for MLD filter exceptions
*/
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 2b3a6eec4570..8ae8ee004258 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -31,6 +31,11 @@ enum rxrpc_call_completion {
NR__RXRPC_CALL_COMPLETIONS
};
+/*
+ * Debug ID counter for tracing.
+ */
+extern atomic_t rxrpc_debug_id;
+
typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
unsigned long);
typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *,
@@ -50,7 +55,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
s64,
gfp_t,
rxrpc_notify_rx_t,
- bool);
+ bool,
+ unsigned int);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
@@ -63,7 +69,8 @@ void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
- rxrpc_user_attach_call_t, unsigned long, gfp_t);
+ rxrpc_user_attach_call_t, unsigned long, gfp_t,
+ unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *, struct key *);
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 76fb39c272a7..c91bc87931c7 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -318,10 +318,12 @@ void ax25_digi_invert(const ax25_digi *, ax25_digi *);
extern ax25_dev *ax25_dev_list;
extern spinlock_t ax25_dev_lock;
+#if IS_ENABLED(CONFIG_AX25)
static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
{
return dev->ax25_ptr;
}
+#endif
ax25_dev *ax25_addr_ax25dev(ax25_address *);
void ax25_dev_device_up(struct net_device *);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e89cff0c4c23..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 95ccc1eef558..b619a190ff12 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -895,7 +895,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u16 conn_timeout);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role);
+ u8 role, bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 72a456bbbcd5..e7303eee65cd 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -600,7 +600,7 @@ struct mgmt_rp_read_ext_info {
#define MGMT_OP_SET_APPEARANCE 0x0043
struct mgmt_cp_set_appearance {
- __u16 appearance;
+ __le16 appearance;
} __packed;
#define MGMT_SET_APPEARANCE_SIZE 2
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index fe328c52c46b..801489bb14c3 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt);
*/
int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
+static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
+{
+ u8 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+
+ return tmp;
+}
+
+static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
+{
+ __le16 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 2);
+
+ return le16_to_cpu(tmp);
+}
+
+static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
+{
+ __le32 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 4);
+
+ return le32_to_cpu(tmp);
+}
+
/*
* Peek header from packet.
* Reads data from packet without changing packet.
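
A minimal sketch of what the new helpers buy: extracting a few fixed-size
little-endian fields without hand-declared temporaries. The three-field
layout parsed here is made up for illustration:

#include <linux/printk.h>
#include <net/caif/cfpkt.h>

static void example_parse(struct cfpkt *pkt)
{
	u8 cmd = cfpkt_extr_head_u8(pkt);
	u16 len = cfpkt_extr_head_u16(pkt);	/* converted to host order */
	u32 connid = cfpkt_extr_head_u32(pkt);

	pr_debug("cmd %u len %u connid %u\n", cmd, len, connid);
}
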
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index fb94a8bd8ab5..250dac390806 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -6,6 +6,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -646,6 +647,8 @@ struct survey_info {
* allowed through even on unauthorized ports
* @control_port_no_encrypt: TRUE to prevent encryption of control port
* protocol frames.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
* @wep_keys: static WEP keys, if not NULL points to an array of
* CFG80211_MAX_WEP_KEYS WEP keys
* @wep_tx_key: key index (0..3) of the default TX static WEP key
@@ -661,6 +664,7 @@ struct cfg80211_crypto_settings {
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
+ bool control_port_over_nl80211;
struct key_params *wep_keys;
int wep_tx_key;
const u8 *psk;
@@ -1147,6 +1151,7 @@ struct cfg80211_tid_stats {
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
+ * @ack_signal: signal strength (in dBm) of the last ACK frame.
*/
struct station_info {
u64 filled;
@@ -1191,6 +1196,7 @@ struct station_info {
u64 rx_duration;
u8 rx_beacon_signal_avg;
struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
+ s8 ack_signal;
};
#if IS_ENABLED(CONFIG_CFG80211)
@@ -1448,6 +1454,8 @@ struct mesh_config {
* @userspace_handles_dfs: whether user space controls DFS operation, i.e.
* changes the channel when a radar is detected. This is required
* to operate on DFS channels.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
*
* These parameters are fixed when the mesh is created.
*/
@@ -1470,6 +1478,7 @@ struct mesh_setup {
u32 basic_rates;
struct cfg80211_bitrate_mask beacon_rate;
bool userspace_handles_dfs;
+ bool control_port_over_nl80211;
};
/**
@@ -1775,6 +1784,8 @@ enum cfg80211_signal_type {
* by %parent_bssid.
* @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
* the BSS that requested the scan in which the beacon/probe was received.
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
*/
struct cfg80211_inform_bss {
struct ieee80211_channel *chan;
@@ -1783,6 +1794,8 @@ struct cfg80211_inform_bss {
u64 boottime_ns;
u64 parent_tsf;
u8 parent_bssid[ETH_ALEN] __aligned(2);
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
};
/**
@@ -1826,6 +1839,8 @@ struct cfg80211_bss_ies {
* that holds the beacon data. @beacon_ies is still valid, of course, and
* points to the same data as hidden_beacon_bss->beacon_ies in that case.
* @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
@@ -1844,6 +1859,8 @@ struct cfg80211_bss {
u16 capability;
u8 bssid[ETH_ALEN];
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 priv[0] __aligned(sizeof(void *));
};
@@ -1897,11 +1914,16 @@ struct cfg80211_auth_request {
* @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
* @ASSOC_REQ_DISABLE_VHT: Disable VHT
* @ASSOC_REQ_USE_RRM: Declare RRM capability in this association
+ * @CONNECT_REQ_EXTERNAL_AUTH_SUPPORT: User space indicates external
+ * authentication capability. Drivers can offload authentication to
+ * userspace if this flag is set. Only applicable for cfg80211_connect()
+ * request (connect callback).
*/
enum cfg80211_assoc_req_flags {
- ASSOC_REQ_DISABLE_HT = BIT(0),
- ASSOC_REQ_DISABLE_VHT = BIT(1),
- ASSOC_REQ_USE_RRM = BIT(2),
+ ASSOC_REQ_DISABLE_HT = BIT(0),
+ ASSOC_REQ_DISABLE_VHT = BIT(1),
+ ASSOC_REQ_USE_RRM = BIT(2),
+ CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3),
};
/**
@@ -2015,6 +2037,8 @@ struct cfg80211_disassoc_request {
* sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
* required to assume that the port is unauthorized until authorized by
* user space. Otherwise, port is marked authorized by default.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
* @userspace_handles_dfs: whether user space controls DFS operation, i.e.
* changes the channel when a radar is detected. This is required
* to operate on DFS channels.
@@ -2023,6 +2047,9 @@ struct cfg80211_disassoc_request {
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
* will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ * CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
*/
struct cfg80211_ibss_params {
const u8 *ssid;
@@ -2035,10 +2062,13 @@ struct cfg80211_ibss_params {
bool channel_fixed;
bool privacy;
bool control_port;
+ bool control_port_over_nl80211;
bool userspace_handles_dfs;
int mcast_rate[NUM_NL80211_BANDS];
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
+ struct key_params *wep_keys;
+ int wep_tx_key;
};
/**
@@ -2588,6 +2618,33 @@ struct cfg80211_pmk_conf {
};
/**
+ * struct cfg80211_external_auth_params - Trigger External authentication.
+ *
+ * Commonly used across the external auth request and event interfaces.
+ *
+ * @action: action type / trigger for external authentication. Only significant
+ * for the authentication request event interface (driver to user space).
+ * @bssid: BSSID of the peer with which the authentication has
+ * to happen. Used by both the authentication request event and
+ * authentication response command interface.
+ * @ssid: SSID of the AP. Used by both the authentication request event and
+ * authentication response command interface.
+ * @key_mgmt_suite: AKM suite of the respective authentication. Used by the
+ * authentication request event interface.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful authentication,
+ * use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you
+ * the real status code for failures. Used only for the authentication
+ * response command interface (user space to driver).
+ */
+struct cfg80211_external_auth_params {
+ enum nl80211_external_auth_action action;
+ u8 bssid[ETH_ALEN] __aligned(2);
+ struct cfg80211_ssid ssid;
+ unsigned int key_mgmt_suite;
+ u16 status;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -2910,6 +2967,12 @@ struct cfg80211_pmk_conf {
* (invoked with the wireless_dev mutex held)
* @del_pmk: delete the previously configured PMK for the given authenticator.
* (invoked with the wireless_dev mutex held)
+ *
+ * @external_auth: indicates result of offloaded authentication processing from
+ * user space
+ *
+ * @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter
+ * tells the driver that the frame should not be encrypted.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3203,6 +3266,14 @@ struct cfg80211_ops {
const struct cfg80211_pmk_conf *conf);
int (*del_pmk)(struct wiphy *wiphy, struct net_device *dev,
const u8 *aa);
+ int (*external_auth)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *params);
+
+ int (*tx_control_port)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *dest, const __be16 proto,
+ const bool noencrypt);
};
/*
@@ -3504,6 +3575,35 @@ enum wiphy_vendor_command_flags {
};
/**
+ * enum wiphy_opmode_flag - Station's ht/vht operation mode information flags
+ *
+ * @STA_OPMODE_MAX_BW_CHANGED: Max Bandwidth changed
+ * @STA_OPMODE_SMPS_MODE_CHANGED: SMPS mode changed
+ * @STA_OPMODE_N_SS_CHANGED: max N_SS (number of spatial streams) changed
+ *
+ */
+enum wiphy_opmode_flag {
+ STA_OPMODE_MAX_BW_CHANGED = BIT(0),
+ STA_OPMODE_SMPS_MODE_CHANGED = BIT(1),
+ STA_OPMODE_N_SS_CHANGED = BIT(2),
+};
+
+/**
+ * struct sta_opmode_info - Station's ht/vht operation mode information
+ * @changed: contains value from &enum wiphy_opmode_flag
+ * @smps_mode: New SMPS mode value from &enum nl80211_smps_mode of a station
+ * @bw: new max bandwidth value from &enum nl80211_chan_width of a station
+ * @rx_nss: new rx_nss value of a station
+ */
+struct sta_opmode_info {
+ u32 changed;
+ enum nl80211_smps_mode smps_mode;
+ enum nl80211_chan_width bw;
+ u8 rx_nss;
+};
+
+/**
* struct wiphy_vendor_command - vendor command definition
* @info: vendor command identifying information, as used in nl80211
* @flags: flags, see &enum wiphy_vendor_command_flags
@@ -4329,10 +4429,12 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
* of it being pushed into the SKB
* @addr: the device MAC address
* @iftype: the virtual interface type
+ * @data_offset: offset of payload after the 802.11 header
* Return: 0 on success. Non-zero on error.
*/
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
- const u8 *addr, enum nl80211_iftype iftype);
+ const u8 *addr, enum nl80211_iftype iftype,
+ u8 data_offset);
/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -4344,7 +4446,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
- return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype);
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
}
/**
@@ -4574,6 +4676,33 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
*/
const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
+/**
+ * DOC: Internal regulatory db functions
+ *
+ */
+
+/**
+ * reg_query_regdb_wmm - Query internal regulatory db for wmm rule
+ *
+ * @alpha2: the ISO/IEC 3166 alpha2 country code to be queried.
+ * @freq: the frequency (in MHz) to be queried.
+ * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
+ *	irrelevant). This can be used later for deduplication.
+ * @rule: pointer to store the wmm rule from the regulatory db.
+ *
+ * Self-managed wireless drivers can use this function to proactively
+ * query the internal regulatory database to check whether the given
+ * ISO/IEC 3166 alpha2 country and freq have wmm rule limitations.
+ *
+ * Drivers should check the return value; it is possible to get
+ * -ENODATA.
+ *
+ * Return: 0 on success, -ENODATA if no rule is available.
+ */
+int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
+ struct ieee80211_wmm_rule *rule);
+
/*
* callbacks for asynchronous cfg80211 methods, notification
* functions and BSS handling helpers
@@ -5577,7 +5706,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* cfg80211_rx_mgmt - notification of received, unprocessed management frame
* @wdev: wireless device receiving the frame
* @freq: Frequency on which the frame was received in MHz
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
* @buf: Management frame (header + body)
* @len: length of the frame data
* @flags: flags, as defined in enum nl80211_rxmgmt_flags
@@ -5611,6 +5740,28 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
/**
+ * cfg80211_rx_control_port - notification about a received control port frame
+ * @dev: The device the frame matched to
+ * @buf: control port frame
+ * @len: length of the frame data
+ * @addr: The peer from which the frame was received
+ * @proto: frame protocol, typically PAE or Pre-authentication
+ * @unencrypted: Whether the frame was received unencrypted
+ *
+ * This function is used to inform userspace about a received control port
+ * frame. It should only be used if userspace indicated it wants to receive
+ * control port frames over nl80211.
+ *
+ * The frame is the data portion of the 802.3 or 802.11 data frame with all
+ * network layer headers removed (e.g. the raw EAPoL frame).
+ *
+ * Return: %true if the frame was passed to userspace
+ */
+bool cfg80211_rx_control_port(struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *addr, u16 proto, bool unencrypted);
+
+/**
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
* @dev: network device
* @rssi_event: the triggered RSSI event
@@ -5672,6 +5823,20 @@ void cfg80211_radar_event(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef, gfp_t gfp);
/**
+ * cfg80211_sta_opmode_change_notify - STA's ht/vht operation mode change event
+ * @dev: network device
+ * @mac: MAC address of a station which opmode got modified
+ * @sta_opmode: station's current opmode value
+ * @gfp: context flags
+ *
+ * Drivers should call this function when a station's opmode is
+ * modified via an action frame.
+ */
+void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
+ struct sta_opmode_info *sta_opmode,
+ gfp_t gfp);
+
+/**
* cfg80211_cac_event - Channel availability check (CAC) event
* @netdev: network device
* @chandef: chandef for the current channel
@@ -5745,10 +5910,13 @@ bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
* @addr: the address of the peer
* @cookie: the cookie filled in @probe_client previously
* @acked: indicates whether probe was acked or not
+ * @ack_signal: signal strength (in dBm) of the ACK frame.
+ * @is_valid_ack_signal: indicates whether the ack_signal is valid
* @gfp: allocation flags
*/
void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
- u64 cookie, bool acked, gfp_t gfp);
+ u64 cookie, bool acked, s32 ack_signal,
+ bool is_valid_ack_signal, gfp_t gfp);
/**
* cfg80211_report_obss_beacon - report beacon from other APs
@@ -5756,7 +5924,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
* @frame: the frame
* @len: length of the frame
* @freq: frequency the frame was received on
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
*
* Use this function to report to userspace when a beacon was
* received. It is not useful to call this when there is no
@@ -6189,6 +6357,17 @@ void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
/* ethtool helper */
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
+/**
+ * cfg80211_external_auth_request - userspace request for authentication
+ * @netdev: network device
+ * @params: External authentication parameters
+ * @gfp: allocation flags
+ * Returns: 0 on success, < 0 on error
+ */
+int cfg80211_external_auth_request(struct net_device *netdev,
+ struct cfg80211_external_auth_params *params,
+ gfp_t gfp);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
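
A minimal sketch of the receive side of control-port-over-nl80211: a driver
RX path hands an EAPoL frame straight to userspace with
cfg80211_rx_control_port(). The wrapper function and its arguments are
hypothetical:

#include <linux/if_ether.h>
#include <net/cfg80211.h>

static bool example_rx_eapol(struct net_device *dev, struct sk_buff *skb,
			     const u8 *src_addr, bool was_encrypted)
{
	/* Only meaningful if userspace set control_port_over_nl80211 */
	return cfg80211_rx_control_port(dev, skb->data, skb->len,
					src_addr, ETH_P_PAE, !was_encrypted);
}
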
diff --git a/include/net/compat.h b/include/net/compat.h
index a91bea80b9fc..4c6d75612b6c 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -44,17 +44,6 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
-asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
- unsigned int);
-asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
- unsigned int, unsigned int);
-asmlinkage long compat_sys_recvmsg(int, struct compat_msghdr __user *,
- unsigned int);
-asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
- unsigned int, unsigned int,
- struct compat_timespec __user *);
-asmlinkage long compat_sys_getsockopt(int, int, int, char __user *,
- int __user *);
int put_cmsg_compat(struct msghdr*, int, int, int, void *);
int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9654e133599..2e4f71e16e95 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -26,10 +26,12 @@ struct devlink {
struct list_head port_list;
struct list_head sb_list;
struct list_head dpipe_table_list;
+ struct list_head resource_list;
struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
possible_net_t _net;
+ struct mutex lock;
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -181,6 +183,9 @@ struct devlink_dpipe_table_ops;
* @counters_enabled: indicates if counters are active
* @counter_control_extern: indicates if counter control is in dpipe or
* external tool
+ * @resource_valid: indicates that the resource id is valid
+ * @resource_id: id of the resource this table is related to
+ * @resource_units: number of resource units consumed per table entry
* @table_ops: table operations
* @rcu: rcu
*/
@@ -190,6 +195,9 @@ struct devlink_dpipe_table {
const char *name;
bool counters_enabled;
bool counter_control_extern;
+ bool resource_valid;
+ u64 resource_id;
+ u64 resource_units;
struct devlink_dpipe_table_ops *table_ops;
struct rcu_head rcu;
};
@@ -223,7 +231,65 @@ struct devlink_dpipe_headers {
unsigned int headers_count;
};
+/**
+ * struct devlink_resource_size_params - resource's size parameters
+ * @size_min: minimum size which can be set
+ * @size_max: maximum size which can be set
+ * @size_granularity: size granularity
+ * @unit: resource's basic unit
+ */
+struct devlink_resource_size_params {
+ u64 size_min;
+ u64 size_max;
+ u64 size_granularity;
+ enum devlink_resource_unit unit;
+};
+
+static inline void
+devlink_resource_size_params_init(struct devlink_resource_size_params *size_params,
+ u64 size_min, u64 size_max,
+ u64 size_granularity,
+ enum devlink_resource_unit unit)
+{
+ size_params->size_min = size_min;
+ size_params->size_max = size_max;
+ size_params->size_granularity = size_granularity;
+ size_params->unit = unit;
+}
+
+typedef u64 devlink_resource_occ_get_t(void *priv);
+
+/**
+ * struct devlink_resource - devlink resource
+ * @name: name of the resource
+ * @id: id, per devlink instance
+ * @size: size of the resource
+ * @size_new: updated size of the resource; a reload is needed for it
+ *	      to take effect
+ * @size_valid: true when the total size of the resource, including its
+ *		children, is valid
+ * @parent: parent resource
+ * @size_params: size parameters
+ * @list: parent list
+ * @resource_list: list of child resources
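+ * @occ_get: occupancy getter callback
+ * @occ_get_priv: private data passed to the occupancy getter callback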
+ */
+struct devlink_resource {
+ const char *name;
+ u64 id;
+ u64 size;
+ u64 size_new;
+ bool size_valid;
+ struct devlink_resource *parent;
+ struct devlink_resource_size_params size_params;
+ struct list_head list;
+ struct list_head resource_list;
+ devlink_resource_occ_get_t *occ_get;
+ void *occ_get_priv;
+};
+
+#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+
struct devlink_ops {
+ int (*reload)(struct devlink *devlink);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
@@ -332,6 +398,27 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
+int devlink_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
+void devlink_resources_unregister(struct devlink *devlink,
+ struct devlink_resource *resource);
+int devlink_resource_size_get(struct devlink *devlink,
+ u64 resource_id,
+ u64 *p_resource_size);
+int devlink_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units);
+void devlink_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void devlink_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id);
+
#else
static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
@@ -468,6 +555,52 @@ devlink_dpipe_match_put(struct sk_buff *skb,
return 0;
}
+static inline int
+devlink_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params)
+{
+ return 0;
+}
+
+static inline void
+devlink_resources_unregister(struct devlink *devlink,
+ struct devlink_resource *resource)
+{
+}
+
+static inline int
+devlink_resource_size_get(struct devlink *devlink, u64 resource_id,
+ u64 *p_resource_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+devlink_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+}
+
+static inline void
+devlink_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id)
+{
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
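
A minimal sketch of a driver registering one top-level resource with the new
API; the resource name, id and sizes are made up, and
DEVLINK_RESOURCE_UNIT_ENTRY is assumed to be the generic entry-count unit:

#include <net/devlink.h>

#define EXAMPLE_RESOURCE_TABLES	1

static int example_register_resources(struct devlink *devlink)
{
	struct devlink_resource_size_params size_params;

	devlink_resource_size_params_init(&size_params,
					  0,		/* size_min */
					  1 << 20,	/* size_max */
					  1 << 10,	/* size_granularity */
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, "tables", 1 << 20,
					 EXAMPLE_RESOURCE_TABLES,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &size_params);
}
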
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 55df9939bca2..342d2503cba5 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -69,6 +69,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
*/
struct dn_route {
struct dst_entry dst;
+ struct dn_route __rcu *dn_next;
struct neighbour *n;
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2a05738570d8..60fb4ec8ba61 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
#include <net/devlink.h>
#include <net/switchdev.h>
@@ -101,6 +102,7 @@ struct dsa_platform_data {
};
struct packet_type;
+struct dsa_switch;
struct dsa_device_ops {
struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -296,31 +298,39 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
return mask;
}
-static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+/* Return the local port used to reach an arbitrary switch port */
+static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
+ int port)
{
- struct dsa_switch_tree *dst = ds->dst;
-
- /*
- * If this is the root switch (i.e. the switch that connects
- * to the CPU), return the cpu port number on this switch.
- * Else return the (DSA) port number that connects to the
- * switch that is one hop closer to the cpu.
- */
- if (dst->cpu_dp->ds == ds)
- return dst->cpu_dp->index;
+ if (device == ds->index)
+ return port;
else
- return ds->rtable[dst->cpu_dp->ds->index];
+ return ds->rtable[device];
+}
+
+/* Return the local port used to reach the dedicated CPU port */
+static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
+{
+ const struct dsa_port *dp = dsa_to_port(ds, port);
+ const struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ if (!cpu_dp)
+ return port;
+
+ return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}
typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
/*
* Legacy probing.
*/
const char *(*probe)(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **priv);
+#endif
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
int port);
@@ -349,7 +359,7 @@ struct dsa_switch_ops {
void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
void (*get_ethtool_stats)(struct dsa_switch *ds,
int port, uint64_t *data);
- int (*get_sset_count)(struct dsa_switch *ds);
+ int (*get_sset_count)(struct dsa_switch *ds, int port);
/*
* ethtool Wake-on-LAN
@@ -360,6 +370,12 @@ struct dsa_switch_ops {
struct ethtool_wolinfo *w);
/*
+ * ethtool timestamp info
+ */
+ int (*get_ts_info)(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+
+ /*
* Suspend and resume
*/
int (*suspend)(struct dsa_switch *ds);
@@ -412,12 +428,10 @@ struct dsa_switch_ops {
*/
int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
bool vlan_filtering);
- int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
- void (*port_vlan_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
+ int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ void (*port_vlan_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
int (*port_vlan_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
/*
@@ -433,12 +447,10 @@ struct dsa_switch_ops {
/*
* Multicast database
*/
- int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans);
- void (*port_mdb_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans);
+ int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+ void (*port_mdb_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int (*port_mdb_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
/*
@@ -465,6 +477,18 @@ struct dsa_switch_ops {
int port, struct net_device *br);
void (*crosschip_bridge_leave)(struct dsa_switch *ds, int sw_index,
int port, struct net_device *br);
+
+ /*
+ * PTP functionality
+ */
+ int (*port_hwtstamp_get)(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+ int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+ bool (*port_txtstamp)(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+ bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
};
struct dsa_switch_driver {
@@ -472,11 +496,20 @@ struct dsa_switch_driver {
const struct dsa_switch_ops *ops;
};
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
/* Legacy driver registration */
void register_switch_driver(struct dsa_switch_driver *type);
void unregister_switch_driver(struct dsa_switch_driver *type);
struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
+#else
+static inline void register_switch_driver(struct dsa_switch_driver *type) { }
+static inline void unregister_switch_driver(struct dsa_switch_driver *type) { }
+static inline struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
+{
+ return NULL;
+}
+#endif
struct net_device *dsa_dev_to_net_device(struct device *dev);
/* Keep inline for faster access in hot path */
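
A minimal sketch of the reworked upstream-port lookup from a driver's point
of view; the example function and its dev_info() message are hypothetical:

#include <linux/device.h>
#include <net/dsa.h>

static void example_print_upstream(struct dsa_switch *ds, int port)
{
	/* Either the local CPU port, or the DSA port one hop closer to it */
	unsigned int up = dsa_upstream_port(ds, port);

	dev_info(ds->dev, "port %d reaches the CPU via local port %u\n",
		 port, up);
}
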
diff --git a/include/net/dst.h b/include/net/dst.h
index b091fd536098..b3219cd8a5a1 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -34,13 +34,9 @@ struct sk_buff;
struct dst_entry {
struct net_device *dev;
- struct rcu_head rcu_head;
- struct dst_entry *child;
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;
- struct dst_entry *path;
- struct dst_entry *from;
#ifdef CONFIG_XFRM
struct xfrm_state *xfrm;
#else
@@ -59,8 +55,6 @@ struct dst_entry {
#define DST_XFRM_QUEUE 0x0040
#define DST_METADATA 0x0080
- short error;
-
/* A non-zero value of dst->obsolete forces by-hand validation
* of the route entry. Positive values are set by the generic
* dst layer to indicate that the entry has been forcefully
@@ -76,35 +70,24 @@ struct dst_entry {
#define DST_OBSOLETE_KILL -2
unsigned short header_len; /* more space at head required */
unsigned short trailer_len; /* space to reserve at tail */
- unsigned short __pad3;
-
-#ifdef CONFIG_IP_ROUTE_CLASSID
- __u32 tclassid;
-#else
- __u32 __pad2;
-#endif
-#ifdef CONFIG_64BIT
- /*
- * Align __refcnt to a 64 bytes alignment
- * (L1_CACHE_SIZE would be too much)
- */
- long __pad_to_align_refcnt[2];
-#endif
/*
* __refcnt wants to be on a different cache line from
* input/output/ops or performance tanks badly
*/
- atomic_t __refcnt; /* client references */
+#ifdef CONFIG_64BIT
+ atomic_t __refcnt; /* 64-bit offset 64 */
+#endif
int __use;
unsigned long lastuse;
struct lwtunnel_state *lwtstate;
- union {
- struct dst_entry *next;
- struct rtable __rcu *rt_next;
- struct rt6_info __rcu *rt6_next;
- struct dn_route __rcu *dn_next;
- };
+ struct rcu_head rcu_head;
+ short error;
+ short __pad;
+ __u32 tclassid;
+#ifndef CONFIG_64BIT
+ atomic_t __refcnt; /* 32-bit offset 64 */
+#endif
};
struct dst_metrics {
@@ -250,7 +233,7 @@ static inline void dst_hold(struct dst_entry *dst)
{
/*
* If your kernel compilation stops here, please check
- * __pad_to_align_refcnt declaration in struct dst_entry
+ * the placement of __refcnt in struct dst_entry
*/
BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
@@ -373,6 +356,7 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
* skb_tunnel_rx - prepare skb for rx reinsert
* @skb: buffer
* @dev: tunnel device
+ * @net: netns for packet i/o
*
* After decapsulation, packet is going to re-enter (netif_rx()) our stack,
* so make some cleanups, and perform accounting.
@@ -521,4 +505,12 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
}
#endif
+static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && dst->ops->update_pmtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu);
+}
+
#endif /* _NET_DST_H */
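
A minimal sketch of the open-coded pattern skb_dst_update_pmtu() replaces in
tunnel transmit paths; the wrapper and its headroom argument are hypothetical:

#include <net/dst.h>

static void example_tunnel_check_pmtu(struct sk_buff *skb, int headroom)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		u32 mtu = dst_mtu(dst) - headroom;

		/* The helper itself tolerates a missing update_pmtu op */
		skb_dst_update_pmtu(skb, mtu);
	}
}
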
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
index 72fd5067c353..67634675e919 100644
--- a/include/net/dst_cache.h
+++ b/include/net/dst_cache.h
@@ -54,7 +54,7 @@ void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
* local BH must be disabled.
*/
void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
- const struct in6_addr *addr);
+ const struct in6_addr *saddr);
/**
* dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address
@@ -71,7 +71,7 @@ struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
* dst_cache_reset - invalidate the cache contents
* @dst_cache: the cache
*
- * This do not free the cached dst to avoid races and contentions.
+ * This does not free the cached dst to avoid races and contentions.
* the dst will be freed on later cache lookup.
*/
static inline void dst_cache_reset(struct dst_cache *dst_cache)
diff --git a/include/net/erspan.h b/include/net/erspan.h
index ca94fc86865e..d044aa60cc76 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -15,7 +15,7 @@
* s, Recur, Flags, Version fields only S (bit 03) is set to 1. The
* other fields are set to zero, so only a sequence number follows.
*
- * ERSPAN Type II header (8 octets [42:49])
+ * ERSPAN Version 1 (Type II) header (8 octets [42:49])
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -24,11 +24,31 @@
* | Reserved | Index |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
+ *
+ * ERSPAN Version 2 (Type III) header (12 octets [42:49])
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ver | VLAN | COS |BSO|T| Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Timestamp |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SGT |P| FT | Hw ID |D|Gra|O|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Platform Specific SubHeader (8 octets, optional)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Platf ID | Platform Specific Info |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Platform Specific Info |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
* GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB
*/
-#define ERSPAN_VERSION 0x1
+#include <uapi/linux/erspan.h>
+#define ERSPAN_VERSION 0x1 /* ERSPAN type II */
#define VER_MASK 0xf000
#define VLAN_MASK 0x0fff
#define COS_MASK 0xe000
@@ -37,6 +57,19 @@
#define ID_MASK 0x03ff
#define INDEX_MASK 0xfffff
+#define ERSPAN_VERSION2 0x2 /* ERSPAN type III*/
+#define BSO_MASK EN_MASK
+#define SGT_MASK 0xffff0000
+#define P_MASK 0x8000
+#define FT_MASK 0x7c00
+#define HWID_MASK 0x03f0
+#define DIR_MASK 0x0008
+#define GRA_MASK 0x0006
+#define O_MASK 0x0001
+
+#define HWID_OFFSET 4
+#define DIR_OFFSET 3
+
enum erspan_encap_type {
ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */
ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */
@@ -44,18 +77,199 @@ enum erspan_encap_type {
ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag perserved in frame */
};
-struct erspan_metadata {
- __be32 index; /* type II */
-};
+#define ERSPAN_V1_MDSIZE 4
+#define ERSPAN_V2_MDSIZE 8
-struct erspanhdr {
- __be16 ver_vlan;
-#define VER_OFFSET 12
- __be16 session_id;
-#define COS_OFFSET 13
-#define EN_OFFSET 11
-#define T_OFFSET 10
- struct erspan_metadata md;
+struct erspan_base_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 vlan_upper:4,
+ ver:4;
+ __u8 vlan:8;
+ __u8 session_id_upper:2,
+ t:1,
+ en:2,
+ cos:3;
+ __u8 session_id:8;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 ver: 4,
+ vlan_upper:4;
+ __u8 vlan:8;
+ __u8 cos:3,
+ en:2,
+ t:1,
+ session_id_upper:2;
+ __u8 session_id:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
};
+static inline void set_session_id(struct erspan_base_hdr *ershdr, u16 id)
+{
+ ershdr->session_id = id & 0xff;
+ ershdr->session_id_upper = (id >> 8) & 0x3;
+}
+
+static inline u16 get_session_id(const struct erspan_base_hdr *ershdr)
+{
+ return (ershdr->session_id_upper << 8) + ershdr->session_id;
+}
+
+static inline void set_vlan(struct erspan_base_hdr *ershdr, u16 vlan)
+{
+ ershdr->vlan = vlan & 0xff;
+ ershdr->vlan_upper = (vlan >> 8) & 0xf;
+}
+
+static inline u16 get_vlan(const struct erspan_base_hdr *ershdr)
+{
+ return (ershdr->vlan_upper << 8) + ershdr->vlan;
+}
+
+static inline void set_hwid(struct erspan_md2 *md2, u8 hwid)
+{
+ md2->hwid = hwid & 0xf;
+ md2->hwid_upper = (hwid >> 4) & 0x3;
+}
+
+static inline u8 get_hwid(const struct erspan_md2 *md2)
+{
+ return (md2->hwid_upper << 4) + md2->hwid;
+}
+
+static inline int erspan_hdr_len(int version)
+{
+ return sizeof(struct erspan_base_hdr) +
+ (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
+}
+
+static inline u8 tos_to_cos(u8 tos)
+{
+ u8 dscp, cos;
+
+ dscp = tos >> 2;
+ cos = dscp >> 3;
+ return cos;
+}
+
+static inline void erspan_build_header(struct sk_buff *skb,
+ u32 id, u32 index,
+ bool truncate, bool is_ipv4)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ enum erspan_encap_type enc_type;
+ struct erspan_base_hdr *ershdr;
+ struct qtag_prefix {
+ __be16 eth_type;
+ __be16 tci;
+ } *qp;
+ u16 vlan_tci = 0;
+ u8 tos;
+ __be32 *idx;
+
+ tos = is_ipv4 ? ip_hdr(skb)->tos :
+ (ipv6_hdr(skb)->priority << 4) +
+ (ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+ enc_type = ERSPAN_ENCAP_NOVLAN;
+
+	/* If mirrored packet has vlan tag, extract tci and
+	 * preserve vlan header in the mirrored frame.
+ */
+ if (eth->h_proto == htons(ETH_P_8021Q)) {
+ qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+ vlan_tci = ntohs(qp->tci);
+ enc_type = ERSPAN_ENCAP_INFRAME;
+ }
+
+ skb_push(skb, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+
+ /* Build base header */
+ ershdr->ver = ERSPAN_VERSION;
+ ershdr->cos = tos_to_cos(tos);
+ ershdr->en = enc_type;
+ ershdr->t = truncate;
+ set_vlan(ershdr, vlan_tci);
+ set_session_id(ershdr, id);
+
+ /* Build metadata */
+ idx = (__be32 *)(ershdr + 1);
+ *idx = htonl(index & INDEX_MASK);
+}
+
+/* ERSPAN GRA: timestamp granularity
+ * 00b --> granularity = 100 microseconds
+ * 01b --> granularity = 100 nanoseconds
+ * 10b --> granularity = IEEE 1588
+ * Here we only support 100 microseconds.
+ */
+static inline __be32 erspan_get_timestamp(void)
+{
+ u64 h_usecs;
+ ktime_t kt;
+
+ kt = ktime_get_real();
+ h_usecs = ktime_divns(kt, 100 * NSEC_PER_USEC);
+
+	/* The ERSPAN timestamp field is only 32 bits wide,
+	 * so it wraps around after roughly 4 days.
+	 */
+ return htonl((u32)h_usecs);
+}
+
+static inline void erspan_build_header_v2(struct sk_buff *skb,
+ u32 id, u8 direction, u16 hwid,
+ bool truncate, bool is_ipv4)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct erspan_base_hdr *ershdr;
+ struct erspan_md2 *md2;
+ struct qtag_prefix {
+ __be16 eth_type;
+ __be16 tci;
+ } *qp;
+ u16 vlan_tci = 0;
+ u8 gra = 0; /* 100 usec */
+ u8 bso = 0; /* Bad/Short/Oversized */
+ u8 sgt = 0;
+ u8 tos;
+
+ tos = is_ipv4 ? ip_hdr(skb)->tos :
+ (ipv6_hdr(skb)->priority << 4) +
+ (ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+ /* Unlike v1, v2 does not have En field,
+ * so only extract vlan tci field.
+ */
+ if (eth->h_proto == htons(ETH_P_8021Q)) {
+ qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+ vlan_tci = ntohs(qp->tci);
+ }
+
+ skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+
+ /* Build base header */
+ ershdr->ver = ERSPAN_VERSION2;
+ ershdr->cos = tos_to_cos(tos);
+ ershdr->en = bso;
+ ershdr->t = truncate;
+ set_vlan(ershdr, vlan_tci);
+ set_session_id(ershdr, id);
+
+ /* Build metadata */
+ md2 = (struct erspan_md2 *)(ershdr + 1);
+ md2->timestamp = erspan_get_timestamp();
+ md2->sgt = htons(sgt);
+ md2->p = 1;
+ md2->ft = 0;
+ md2->dir = direction;
+ md2->gra = gra;
+ md2->o = 0;
+ set_hwid(md2, hwid);
+}
+
#endif
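
A minimal sketch exercising the accessors above: the 10-bit session id and
6-bit hardware id are each split across two bitfields, and the set/get
helpers hide the split. The values used are arbitrary:

#include <linux/bug.h>
#include <net/erspan.h>

static void example_erspan_fields(void)
{
	struct erspan_base_hdr hdr = {};
	struct erspan_md2 md2 = {};

	set_session_id(&hdr, 0x2a5);	/* any 10-bit value */
	set_hwid(&md2, 0x3f);		/* any 6-bit value */

	WARN_ON(get_session_id(&hdr) != 0x2a5);
	WARN_ON(get_hwid(&md2) != 0x3f);
}
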
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index bb7f467da7fc..29ba069a1d93 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -21,4 +21,3 @@ struct ethoc_platform_data {
};
#endif /* !LINUX_NET_ETHOC_H */
-
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 648caf90ec07..e5cfcfc7dd93 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -26,7 +26,8 @@ struct fib_rule {
u32 table;
u8 action;
u8 l3mdev;
- /* 2 bytes hole, try to use */
+ u8 proto;
+ u8 ip_proto;
u32 target;
__be64 tun_id;
struct fib_rule __rcu *ctarget;
@@ -39,11 +40,14 @@ struct fib_rule {
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
struct fib_kuid_range uid_range;
+ struct fib_rule_port_range sport_range;
+ struct fib_rule_port_range dport_range;
struct rcu_head rcu;
};
struct fib_lookup_arg {
void *lookup_ptr;
+ const void *lookup_data;
void *result;
struct fib_rule *rule;
u32 table;
@@ -108,7 +112,12 @@ struct fib_rule_notifier_info {
[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
[FRA_GOTO] = { .type = NLA_U32 }, \
[FRA_L3MDEV] = { .type = NLA_U8 }, \
- [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }
+ [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }, \
+ [FRA_PROTOCOL] = { .type = NLA_U8 }, \
+ [FRA_IP_PROTO] = { .type = NLA_U8 }, \
+ [FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }, \
+ [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }
+
static inline void fib_rule_get(struct fib_rule *rule)
{
@@ -142,6 +151,38 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
return frh->table;
}
+static inline bool fib_rule_port_range_set(const struct fib_rule_port_range *range)
+{
+ return range->start != 0 && range->end != 0;
+}
+
+static inline bool fib_rule_port_inrange(const struct fib_rule_port_range *a,
+ __be16 port)
+{
+ return ntohs(port) >= a->start &&
+ ntohs(port) <= a->end;
+}
+
+static inline bool fib_rule_port_range_valid(const struct fib_rule_port_range *a)
+{
+ return a->start != 0 && a->end != 0 && a->end < 0xffff &&
+ a->start <= a->end;
+}
+
+static inline bool fib_rule_port_range_compare(struct fib_rule_port_range *a,
+ struct fib_rule_port_range *b)
+{
+ return a->start == b->start &&
+ a->end == b->end;
+}
+
+static inline bool fib_rule_requires_fldissect(struct fib_rule *rule)
+{
+ return rule->ip_proto ||
+ fib_rule_port_range_set(&rule->sport_range) ||
+ fib_rule_port_range_set(&rule->dport_range);
+}
+
struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
struct net *);
void fib_rules_unregister(struct fib_rules_ops *);
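
A minimal sketch of how the new port-range helpers combine when matching a
packet's source port; the wrapper function is hypothetical:

#include <net/fib_rules.h>

static bool example_rule_matches_sport(struct fib_rule *rule, __be16 sport)
{
	struct fib_rule_port_range *range = &rule->sport_range;

	/* An unset range matches any port */
	if (!fib_rule_port_range_set(range))
		return true;

	return fib_rule_port_inrange(range, sport);
}
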
diff --git a/include/net/flow.h b/include/net/flow.h
index f1624fd5b1d0..8ce21793094e 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -125,7 +125,7 @@ static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
fl4->daddr = daddr;
fl4->saddr = saddr;
}
-
+
struct flowi6 {
struct flowi_common __fl_common;
@@ -222,20 +222,4 @@ static inline unsigned int flow_key_size(u16 family)
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
-static inline __u32 get_hash_from_flowi6(const struct flowi6 *fl6)
-{
- struct flow_keys keys;
-
- return __get_hash_from_flowi6(fl6, &keys);
-}
-
-__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys);
-
-static inline __u32 get_hash_from_flowi4(const struct flowi4 *fl4)
-{
- struct flow_keys keys;
-
- return __get_hash_from_flowi4(fl4, &keys);
-}
-
#endif
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 304f7aa9cc01..0304ba2ae353 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -49,6 +49,9 @@ int gnet_stats_copy_rate_est(struct gnet_dump *d,
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue __percpu *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
+void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu_q,
+ const struct gnet_stats_queue *q, __u32 qlen);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d);
diff --git a/include/net/gre.h b/include/net/gre.h
index f90585decbce..797142eee9cd 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -37,6 +37,9 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto, int nhs);
+bool is_gretap_dev(const struct net_device *dev);
+bool is_ip6gretap_dev(const struct net_device *dev);
+
static inline int gre_calc_hlen(__be16 o_flags)
{
int addend = 4;
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index d91f9e7f4d71..960236fb1681 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -149,6 +149,8 @@ enum ieee80211_radiotap_ampdu_flags {
IEEE80211_RADIOTAP_AMPDU_IS_LAST = 0x0008,
IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR = 0x0010,
IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_AMPDU_EOF = 0x0040,
+ IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN = 0x0080,
};
/* for IEEE80211_RADIOTAP_VHT */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 5a54c9570977..384b90c62c0b 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -32,7 +32,9 @@ int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
void inet_sock_destruct(struct sock *sk);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-int inet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ bool force_bind_address_no_port, bool with_lock);
+int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0358745ea059..b68fea022a82 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -49,9 +49,9 @@ struct inet_connection_sock_af_ops {
u16 net_header_len;
u16 net_frag_header_len;
u16 sockaddr_len;
- int (*setsockopt)(struct sock *sk, int level, int optname,
+ int (*setsockopt)(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
- int (*getsockopt)(struct sock *sk, int level, int optname,
+ int (*getsockopt)(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
#ifdef CONFIG_COMPAT
int (*compat_setsockopt)(struct sock *sk,
@@ -67,7 +67,7 @@ struct inet_connection_sock_af_ops {
/** inet_connection_sock - INET connection oriented sock
*
- * @icsk_accept_queue: FIFO of established children
+ * @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
* @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
@@ -77,6 +77,7 @@ struct inet_connection_sock_af_ops {
* @icsk_af_ops Operations which are AF_INET{4,6} specific
* @icsk_ulp_ops Pluggable ULP control hook
* @icsk_ulp_data ULP private data
+ * @icsk_listen_portaddr_node	hash node in the portaddr listener hashtable

* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
@@ -101,6 +102,7 @@ struct inet_connection_sock {
const struct inet_connection_sock_af_ops *icsk_af_ops;
const struct tcp_ulp_ops *icsk_ulp_ops;
void *icsk_ulp_data;
+ struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state:6,
icsk_ca_setsockopt:1,
@@ -120,7 +122,7 @@ struct inet_connection_sock {
unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */
__u16 last_seg_size; /* Size of last incoming segment */
- __u16 rcv_mss; /* MSS used for delayed ACK decisions */
+ __u16 rcv_mss; /* MSS used for delayed ACK decisions */
} icsk_ack;
struct {
int enabled;
@@ -199,7 +201,7 @@ extern const char inet_csk_timer_bug_msg[];
static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
struct inet_connection_sock *icsk = inet_csk(sk);
-
+
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
icsk->icsk_pending = 0;
#ifdef INET_CSK_CLEAR_TIMERS
@@ -305,10 +307,10 @@ void inet_csk_prepare_forced_close(struct sock *sk);
/*
* LISTEN is a special case for poll..
*/
-static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
+static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
- (POLLIN | POLLRDNORM) : 0;
+ (EPOLLIN | EPOLLRDNORM) : 0;
}
int inet_csk_listen_start(struct sock *sk, int backlog);
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 351f0c3cdcd9..ed07e3786d98 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -2,14 +2,20 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
+#include <linux/rhashtable.h>
+
struct netns_frags {
- /* Keep atomic mem on separate cachelines in structs that include it */
- atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
+ long high_thresh;
+ long low_thresh;
int timeout;
- int high_thresh;
- int low_thresh;
int max_dist;
+ struct inet_frags *f;
+
+ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
+
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_long_t mem ____cacheline_aligned_in_smp;
};
/**
@@ -25,12 +31,30 @@ enum {
INET_FRAG_COMPLETE = BIT(2),
};
+struct frag_v4_compare_key {
+ __be32 saddr;
+ __be32 daddr;
+ u32 user;
+ u32 vif;
+ __be16 id;
+ u16 protocol;
+};
+
+struct frag_v6_compare_key {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ u32 user;
+ __be32 id;
+ u32 iif;
+};
+
/**
* struct inet_frag_queue - fragment queue
*
- * @lock: spinlock protecting the queue
+ * @node: rhash node
+ * @key: keys identifying this frag.
* @timer: queue expiration timer
- * @list: hash bucket list
+ * @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
* @fragments: received fragments head
* @fragments_tail: received fragments tail
@@ -40,12 +64,16 @@ enum {
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @net: namespace that this frag belongs to
- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ * @rcu: rcu head for deferred freeing
*/
struct inet_frag_queue {
- spinlock_t lock;
+ struct rhash_head node;
+ union {
+ struct frag_v4_compare_key v4;
+ struct frag_v6_compare_key v6;
+ } key;
struct timer_list timer;
- struct hlist_node list;
+ spinlock_t lock;
refcount_t refcnt;
struct sk_buff *fragments;
struct sk_buff *fragments_tail;
@@ -54,101 +82,57 @@ struct inet_frag_queue {
int meat;
__u8 flags;
u16 max_size;
- struct netns_frags *net;
- struct hlist_node list_evictor;
-};
-
-#define INETFRAGS_HASHSZ 1024
-
-/* averaged:
- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
- * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
- * struct frag_queue))
- */
-#define INETFRAGS_MAXDEPTH 128
-
-struct inet_frag_bucket {
- struct hlist_head chain;
- spinlock_t chain_lock;
+ struct netns_frags *net;
+ struct rcu_head rcu;
};
struct inet_frags {
- struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
-
- struct work_struct frags_work;
- unsigned int next_bucket;
- unsigned long last_rebuild_jiffies;
- bool rebuild;
-
- /* The first call to hashfn is responsible to initialize
- * rnd. This is best done with net_get_random_once.
- *
- * rnd_seqlock is used to let hash insertion detect
- * when it needs to re-lookup the hash chain to use.
- */
- u32 rnd;
- seqlock_t rnd_seqlock;
unsigned int qsize;
- unsigned int (*hashfn)(const struct inet_frag_queue *);
- bool (*match)(const struct inet_frag_queue *q,
- const void *arg);
void (*constructor)(struct inet_frag_queue *q,
const void *arg);
void (*destructor)(struct inet_frag_queue *);
void (*frag_expire)(struct timer_list *t);
struct kmem_cache *frags_cachep;
const char *frags_cache_name;
+ struct rhashtable_params rhash_params;
};
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
-static inline void inet_frags_init_net(struct netns_frags *nf)
+static inline int inet_frags_init_net(struct netns_frags *nf)
{
- atomic_set(&nf->mem, 0);
+ atomic_long_set(&nf->mem, 0);
+ return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
}
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
-
-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
- struct inet_frags *f, void *key, unsigned int hash);
+void inet_frags_exit_net(struct netns_frags *nf);
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
- const char *prefix);
+void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+static inline void inet_frag_put(struct inet_frag_queue *q)
{
if (refcount_dec_and_test(&q->refcnt))
- inet_frag_destroy(q, f);
-}
-
-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
-{
- return !hlist_unhashed(&q->list_evictor);
+ inet_frag_destroy(q);
}
/* Memory Tracking Functions. */
-static inline int frag_mem_limit(struct netns_frags *nf)
-{
- return atomic_read(&nf->mem);
-}
-
-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+static inline long frag_mem_limit(const struct netns_frags *nf)
{
- atomic_sub(i, &nf->mem);
+ return atomic_long_read(&nf->mem);
}
-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
{
- atomic_add(i, &nf->mem);
+ atomic_long_sub(val, &nf->mem);
}
-static inline int sum_frag_mem_limit(struct netns_frags *nf)
+static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
{
- return atomic_read(&nf->mem);
+ atomic_long_add(val, &nf->mem);
}
/* RFC 3168 support :
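
With the hash table moved into struct netns_frags and keyed by the new compare-key unions, lookup callers no longer compute a hash or pass struct inet_frags around. A sketch of the intended IPv4 lookup pattern, based only on the declarations in this hunk (example_* is not part of the patch):

#include <linux/ip.h>
#include <net/inet_frag.h>

static struct inet_frag_queue *example_ipq_find(struct netns_frags *nf,
						const struct iphdr *iph,
						u32 user, u32 vif)
{
	struct frag_v4_compare_key key = {
		.saddr		= iph->saddr,
		.daddr		= iph->daddr,
		.user		= user,
		.vif		= vif,
		.id		= iph->id,
		.protocol	= iph->protocol,
	};

	/* Returns a queue with its refcount raised, or NULL; the caller
	 * releases it with inet_frag_put() when done. */
	return inet_frag_find(nf, &key);
}
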
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 2dbbbff5e1e3..9141e95529e7 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -111,6 +111,7 @@ struct inet_bind_hashbucket {
*/
struct inet_listen_hashbucket {
spinlock_t lock;
+ unsigned int count;
struct hlist_head head;
};
@@ -132,12 +133,13 @@ struct inet_hashinfo {
/* Ok, let's try this, I give up, we do need a local binding
* TCP hash as well as the others for fast bind/connect.
*/
+ struct kmem_cache *bind_bucket_cachep;
struct inet_bind_hashbucket *bhash;
-
unsigned int bhash_size;
- /* 4 bytes hole on 64 bit */
- struct kmem_cache *bind_bucket_cachep;
+ /* The 2nd listener table hashed by local port and address */
+ unsigned int lhash2_mask;
+ struct inet_listen_hashbucket *lhash2;
/* All the above members are written once at bootup and
* never written again _or_ are predominantly read-access.
@@ -145,14 +147,25 @@ struct inet_hashinfo {
* Now align to a new cache line as all the following members
* might be often dirty.
*/
- /* All sockets in TCP_LISTEN state will be in here. This is the only
- * table where wildcard'd TCP sockets can exist. Hash function here
- * is just local port number.
+ /* All sockets in TCP_LISTEN state will be in listening_hash.
+ * This is the only table where wildcard'd TCP sockets can
+ * exist. listening_hash is only hashed by local port number.
+ * If lhash2 is initialized, the same socket will also be hashed
+ * to lhash2 by port and address.
*/
struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
____cacheline_aligned_in_smp;
};
+#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
+ hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
+
+static inline struct inet_listen_hashbucket *
+inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
+{
+ return &h->lhash2[hash & h->lhash2_mask];
+}
+
static inline struct inet_ehash_bucket *inet_ehash_bucket(
struct inet_hashinfo *hashinfo,
unsigned int hash)
@@ -208,6 +221,10 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child);
void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
+void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+ unsigned long numentries, int scale,
+ unsigned long low_limit,
+ unsigned long high_limit);
bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
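
lhash2 adds a second, port-and-address keyed listener table next to the old port-only listening_hash. A lookup-side sketch under rcu_read_lock(), using the ipv4_portaddr_hash() helper added to include/net/ip.h later in this diff; example_* and the scoring step are placeholders:

#include <net/inet_hashtables.h>
#include <net/ip.h>

static struct sock *example_lhash2_scan(struct net *net,
					struct inet_hashinfo *h,
					__be32 daddr, unsigned short hnum)
{
	struct inet_listen_hashbucket *ilb2;
	struct inet_connection_sock *icsk;
	struct sock *sk;

	ilb2 = inet_lhash2_bucket(h, ipv4_portaddr_hash(net, daddr, hnum));
	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
		sk = (struct sock *)icsk;
		if (sk->sk_num == hnum)	/* real code also scores the address */
			return sk;
	}
	return NULL;
}
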
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 39efb968b7a4..0a671c32d6b9 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
int inet_sk_rebuild_header(struct sock *sk);
+/**
+ * inet_sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int inet_sk_state_load(const struct sock *sk)
+{
+ /* state change might impact lockless readers. */
+ return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * inet_sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with inet_sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+void inet_sk_state_store(struct sock *sk, int newstate);
+
+void inet_sk_set_state(struct sock *sk, int state);
+
static inline unsigned int __inet_ehashfn(const __be32 laddr,
const __u16 lport,
const __be32 faddr,
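
inet_sk_state_store() pairs a release barrier with the acquire in inet_sk_state_load(), so lockless readers observe a fully initialised socket before they see the new state. A one-line reader-side sketch (example_* is illustrative only):

#include <net/inet_sock.h>
#include <net/tcp_states.h>

/* Safe to call without the socket lock, per the kerneldoc above. */
static bool example_sk_is_listening(const struct sock *sk)
{
	return inet_sk_state_load(sk) == TCP_LISTEN;
}
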
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 1356fa6a7566..c7be1ca8e562 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -43,6 +43,7 @@ struct inet_timewait_sock {
#define tw_family __tw_common.skc_family
#define tw_state __tw_common.skc_state
#define tw_reuse __tw_common.skc_reuse
+#define tw_reuseport __tw_common.skc_reuseport
#define tw_ipv6only __tw_common.skc_ipv6only
#define tw_bound_dev_if __tw_common.skc_bound_dev_if
#define tw_node __tw_common.skc_nulls_node
@@ -93,8 +94,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
const int state);
-void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ struct inet_hashinfo *hashinfo);
void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
bool rearm);
diff --git a/include/net/ip.h b/include/net/ip.h
index af8addbaa3c1..ecffd843e7b8 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -26,12 +26,14 @@
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
+#include <linux/jhash.h>
#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
+#include <net/netns/hash.h>
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -89,6 +91,17 @@ static inline int inet_sdif(struct sk_buff *skb)
return 0;
}
+/* Special input handler for packets caught by the router alert option.
+ They are selected only by protocol field and then processed like
+ local ones, but only if someone wants them! Otherwise, a router
+ not running rsvpd would kill RSVP.
+
+ What to do with them is a user-level problem.
+ I have no idea how it will masquerade or NAT them (joke, joke :-)),
+ but the receiver should be clever enough, e.g., to forward mtrace
+ requests sent to a multicast group so they reach the destination's
+ designated router.
+ */
+
struct ip_ra_chain {
struct ip_ra_chain __rcu *next;
struct sock *sk;
@@ -99,8 +112,6 @@ struct ip_ra_chain {
struct rcu_head rcu;
};
-extern struct ip_ra_chain __rcu *ip_ra_chain;
-
/* IP flags. */
#define IP_CE 0x8000 /* Flag: "Congestion" */
#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
@@ -184,15 +195,15 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
void ip4_datagram_release_cb(struct sock *sk);
struct ip_reply_arg {
- struct kvec iov[1];
+ struct kvec iov[1];
int flags;
__wsum csum;
int csumoffset; /* u16 offset of csum in iov[0].iov_base */
- /* -1 if not needed */
+ /* -1 if not needed */
int bound_dev_if;
u8 tos;
kuid_t uid;
-};
+};
#define IP_REPLY_ARG_NOSRCCHECK 1
@@ -326,6 +337,13 @@ int ip_decrease_ttl(struct iphdr *iph)
return --iph->ttl;
}
+static inline int ip_mtu_locked(const struct dst_entry *dst)
+{
+ const struct rtable *rt = (const struct rtable *)dst;
+
+ return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+}
+
static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
@@ -333,7 +351,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
return pmtudisc == IP_PMTUDISC_DO ||
(pmtudisc == IP_PMTUDISC_WANT &&
- !(dst_metric_locked(dst, RTAX_MTU)));
+ !ip_mtu_locked(dst));
}
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
@@ -359,7 +377,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
struct net *net = dev_net(dst->dev);
if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
- dst_metric_locked(dst, RTAX_MTU) ||
+ ip_mtu_locked(dst) ||
!forwarding)
return dst_mtu(dst);
@@ -522,6 +540,13 @@ static inline unsigned int ipv4_addr_hash(__be32 ip)
return (__force unsigned int) ip;
}
+static inline u32 ipv4_portaddr_hash(const struct net *net,
+ __be32 saddr,
+ unsigned int port)
+{
+ return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
+}
+
bool ip_call_ra_chain(struct sk_buff *skb);
/*
@@ -563,18 +588,17 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s
return skb;
}
#endif
-int ip_frag_mem(struct net *net);
/*
* Functions provided by ip_forward.c
*/
-
+
int ip_forward(struct sk_buff *skb);
-
+
/*
* Functions provided by ip_options.c
*/
-
+
void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
__be32 daddr, struct rtable *rt, int is_frag);
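
ip_mtu_locked() extends the old RTAX_MTU "metric locked" test with the new per-route rt_mtu_locked bit (see the fnhe_mtu_locked field added to ip_fib.h below). A hedged sketch of how a PMTU update path would consult it; it only makes sense on an IPv4 dst backed by struct rtable:

#include <net/dst.h>
#include <net/ip.h>

static bool example_may_reduce_pmtu(struct dst_entry *dst, u32 new_mtu)
{
	if (ip_mtu_locked(dst))		/* MTU pinned, ignore PMTU hints */
		return false;
	return new_mtu < dst_mtu(dst);
}
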
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 10c913816032..5e86fd9dc857 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -129,6 +129,8 @@ struct rt6_exception {
struct rt6_info {
struct dst_entry dst;
+ struct rt6_info __rcu *rt6_next;
+ struct rt6_info *from;
/*
* Tail elements of dst_entry (__refcnt etc.)
@@ -147,6 +149,7 @@ struct rt6_info {
*/
struct list_head rt6i_siblings;
unsigned int rt6i_nsiblings;
+ atomic_t rt6i_nh_upper_bound;
atomic_t rt6i_ref;
@@ -168,19 +171,21 @@ struct rt6_info {
u32 rt6i_metric;
u32 rt6i_pmtu;
/* more non-fragment space at head required */
+ int rt6i_nh_weight;
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
u8 exception_bucket_flushed:1,
- unused:7;
+ should_flush:1,
+ unused:6;
};
#define for_each_fib6_node_rt_rcu(fn) \
for (rt = rcu_dereference((fn)->leaf); rt; \
- rt = rcu_dereference(rt->dst.rt6_next))
+ rt = rcu_dereference(rt->rt6_next))
#define for_each_fib6_walker_rt(w) \
for (rt = (w)->leaf; rt; \
- rt = rcu_dereference_protected(rt->dst.rt6_next, 1))
+ rt = rcu_dereference_protected(rt->rt6_next, 1))
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
@@ -203,11 +208,9 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
struct rt6_info *rt;
- for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES);
- rt = (struct rt6_info *)rt->dst.from);
+ for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from);
if (rt && rt != rt0)
rt0->dst.expires = rt->dst.expires;
-
dst_set_expires(&rt0->dst, timeout);
rt0->rt6i_flags |= RTF_EXPIRES;
}
@@ -242,8 +245,8 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
u32 cookie = 0;
if (rt->rt6i_flags & RTF_PCPU ||
- (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
- rt = (struct rt6_info *)(rt->dst.from);
+ (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
+ rt = rt->from;
rt6_get_cookie_safe(rt, &cookie);
@@ -347,7 +350,8 @@ struct fib6_table {
typedef struct rt6_info *(*pol_lookup_t)(struct net *,
struct fib6_table *,
- struct flowi6 *, int);
+ struct flowi6 *,
+ const struct sk_buff *, int);
struct fib6_entry_notifier_info {
struct fib_notifier_info info; /* must be first */
@@ -361,6 +365,7 @@ struct fib6_entry_notifier_info {
struct fib6_table *fib6_get_table(struct net *net, u32 id);
struct fib6_table *fib6_new_table(struct net *net, u32 id);
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
+ const struct sk_buff *skb,
int flags, pol_lookup_t lookup);
struct fib6_node *fib6_lookup(struct fib6_node *root,
@@ -404,6 +409,7 @@ unsigned int fib6_tables_seq_read(struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb);
void fib6_update_sernum(struct rt6_info *rt);
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
int fib6_rules_init(void);
@@ -411,6 +417,24 @@ void fib6_rules_cleanup(void);
bool fib6_rule_default(const struct fib_rule *rule);
int fib6_rules_dump(struct net *net, struct notifier_block *nb);
unsigned int fib6_rules_seq_read(struct net *net);
+
+static inline bool fib6_rules_early_flow_dissect(struct net *net,
+ struct sk_buff *skb,
+ struct flowi6 *fl6,
+ struct flow_keys *flkeys)
+{
+ unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
+
+ if (!net->ipv6.fib6_rules_require_fldissect)
+ return false;
+
+ skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ fl6->fl6_sport = flkeys->ports.src;
+ fl6->fl6_dport = flkeys->ports.dst;
+ fl6->flowi6_proto = flkeys->basic.ip_proto;
+
+ return true;
+}
#else
static inline int fib6_rules_init(void)
{
@@ -432,5 +456,12 @@ static inline unsigned int fib6_rules_seq_read(struct net *net)
{
return 0;
}
+static inline bool fib6_rules_early_flow_dissect(struct net *net,
+ struct sk_buff *skb,
+ struct flowi6 *fl6,
+ struct flow_keys *flkeys)
+{
+ return false;
+}
#endif
#endif
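
fib6_rules_early_flow_dissect() lets the input path dissect the packet once, up front, whenever some rule matches on sport/dport/ip_proto (tracked by net->ipv6.fib6_rules_require_fldissect), instead of dissecting inside every rule evaluation. A caller-side sketch; what consumes hash_keys is left open here:

#include <net/flow_dissector.h>
#include <net/ip6_fib.h>

static void example_ipv6_collect_keys(struct net *net, struct sk_buff *skb,
				      struct flowi6 *fl6)
{
	struct flow_keys keys;
	struct flow_keys *hash_keys = NULL;

	if (fib6_rules_early_flow_dissect(net, skb, fl6, &keys))
		hash_keys = &keys;	/* fl6 sport/dport/proto now set too */

	/* hash_keys stays NULL when no rule needs the dissect; whatever
	 * consumes it (e.g. multipath hashing) must cope with that. */
	(void)hash_keys;
}
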
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 18e442ea93d8..08b132381984 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,10 +66,17 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
+static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt)
+{
+ return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+ RTF_GATEWAY;
+}
+
void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
- struct flowi6 *fl6, int flags);
+ struct flowi6 *fl6,
+ const struct sk_buff *skb, int flags);
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
@@ -82,9 +89,10 @@ static inline struct dst_entry *ip6_route_output(struct net *net,
}
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
- int flags);
+ const struct sk_buff *skb, int flags);
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
- int ifindex, struct flowi6 *fl6, int flags);
+ int ifindex, struct flowi6 *fl6,
+ const struct sk_buff *skb, int flags);
void ip6_route_init_special_entries(void);
int ip6_route_init(void);
@@ -120,8 +128,10 @@ static inline int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
}
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
- const struct in6_addr *saddr, int oif, int flags);
-u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb);
+ const struct in6_addr *saddr, int oif,
+ const struct sk_buff *skb, int flags);
+u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
+ const struct sk_buff *skb, struct flow_keys *hkeys);
struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
@@ -165,10 +175,16 @@ struct rt6_rtnl_dump_arg {
};
int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-void rt6_ifdown(struct net *net, struct net_device *dev);
void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
+void rt6_disable_ip(struct net_device *dev, unsigned long event);
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
+void rt6_multipath_rebalance(struct rt6_info *rt);
+
+void rt6_uncached_list_add(struct rt6_info *rt);
+void rt6_uncached_list_del(struct rt6_info *rt);
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
@@ -198,6 +214,9 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
#endif
}
+void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
+ const struct flowi6 *fl6);
+
static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
{
struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
@@ -257,4 +276,5 @@ static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
!lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
}
+
#endif
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d66f70f63734..236e40ba06bf 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,10 @@ struct __ip6_tnl_parm {
__be32 o_key;
__u32 fwmark;
+ __u32 index; /* ERSPAN type II index */
+ __u8 erspan_ver; /* ERSPAN version */
+ __u8 dir; /* direction */
+ __u16 hwid; /* hwid */
};
/* IPv6 tunnel */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f80524396c06..81d0f2107ff1 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -59,6 +59,7 @@ struct fib_nh_exception {
int fnhe_genid;
__be32 fnhe_daddr;
u32 fnhe_pmtu;
+ bool fnhe_mtu_locked;
__be32 fnhe_gw;
unsigned long fnhe_expires;
struct rtable __rcu *fnhe_rth_input;
@@ -157,7 +158,7 @@ struct fib_result_nl {
unsigned char nh_sel;
unsigned char type;
unsigned char scope;
- int err;
+ int err;
};
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -293,6 +294,13 @@ static inline unsigned int fib4_rules_seq_read(struct net *net)
return 0;
}
+static inline bool fib4_rules_early_flow_dissect(struct net *net,
+ struct sk_buff *skb,
+ struct flowi4 *fl4,
+ struct flow_keys *flkeys)
+{
+ return false;
+}
#else /* CONFIG_IP_MULTIPLE_TABLES */
int __net_init fib4_rules_init(struct net *net);
void __net_exit fib4_rules_exit(struct net *net);
@@ -341,6 +349,24 @@ bool fib4_rule_default(const struct fib_rule *rule);
int fib4_rules_dump(struct net *net, struct notifier_block *nb);
unsigned int fib4_rules_seq_read(struct net *net);
+static inline bool fib4_rules_early_flow_dissect(struct net *net,
+ struct sk_buff *skb,
+ struct flowi4 *fl4,
+ struct flow_keys *flkeys)
+{
+ unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
+
+ if (!net->ipv4.fib_rules_require_fldissect)
+ return false;
+
+ skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ fl4->fl4_sport = flkeys->ports.src;
+ fl4->fl4_dport = flkeys->ports.dst;
+ fl4->flowi4_proto = flkeys->basic.ip_proto;
+
+ return true;
+}
+
#endif /* CONFIG_IP_MULTIPLE_TABLES */
/* Exported by fib_frontend.c */
@@ -370,8 +396,8 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
-int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
- const struct sk_buff *skb);
+int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
+ const struct sk_buff *skb, struct flow_keys *flkeys);
#endif
void fib_select_multipath(struct fib_result *res, int hash);
void fib_select_path(struct net *net, struct fib_result *res,
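
The IPv4 side mirrors this: fib4_rules_early_flow_dissect() fills fl4 and the shared flow_keys, and the reworked fib_multipath_hash() now takes those pre-dissected keys (and the netns) instead of a fib_info. A sketch under CONFIG_IP_ROUTE_MULTIPATH, assuming the hash function falls back to its own dissection when flkeys is NULL:

#include <net/flow_dissector.h>
#include <net/ip_fib.h>

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int example_ipv4_mp_hash(struct net *net, struct flowi4 *fl4,
				struct sk_buff *skb)
{
	struct flow_keys keys;
	struct flow_keys *flkeys = NULL;

	if (fib4_rules_early_flow_dissect(net, skb, fl4, &keys))
		flkeys = &keys;

	return fib_multipath_hash(net, fl4, skb, flkeys);
}
#endif
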
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 24628f6b09bf..540a4b4417bf 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -116,8 +116,11 @@ struct ip_tunnel {
u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
- /* This field used only by ERSPAN */
+ /* These four fields used only by ERSPAN */
u32 index; /* ERSPAN type II index */
+ u8 erspan_ver; /* ERSPAN version */
+ u8 dir; /* ERSPAN direction */
+ u16 hwid; /* ERSPAN hardware ID */
struct dst_cache dst_cache;
@@ -177,8 +180,10 @@ struct tnl_ptk_info {
struct ip_tunnel_net {
struct net_device *fb_tunnel_dev;
+ struct rtnl_link_ops *rtnl_link_ops;
struct hlist_head tunnels[IP_TNL_HASH_SIZE];
struct ip_tunnel __rcu *collect_md_tun;
+ int type;
};
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
@@ -251,6 +256,22 @@ static inline __be32 tunnel_id_to_key32(__be64 tun_id)
#ifdef CONFIG_INET
+static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
+ int proto,
+ __be32 daddr, __be32 saddr,
+ __be32 key, __u8 tos, int oif,
+ __u32 mark)
+{
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_oif = oif;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->flowi4_tos = tos;
+ fl4->flowi4_proto = proto;
+ fl4->fl4_gre_key = key;
+ fl4->flowi4_mark = mark;
+}
+
int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
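
ip_tunnel_init_flow() replaces the memset-plus-assignments that IPv4 tunnel drivers open-coded before a route lookup. A hypothetical GRE-style caller; the field choices are illustrative and error handling of the returned route is left to the caller:

#include <linux/in.h>
#include <net/ip_tunnels.h>
#include <net/route.h>

static struct rtable *example_tunnel_route(struct net *net,
					   struct ip_tunnel *t,
					   __be32 daddr, __be32 saddr)
{
	struct flowi4 fl4;

	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, daddr, saddr,
			    t->parms.o_key, RT_TOS(t->parms.iph.tos),
			    t->parms.link, t->fwmark);

	/* May return an ERR_PTR(); callers must check with IS_ERR(). */
	return ip_route_output_key(net, &fl4);
}
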
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index ff68cf288f9b..eb0bec043c96 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -69,8 +69,7 @@ struct ip_vs_iphdr {
};
static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
- int len, void *buffer,
- const struct ip_vs_iphdr *ipvsh)
+ int len, void *buffer)
{
return skb_header_pointer(skb, offset, len, buffer);
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 221238254eb7..836f31af1369 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -22,6 +22,7 @@
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/snmp.h>
+#include <net/netns/hash.h>
#define SIN6_LEN_RFC2133 24
@@ -104,8 +105,8 @@
#define IPV6_ADDR_ANY 0x0000U
-#define IPV6_ADDR_UNICAST 0x0001U
-#define IPV6_ADDR_MULTICAST 0x0002U
+#define IPV6_ADDR_UNICAST 0x0001U
+#define IPV6_ADDR_MULTICAST 0x0002U
#define IPV6_ADDR_LOOPBACK 0x0010U
#define IPV6_ADDR_LINKLOCAL 0x0020U
@@ -378,13 +379,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev)
idev->cnf.accept_ra;
}
-#if IS_ENABLED(CONFIG_IPV6)
-static inline int ip6_frag_mem(struct net *net)
-{
- return sum_frag_mem_limit(&net->ipv6.frags);
-}
-#endif
-
#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
@@ -446,7 +440,7 @@ ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
#endif
}
-static inline void ipv6_addr_prefix(struct in6_addr *pfx,
+static inline void ipv6_addr_prefix(struct in6_addr *pfx,
const struct in6_addr *addr,
int plen)
{
@@ -495,7 +489,7 @@ static inline void __ipv6_addr_set_half(__be32 *addr,
addr[1] = wl;
}
-static inline void ipv6_addr_set(struct in6_addr *addr,
+static inline void ipv6_addr_set(struct in6_addr *addr,
__be32 w1, __be32 w2,
__be32 w3, __be32 w4)
{
@@ -578,17 +572,8 @@ enum ip6_defrag_users {
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};
-struct ip6_create_arg {
- __be32 id;
- u32 user;
- const struct in6_addr *src;
- const struct in6_addr *dst;
- int iif;
- u8 ecn;
-};
-
void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
+extern const struct rhashtable_params ip6_rhash_params;
/*
* Equivalent of ipv4 struct ip
@@ -596,19 +581,12 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
struct frag_queue {
struct inet_frag_queue q;
- __be32 id; /* fragment id */
- u32 user;
- struct in6_addr saddr;
- struct in6_addr daddr;
-
int iif;
- unsigned int csum;
__u16 nhoffset;
u8 ecn;
};
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
- struct inet_frags *frags);
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
@@ -674,6 +652,22 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
+static inline u32 ipv6_portaddr_hash(const struct net *net,
+ const struct in6_addr *addr6,
+ unsigned int port)
+{
+ unsigned int hash, mix = net_hash_mix(net);
+
+ if (ipv6_addr_any(addr6))
+ hash = jhash_1word(0, mix);
+ else if (ipv6_addr_v4mapped(addr6))
+ hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
+ else
+ hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
+
+ return hash ^ port;
+}
+
/*
* Check for a RFC 4843 ORCHID address
* (Overlay Routable Cryptographic Hash Identifiers)
@@ -715,7 +709,7 @@ static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int
}
/*
- * we should *never* get to this point since that
+ * we should *never* get to this point since that
* would mean the addrs are equal
*
* However, we do get to it 8) And exacly, when
@@ -871,6 +865,17 @@ static inline int ip6_default_np_autolabel(struct net *net)
}
#endif
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int ip6_multipath_hash_policy(const struct net *net)
+{
+ return net->ipv6.sysctl.multipath_hash_policy;
+}
+#else
+static inline int ip6_multipath_hash_policy(const struct net *net)
+{
+ return 0;
+}
+#endif
/*
* Header manipulation
@@ -953,12 +958,15 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
&inet6_sk(sk)->cork);
}
+unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst);
+
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
- const struct in6_addr *final_dst);
+ const struct in6_addr *final_dst,
+ bool connected);
struct dst_entry *ip6_blackhole_route(struct net *net,
struct dst_entry *orig_dst);
@@ -1036,8 +1044,10 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
int inet6_release(struct socket *sock);
+int __inet6_bind(struct sock *sock, struct sockaddr *uaddr, int addr_len,
+ bool force_bind_address_no_port, bool with_lock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
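
ipv6_portaddr_hash() is the IPv6 twin of ipv4_portaddr_hash(); the v4-mapped special case makes a dual-stack listener hash to the same lhash2 bucket as its IPv4 form. A tiny sketch, assuming CONFIG_IPV6 so that sk_v6_rcv_saddr is available:

#include <net/ipv6.h>
#include <net/sock.h>

static u32 example_v6_listener_hash(const struct net *net,
				    const struct sock *sk, u16 hnum)
{
	return ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, hnum);
}
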
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 070e93a17c59..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,7 +153,7 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
-unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 725282095840..d2ea5863eedc 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -364,7 +364,7 @@ struct iw_handler_def {
* defined in struct iw_priv_args.
*
* For standard IOCTLs, things are quite different and we need to
- * use the stuctures below. Actually, this struct is also more
+ * use the structures below. Actually, this struct is also more
* efficient, but that's another story...
*/
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2e5286..5c40f118c0fa 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -103,7 +103,7 @@ void llc_sk_reset(struct sock *sk);
/* Access to a connection */
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index d747ef975cd8..33fd9ba7e0e5 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -127,6 +127,17 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int lwtunnel_input(struct sk_buff *skb);
int lwtunnel_xmit(struct sk_buff *skb);
+static inline void lwtunnel_set_redirect(struct dst_entry *dst)
+{
+ if (lwtunnel_output_redirect(dst->lwtstate)) {
+ dst->lwtstate->orig_output = dst->output;
+ dst->output = lwtunnel_output;
+ }
+ if (lwtunnel_input_redirect(dst->lwtstate)) {
+ dst->lwtstate->orig_input = dst->input;
+ dst->input = lwtunnel_input;
+ }
+}
#else
static inline void lwtstate_free(struct lwtunnel_state *lws)
@@ -158,6 +169,10 @@ static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
return false;
}
+static inline void lwtunnel_set_redirect(struct dst_entry *dst)
+{
+}
+
static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
unsigned int mtu)
{
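
lwtunnel_set_redirect() folds the two "redirect dst->output/dst->input through the lwtunnel" checks that route creation used to open-code into one helper, with a no-op stub when lwtunnel support is compiled out. Intended call-site shape (sketch; rt and lws come from the surrounding route setup):

#include <net/lwtunnel.h>
#include <net/route.h>

static void example_attach_lwtstate(struct rtable *rt,
				    struct lwtunnel_state *lws)
{
	rt->dst.lwtstate = lwtstate_get(lws);	/* take a reference */
	lwtunnel_set_redirect(&rt->dst);	/* hook output/input if needed */
}
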
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index eec143cca1c0..d2279b2d61aa 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -6,6 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -301,6 +302,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed
* @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
* keep alive) changed.
+ * @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
+ *
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -328,6 +331,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_OCB = 1<<22,
BSS_CHANGED_MU_GROUPS = 1<<23,
BSS_CHANGED_KEEP_ALIVE = 1<<24,
+ BSS_CHANGED_MCAST_RATE = 1<<25,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -934,6 +938,7 @@ struct ieee80211_tx_info {
u8 ampdu_len;
u8 antenna;
u16 tx_time;
+ bool is_valid_ack_signal;
void *status_driver_data[19 / sizeof(void *)];
} status;
struct {
@@ -1098,6 +1103,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* the first subframe.
* @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must
* be done in the hardware.
+ * @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
+ * frame
+ * @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1124,6 +1132,8 @@ enum mac80211_rx_flags {
RX_FLAG_MIC_STRIPPED = BIT(21),
RX_FLAG_ALLOW_SAME_PN = BIT(22),
RX_FLAG_ICV_STRIPPED = BIT(23),
+ RX_FLAG_AMPDU_EOF_BIT = BIT(24),
+ RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
};
/**
@@ -1552,6 +1562,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
* @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
* driver for a key to indicate that sufficient tailroom must always
* be reserved for ICV or MIC, even when HW encryption is enabled.
+ * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
+ * a TKIP key if it only requires MIC space. Do not set together with
+ * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -1562,6 +1575,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5),
IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7),
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
};
/**
@@ -1593,8 +1607,8 @@ struct ieee80211_key_conf {
u8 icv_len;
u8 iv_len;
u8 hw_key_idx;
- u8 flags;
s8 keyidx;
+ u16 flags;
u8 keylen;
u8 key[0];
};
@@ -2056,6 +2070,20 @@ struct ieee80211_txq {
* The stack will not do fragmentation.
* The callback for @set_frag_threshold should be set as well.
*
+ * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
+ * TDLS links.
+ *
+ * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the
+ * mgd_prepare_tx() callback to be called before transmission of a
+ * deauthentication frame in case the association was completed but no
+ * beacon was heard. This is required in multi-channel scenarios, where the
+ * virtual interface might not be given air time for the transmission of
+ * the frame, as it is not synced with the AP/P2P GO yet, and thus the
+ * deauthentication frame might not be transmitted.
+ *
+ * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
+ * support QoS NDP for AP probing - that's most likely a driver bug.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2098,6 +2126,9 @@ enum ieee80211_hw_flags {
IEEE80211_HW_TX_FRAG_LIST,
IEEE80211_HW_REPORTS_LOW_ACK,
IEEE80211_HW_SUPPORTS_TX_FRAG,
+ IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
+ IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
+ IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3342,6 +3373,9 @@ enum ieee80211_reconfig_type {
* management frame prior to having successfully associated to allow the
* driver to give it channel time for the transmission, to get a response
* and to be able to synchronize with the GO.
+ * For drivers that set %IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, mac80211
+ * would also call this function before transmitting a deauthentication
+ * frame in case that no beacon was heard from the AP/P2P GO.
* The callback will be called before each transmission and upon return
* mac80211 will transmit the frame right away.
* The callback is optional and can (should!) sleep.
@@ -4141,7 +4175,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
*/
-#define IEEE80211_TX_STATUS_HEADROOM 14
+#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
/**
* ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
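
For the new A-MPDU EOF reporting, a driver that knows the delimiter EOF bit sets the two RX flags added above; mac80211 can then expose them via the IEEE80211_RADIOTAP_AMPDU_EOF* bits from ieee80211_radiotap.h earlier in this diff. Driver-side sketch, where the two bool parameters stand in for whatever the hardware descriptor provides:

#include <net/mac80211.h>

static void example_report_ampdu_eof(struct ieee80211_rx_status *status,
				     bool eof_known, bool eof_bit)
{
	if (!eof_known)
		return;
	status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
	if (eof_bit)
		status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
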
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 10f99dafd5ac..47e35cce3b64 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -40,7 +40,7 @@ struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
-struct sock;
+struct uevent_sock;
struct netns_ipvs;
@@ -51,7 +51,7 @@ struct net {
refcount_t passive; /* To decided when the network
* namespace should be freed.
*/
- atomic_t count; /* To decided when the network
+ refcount_t count; /* To decided when the network
* namespace should be shut down.
*/
spinlock_t rules_mod_lock;
@@ -59,8 +59,13 @@ struct net {
atomic64_t cookie_gen;
struct list_head list; /* list of network namespaces */
- struct list_head cleanup_list; /* namespaces on death row */
- struct list_head exit_list; /* Use only net_mutex */
+ struct list_head exit_list; /* Linked to call pernet exit
+ * methods on a dead net
+ * (pernet_ops_rwsem read locked),
+ * or to unregister pernet ops
+ * (pernet_ops_rwsem write locked).
+ */
+ struct llist_node cleanup_list; /* namespaces on death row */
struct user_namespace *user_ns; /* Owning user namespace */
struct ucounts *ucounts;
@@ -79,6 +84,8 @@ struct net {
struct sock *rtnl; /* rtnetlink socket */
struct sock *genl_sock;
+ struct uevent_sock *uevent_sock; /* uevent socket */
+
struct list_head dev_base_head;
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
@@ -89,8 +96,9 @@ struct net {
/* core fib_rules */
struct list_head rules_ops;
- struct list_head fib_notifier_ops; /* protected by net_mutex */
-
+ struct list_head fib_notifier_ops; /* Populated by
+ * register_pernet_subsys()
+ */
struct net_device *loopback_dev; /* The loopback */
struct netns_core core;
struct netns_mib mib;
@@ -195,7 +203,7 @@ void __put_net(struct net *net);
static inline struct net *get_net(struct net *net)
{
- atomic_inc(&net->count);
+ refcount_inc(&net->count);
return net;
}
@@ -206,14 +214,14 @@ static inline struct net *maybe_get_net(struct net *net)
* exists. If the reference count is zero this
* function fails and returns NULL.
*/
- if (!atomic_inc_not_zero(&net->count))
+ if (!refcount_inc_not_zero(&net->count))
net = NULL;
return net;
}
static inline void put_net(struct net *net)
{
- if (atomic_dec_and_test(&net->count))
+ if (refcount_dec_and_test(&net->count))
__put_net(net);
}
@@ -223,6 +231,11 @@ int net_eq(const struct net *net1, const struct net *net2)
return net1 == net2;
}
+static inline int check_net(const struct net *net)
+{
+ return refcount_read(&net->count) != 0;
+}
+
void net_drop_ns(void *);
#else
@@ -247,6 +260,11 @@ int net_eq(const struct net *net1, const struct net *net2)
return 1;
}
+static inline int check_net(const struct net *net)
+{
+ return 1;
+}
+
#define net_drop_ns NULL
#endif
@@ -273,6 +291,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#endif
}
+/* Protected by net_rwsem */
#define for_each_net(VAR) \
list_for_each_entry(VAR, &net_namespace_list, list)
@@ -298,6 +317,24 @@ struct net *get_net_ns_by_id(struct net *net, int id);
struct pernet_operations {
struct list_head list;
+ /*
+ * The methods below are called without any exclusive locks.
+ * More than one net may be constructed and destructed
+ * in parallel on several cpus. Each pernet_operations has
+ * to take all the other pernet_operations into account and
+ * introduce locking if they share common resources.
+ *
+ * The only time they are called with an exclusive lock is
+ * from register_pernet_subsys(), unregister_pernet_subsys(),
+ * register_pernet_device() and unregister_pernet_device().
+ *
+ * Exit methods using blocking RCU primitives, such as
+ * synchronize_rcu(), should be implemented via exit_batch.
+ * Then, destruction of a group of nets requires a single
+ * synchronize_rcu() for these pernet_operations, instead
+ * of a separate synchronize_rcu() for every net.
+ * Please avoid synchronize_rcu() altogether where possible.
+ */
int (*init)(struct net *net);
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
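
With net->count now a refcount_t, check_net() gives asynchronous contexts (timers, workqueues) a cheap "is this namespace still alive?" test instead of poking the raw counter. Sketch of the intended pattern:

#include <net/net_namespace.h>

static void example_pernet_scan(struct net *net)
{
	if (!check_net(net))
		return;		/* netns is being torn down; skip the work */

	/* ... walk per-netns state here ... */
}
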
diff --git a/include/net/netevent.h b/include/net/netevent.h
index 40e7bab68490..d9918261701c 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -26,7 +26,8 @@ enum netevent_notif_type {
NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */
NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
- NETEVENT_MULTIPATH_HASH_UPDATE, /* arg is struct net ptr */
+ NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */
+ NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */
};
int register_netevent_notifier(struct notifier_block *nb);
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 4ed1040bbe4a..73f825732326 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -13,17 +13,17 @@
const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
#endif
int nf_conntrack_ipv4_compat_init(void);
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 9cd55be95853..effa8dfba68c 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -4,17 +4,17 @@
extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
#endif
#include <linux/sysctl.h>
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index f5223bf2c420..062dc19b5840 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -213,11 +213,6 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
return nf_ct_delete(ct, 0, 0);
}
-/* These are for NAT. Icky. */
-extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq);
-
/* Set all unconfirmed conntrack as dying */
void nf_ct_unconfirmed_destroy(struct net *);
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
new file mode 100644
index 000000000000..e61184fbfb71
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -0,0 +1,16 @@
+#ifndef _NF_CONNTRACK_COUNT_H
+#define _NF_CONNTRACK_COUNT_H
+
+struct nf_conncount_data;
+
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
+ unsigned int keylen);
+void nf_conncount_destroy(struct net *net, unsigned int family,
+ struct nf_conncount_data *data);
+
+unsigned int nf_conncount_count(struct net *net,
+ struct nf_conncount_data *data,
+ const u32 *key,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
+#endif
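
Usage shape for the new connection-counting API, based only on the prototypes above; the keylen unit and the error-return convention of nf_conncount_init() are assumptions here, and proper error handling is omitted:

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_count.h>

static struct nf_conncount_data *example_conncount_setup(struct net *net)
{
	/* One u32 of key material per tracked entity (assumption). */
	return nf_conncount_init(net, NFPROTO_IPV4, sizeof(u32));
}

static unsigned int example_conncount(struct net *net,
				      struct nf_conncount_data *data,
				      u32 key,
				      const struct nf_conntrack_tuple *tuple,
				      const struct nf_conntrack_zone *zone)
{
	return nf_conncount_count(net, data, &key, tuple, zone);
}
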
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index fc39bbaf107c..32c2a94a219d 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -132,8 +132,7 @@ void nf_conntrack_helper_pernet_fini(struct net *net);
int nf_conntrack_helper_init(void);
void nf_conntrack_helper_fini(void);
-int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff,
- struct nf_conn *ct,
+int nf_conntrack_broadcast_help(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int timeout);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 7ef56c13698a..a7220eef9aee 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -27,6 +27,9 @@ struct nf_conntrack_l4proto {
/* Resolve clashes on insertion races. */
bool allow_clash;
+ /* protoinfo nlattr size, closes a hole */
+ u16 nlattr_size;
+
/* Try to fill in the third arg: dataoff is offset past network protocol
hdr. Return true if possible. */
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
@@ -66,8 +69,6 @@ struct nf_conntrack_l4proto {
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
- /* Calculate protoinfo nlattr size */
- int (*nlattr_size)(void);
/* convert nfnetlink attributes to protoinfo */
int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
@@ -80,8 +81,6 @@ struct nf_conntrack_l4proto {
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
- size_t nla_size;
-
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
struct {
int (*nlattr_to_obj)(struct nlattr *tb[],
@@ -109,7 +108,7 @@ struct nf_conntrack_l4proto {
};
/* Existing built-in generic protocol */
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
#define MAX_NF_CT_PROTO 256
@@ -126,18 +125,18 @@ int nf_ct_l4proto_pernet_register_one(struct net *net,
void nf_ct_l4proto_pernet_unregister_one(struct net *net,
const struct nf_conntrack_l4proto *proto);
int nf_ct_l4proto_pernet_register(struct net *net,
- struct nf_conntrack_l4proto *const proto[],
+ const struct nf_conntrack_l4proto *const proto[],
unsigned int num_proto);
void nf_ct_l4proto_pernet_unregister(struct net *net,
- struct nf_conntrack_l4proto *const proto[],
+ const struct nf_conntrack_l4proto *const proto[],
unsigned int num_proto);
/* Protocol global registration. */
-int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[],
+int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
unsigned int num_proto);
-void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[],
+void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
unsigned int num_proto);
/* Generic netlink helpers */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
new file mode 100644
index 000000000000..833752dd0c58
--- /dev/null
+++ b/include/net/netfilter/nf_flow_table.h
@@ -0,0 +1,126 @@
+#ifndef _NF_FLOW_TABLE_H
+#define _NF_FLOW_TABLE_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/rcupdate.h>
+#include <net/dst.h>
+
+struct nf_flowtable;
+
+struct nf_flowtable_type {
+ struct list_head list;
+ int family;
+ void (*gc)(struct work_struct *work);
+ void (*free)(struct nf_flowtable *ft);
+ const struct rhashtable_params *params;
+ nf_hookfn *hook;
+ struct module *owner;
+};
+
+struct nf_flowtable {
+ struct rhashtable rhashtable;
+ const struct nf_flowtable_type *type;
+ struct delayed_work gc_work;
+};
+
+enum flow_offload_tuple_dir {
+ FLOW_OFFLOAD_DIR_ORIGINAL,
+ FLOW_OFFLOAD_DIR_REPLY,
+ __FLOW_OFFLOAD_DIR_MAX = FLOW_OFFLOAD_DIR_REPLY,
+};
+#define FLOW_OFFLOAD_DIR_MAX (__FLOW_OFFLOAD_DIR_MAX + 1)
+
+struct flow_offload_tuple {
+ union {
+ struct in_addr src_v4;
+ struct in6_addr src_v6;
+ };
+ union {
+ struct in_addr dst_v4;
+ struct in6_addr dst_v6;
+ };
+ struct {
+ __be16 src_port;
+ __be16 dst_port;
+ };
+
+ int iifidx;
+
+ u8 l3proto;
+ u8 l4proto;
+ u8 dir;
+
+ int oifidx;
+
+ struct dst_entry *dst_cache;
+};
+
+struct flow_offload_tuple_rhash {
+ struct rhash_head node;
+ struct flow_offload_tuple tuple;
+};
+
+#define FLOW_OFFLOAD_SNAT 0x1
+#define FLOW_OFFLOAD_DNAT 0x2
+#define FLOW_OFFLOAD_DYING 0x4
+
+struct flow_offload {
+ struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
+ u32 flags;
+ union {
+ /* Your private driver data here. */
+ u32 timeout;
+ };
+};
+
+#define NF_FLOW_TIMEOUT (30 * HZ)
+
+struct nf_flow_route {
+ struct {
+ struct dst_entry *dst;
+ int ifindex;
+ } tuple[FLOW_OFFLOAD_DIR_MAX];
+};
+
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
+ struct nf_flow_route *route);
+void flow_offload_free(struct flow_offload *flow);
+
+int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
+ struct flow_offload_tuple *tuple);
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ void (*iter)(struct flow_offload *flow, void *data),
+ void *data);
+
+void nf_flow_table_cleanup(struct net *net, struct net_device *dev);
+
+void nf_flow_table_free(struct nf_flowtable *flow_table);
+void nf_flow_offload_work_gc(struct work_struct *work);
+extern const struct rhashtable_params nf_flow_offload_rhash_params;
+
+void flow_offload_dead(struct flow_offload *flow);
+
+int nf_flow_snat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
+int nf_flow_dnat_port(const struct flow_offload *flow,
+ struct sk_buff *skb, unsigned int thoff,
+ u8 protocol, enum flow_offload_tuple_dir dir);
+
+struct flow_ports {
+ __be16 source, dest;
+};
+
+unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+#define MODULE_ALIAS_NF_FLOWTABLE(family) \
+ MODULE_ALIAS("nf-flowtable-" __stringify(family))
+
+#endif /* _NF_FLOW_TABLE_H */
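
End-to-end shape of the flow table API, again inferred only from the prototypes in this header: a conntrack entry that qualifies for offload gets a flow_offload built from its two routes and inserted into the table, after which the per-family hooks (nf_flow_offload_ip_hook / nf_flow_offload_ipv6_hook) handle the fast path. Sketch with error handling reduced to the minimum:

#include <linux/errno.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_flow_table.h>

static int example_offload_flow(struct nf_flowtable *ft, struct nf_conn *ct,
				struct nf_flow_route *route)
{
	struct flow_offload *flow;

	flow = flow_offload_alloc(ct, route);
	if (!flow)
		return -ENOMEM;

	if (flow_offload_add(ft, flow) < 0) {
		flow_offload_free(flow);
		return -ENOMEM;
	}
	return 0;
}
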
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 814058d0f167..a50a69f5334c 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -25,7 +25,7 @@ struct nf_queue_entry {
struct nf_queue_handler {
int (*outfn)(struct nf_queue_entry *entry,
unsigned int queuenum);
- unsigned int (*nf_hook_drop)(struct net *net);
+ void (*nf_hook_drop)(struct net *net);
};
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index fecc6112c768..cd368d1b8cb8 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -9,6 +9,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nf_tables.h>
#include <linux/u64_stats_sync.h>
+#include <net/netfilter/nf_flow_table.h>
#include <net/netlink.h>
#define NFT_JUMP_STACK_SIZE 16
@@ -54,8 +55,8 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt->xt.state = state;
}
-static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
- struct sk_buff *skb)
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
pkt->tprot_set = false;
pkt->tprot = 0;
@@ -63,14 +64,6 @@ static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
pkt->xt.fragoff = 0;
}
-static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- nft_set_pktinfo(pkt, skb, state);
- nft_set_pktinfo_proto_unspec(pkt, skb);
-}
-
/**
* struct nft_verdict - nf_tables verdict
*
@@ -150,22 +143,22 @@ static inline void nft_data_debug(const struct nft_data *data)
* struct nft_ctx - nf_tables rule/set context
*
* @net: net namespace
- * @afi: address family info
* @table: the table the chain is contained in
* @chain: the chain the rule is contained in
* @nla: netlink attributes
* @portid: netlink portID of the original message
* @seq: netlink sequence number
+ * @family: protocol family
* @report: notify via unicast netlink message
*/
struct nft_ctx {
struct net *net;
- struct nft_af_info *afi;
struct nft_table *table;
struct nft_chain *chain;
const struct nlattr * const *nla;
u32 portid;
u32 seq;
+ u8 family;
bool report;
};
@@ -381,6 +374,7 @@ void nft_unregister_set(struct nft_set_type *type);
* @list: table set list node
* @bindings: list of set bindings
* @name: name of the set
+ * @handle: unique handle of the set
* @ktype: key type (numeric type defined by userspace, not used in the kernel)
* @dtype: data type (verdict or numeric type defined by userspace)
* @objtype: object type (see NFT_OBJECT_* definitions)
@@ -403,6 +397,7 @@ struct nft_set {
struct list_head list;
struct list_head bindings;
char *name;
+ u64 handle;
u32 ktype;
u32 dtype;
u32 objtype;
@@ -424,6 +419,11 @@ struct nft_set {
__attribute__((aligned(__alignof__(u64))));
};
+static inline bool nft_set_is_anonymous(const struct nft_set *set)
+{
+ return set->flags & NFT_SET_ANONYMOUS;
+}
+
static inline void *nft_set_priv(const struct nft_set *set)
{
return (void *)set->data;
@@ -434,11 +434,11 @@ static inline struct nft_set *nft_set_container_of(const void *priv)
return (void *)priv - offsetof(struct nft_set, data);
}
-struct nft_set *nft_set_lookup(const struct net *net,
- const struct nft_table *table,
- const struct nlattr *nla_set_name,
- const struct nlattr *nla_set_id,
- u8 genmask);
+struct nft_set *nft_set_lookup_global(const struct net *net,
+ const struct nft_table *table,
+ const struct nlattr *nla_set_name,
+ const struct nlattr *nla_set_id,
+ u8 genmask);
static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
{
@@ -868,7 +868,7 @@ struct nft_chain {
char *name;
};
-enum nft_chain_type {
+enum nft_chain_types {
NFT_CHAIN_T_DEFAULT = 0,
NFT_CHAIN_T_ROUTE,
NFT_CHAIN_T_NAT,
@@ -876,26 +876,30 @@ enum nft_chain_type {
};
/**
- * struct nf_chain_type - nf_tables chain type info
+ * struct nft_chain_type - nf_tables chain type info
*
* @name: name of the type
* @type: numeric identifier
* @family: address family
* @owner: module owner
* @hook_mask: mask of valid hooks
- * @hooks: hookfn overrides
+ * @hooks: array of hook functions
+ * @init: chain initialization function
+ * @free: chain release function
*/
-struct nf_chain_type {
+struct nft_chain_type {
const char *name;
- enum nft_chain_type type;
+ enum nft_chain_types type;
int family;
struct module *owner;
unsigned int hook_mask;
nf_hookfn *hooks[NF_MAX_HOOKS];
+ int (*init)(struct nft_ctx *ctx);
+ void (*free)(struct nft_ctx *ctx);
};
int nft_chain_validate_dependency(const struct nft_chain *chain,
- enum nft_chain_type type);
+ enum nft_chain_types type);
int nft_chain_validate_hooks(const struct nft_chain *chain,
unsigned int hook_flags);
@@ -905,8 +909,6 @@ struct nft_stats {
struct u64_stats_sync syncp;
};
-#define NFT_HOOK_OPS_MAX 2
-
/**
* struct nft_base_chain - nf_tables base chain
*
@@ -918,8 +920,8 @@ struct nft_stats {
* @dev_name: device name that this base chain is attached to (if any)
*/
struct nft_base_chain {
- struct nf_hook_ops ops[NFT_HOOK_OPS_MAX];
- const struct nf_chain_type *type;
+ struct nf_hook_ops ops;
+ const struct nft_chain_type *type;
u8 policy;
u8 flags;
struct nft_stats __percpu *stats;
@@ -948,10 +950,13 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
* @chains: chains in the table
* @sets: sets in the table
* @objects: stateful objects in the table
+ * @flowtables: flow tables in the table
* @hgenerator: handle generator state
+ * @handle: table handle
* @use: number of chain references to this table
* @flags: table flag (see enum nft_table_flags)
* @genmask: generation mask
+ * @family: address family
+ * @family: address family
* @name: name of the table
*/
struct nft_table {
@@ -959,48 +964,18 @@ struct nft_table {
struct list_head chains;
struct list_head sets;
struct list_head objects;
+ struct list_head flowtables;
u64 hgenerator;
+ u64 handle;
u32 use;
- u16 flags:14,
+ u16 family:6,
+ flags:8,
genmask:2;
char *name;
};
-enum nft_af_flags {
- NFT_AF_NEEDS_DEV = (1 << 0),
-};
-
-/**
- * struct nft_af_info - nf_tables address family info
- *
- * @list: used internally
- * @family: address family
- * @nhooks: number of hooks in this family
- * @owner: module owner
- * @tables: used internally
- * @flags: family flags
- * @nops: number of hook ops in this family
- * @hook_ops_init: initialization function for chain hook ops
- * @hooks: hookfn overrides for packet validation
- */
-struct nft_af_info {
- struct list_head list;
- int family;
- unsigned int nhooks;
- struct module *owner;
- struct list_head tables;
- u32 flags;
- unsigned int nops;
- void (*hook_ops_init)(struct nf_hook_ops *,
- unsigned int);
- nf_hookfn *hooks[NF_MAX_HOOKS];
-};
-
-int nft_register_afinfo(struct net *, struct nft_af_info *);
-void nft_unregister_afinfo(struct net *, struct nft_af_info *);
-
-int nft_register_chain_type(const struct nf_chain_type *);
-void nft_unregister_chain_type(const struct nf_chain_type *);
+void nft_register_chain_type(const struct nft_chain_type *);
+void nft_unregister_chain_type(const struct nft_chain_type *);
int nft_register_expr(struct nft_expr_type *);
void nft_unregister_expr(struct nft_expr_type *);
@@ -1016,9 +991,9 @@ int nft_verdict_dump(struct sk_buff *skb, int type,
* @name: name of this stateful object
* @genmask: generation mask
* @use: number of references to this stateful object
- * @data: object data, layout depends on type
+ * @handle: unique object handle
* @ops: object operations
- * @data: pointer to object data
+ * @data: object data, layout depends on type
*/
struct nft_object {
struct list_head list;
@@ -1026,6 +1001,7 @@ struct nft_object {
struct nft_table *table;
u32 genmask:2,
use:30;
+ u64 handle;
/* runtime data below here */
const struct nft_object_ops *ops ____cacheline_aligned;
unsigned char data[]
@@ -1096,6 +1072,50 @@ struct nft_object_ops {
int nft_register_obj(struct nft_object_type *obj_type);
void nft_unregister_obj(struct nft_object_type *obj_type);
+#define NFT_FLOWTABLE_DEVICE_MAX 8
+
+/**
+ * struct nft_flowtable - nf_tables flow table
+ *
+ * @list: flow table list node in table list
+ * @table: the table the flow table is contained in
+ * @name: name of this flow table
+ * @hooknum: hook number
+ * @priority: hook priority
+ * @ops_len: number of hooks in array
+ * @genmask: generation mask
+ * @use: number of references to this flow table
+ * @handle: unique object handle
+ * @dev_name: array of device names
+ * @data: rhashtable and garbage collector
+ * @ops: array of hooks
+ */
+struct nft_flowtable {
+ struct list_head list;
+ struct nft_table *table;
+ char *name;
+ int hooknum;
+ int priority;
+ int ops_len;
+ u32 genmask:2,
+ use:30;
+ u64 handle;
+ char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
+ /* runtime data below here */
+ struct nf_hook_ops *ops ____cacheline_aligned;
+ struct nf_flowtable data;
+};
+
+struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
+ const struct nlattr *nla,
+ u8 genmask);
+void nft_flow_table_iterate(struct net *net,
+ void (*iter)(struct nf_flowtable *flowtable, void *data),
+ void *data);
+
+void nft_register_flowtable_type(struct nf_flowtable_type *type);
+void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
+
/**
* struct nft_traceinfo - nft tracing information and state
*
@@ -1125,12 +1145,6 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
void nft_trace_notify(struct nft_traceinfo *info);
-#define nft_dereference(p) \
- nfnl_dereference(p, NFNL_SUBSYS_NFTABLES)
-
-#define MODULE_ALIAS_NFT_FAMILY(family) \
- MODULE_ALIAS("nft-afinfo-" __stringify(family))
-
#define MODULE_ALIAS_NFT_CHAIN(family, name) \
MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
@@ -1332,4 +1346,14 @@ struct nft_trans_obj {
#define nft_trans_obj(trans) \
(((struct nft_trans_obj *)trans->data)->obj)
+struct nft_trans_flowtable {
+ struct nft_flowtable *flowtable;
+};
+
+#define nft_trans_flowtable(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->flowtable)
+
+int __init nft_chain_filter_init(void);
+void __exit nft_chain_filter_fini(void);
+
#endif /* _NET_NF_TABLES_H */
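A sketch of what the reworked chain-type interface expects from its users, built only from the declarations in this header; the hook body and all example_* names are illustrative, and note that registration no longer returns an error code.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_tables.h>

/* Illustrative hook: fill in pktinfo, then run the chain. */
static unsigned int example_do_chain(void *priv, struct sk_buff *skb,
				     const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	nft_set_pktinfo(&pkt, skb, state);
	nft_set_pktinfo_unspec(&pkt, skb);

	return nft_do_chain(&pkt, priv);
}

static const struct nft_chain_type example_chain_filter = {
	.name		= "filter",
	.type		= NFT_CHAIN_T_DEFAULT,
	.family		= NFPROTO_IPV4,
	.owner		= THIS_MODULE,
	.hook_mask	= (1 << NF_INET_LOCAL_IN) |
			  (1 << NF_INET_FORWARD),
	.hooks		= {
		[NF_INET_LOCAL_IN]	= example_do_chain,
		[NF_INET_FORWARD]	= example_do_chain,
	},
	/* optional .init/.free run when a chain of this type comes and goes */
};

static int __init example_chain_init(void)
{
	nft_register_chain_type(&example_chain_filter);	/* returns void now */
	return 0;
}

static void __exit example_chain_exit(void)
{
	nft_unregister_chain_type(&example_chain_filter);
}

module_init(example_chain_init);
module_exit(example_chain_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFT_CHAIN(AF_INET, "filter");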
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index f0896ba456c4..ed7b511f0a59 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -5,15 +5,11 @@
#include <net/netfilter/nf_tables.h>
#include <net/ip.h>
-static inline void
-nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
struct iphdr *ip;
- nft_set_pktinfo(pkt, skb, state);
-
ip = ip_hdr(pkt->skb);
pkt->tprot_set = true;
pkt->tprot = ip->protocol;
@@ -21,10 +17,8 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
-static inline int
-__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
struct iphdr *iph, _iph;
u32 len, thoff;
@@ -52,16 +46,11 @@ __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
return 0;
}
-static inline void
-nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
- nft_set_pktinfo(pkt, skb, state);
- if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
- nft_set_pktinfo_proto_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
+ nft_set_pktinfo_unspec(pkt, skb);
}
-extern struct nft_af_info nft_af_ipv4;
-
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index b8065b72f56e..dabe6fdb553a 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -5,20 +5,16 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ipv6.h>
-static inline void
-nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
unsigned int flags = IP6_FH_F_AUTH;
int protohdr, thoff = 0;
unsigned short frag_off;
- nft_set_pktinfo(pkt, skb, state);
-
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0) {
- nft_set_pktinfo_proto_unspec(pkt, skb);
+ nft_set_pktinfo_unspec(pkt, skb);
return;
}
@@ -28,10 +24,8 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
pkt->xt.fragoff = frag_off;
}
-static inline int
-__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
unsigned int flags = IP6_FH_F_AUTH;
@@ -68,16 +62,11 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
#endif
}
-static inline void
-nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
{
- nft_set_pktinfo(pkt, skb, state);
- if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
- nft_set_pktinfo_proto_unspec(pkt, skb);
+ if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
+ nft_set_pktinfo_unspec(pkt, skb);
}
-extern struct nft_af_info nft_af_ipv6;
-
#endif
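With the state argument dropped from the per-family helpers, a hook now sets the generic hook state once via nft_set_pktinfo() and then fills in the protocol fields separately. The sketch below shows that pattern for a family-agnostic hook; the function name is illustrative, everything else comes from the headers above.

#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>

static unsigned int example_inet_hook(void *priv, struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	nft_set_pktinfo(&pkt, skb, state);		/* generic hook state */

	switch (state->pf) {
	case NFPROTO_IPV4:
		nft_set_pktinfo_ipv4(&pkt, skb);	/* tprot, thoff, fragoff */
		break;
	case NFPROTO_IPV6:
		nft_set_pktinfo_ipv6(&pkt, skb);
		break;
	default:
		nft_set_pktinfo_unspec(&pkt, skb);
		break;
	}

	return nft_do_chain(&pkt, priv);
}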
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index b1db13772554..832ab69efda5 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -21,7 +21,7 @@ struct xt_rateest {
struct net_rate_estimator __rcu *rate_est;
};
-struct xt_rateest *xt_rateest_lookup(const char *name);
-void xt_rateest_put(struct xt_rateest *est);
+struct xt_rateest *xt_rateest_lookup(struct net *net, const char *name);
+void xt_rateest_put(struct net *net, struct xt_rateest *est);
#endif /* _XT_RATEEST_H */
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index ecf238b8862c..ca9bd9fba5b5 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -8,7 +8,7 @@
#include <linux/spinlock.h>
-struct dev_rcv_lists;
+struct can_dev_rcv_lists;
struct s_stats;
struct s_pstats;
@@ -28,7 +28,7 @@ struct netns_can {
#endif
/* receive filters subscribed for 'all' CAN devices */
- struct dev_rcv_lists *can_rx_alldev_list;
+ struct can_dev_rcv_lists *can_rx_alldev_list;
spinlock_t can_rcvlists_lock;
struct timer_list can_stattimer;/* timer for statistics update */
struct s_stats *can_stats; /* packet statistics */
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 0ad4d0c71228..36c2d998a43c 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -11,7 +11,10 @@ struct netns_core {
int sysctl_somaxconn;
- struct prot_inuse __percpu *inuse;
+#ifdef CONFIG_PROC_FS
+ int __percpu *sock_inuse;
+ struct prot_inuse __percpu *prot_inuse;
+#endif
};
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 44668c29701a..8491bc9c86b1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -49,9 +49,12 @@ struct netns_ipv4 {
#endif
struct ipv4_devconf *devconf_all;
struct ipv4_devconf *devconf_dflt;
+ struct ip_ra_chain __rcu *ra_chain;
+ struct mutex ra_mutex;
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
bool fib_has_custom_rules;
+ unsigned int fib_rules_require_fldissect;
struct fib_table __rcu *fib_main;
struct fib_table __rcu *fib_default;
#endif
@@ -167,6 +170,9 @@ struct netns_ipv4 {
atomic_t tfo_active_disable_times;
unsigned long tfo_active_disable_stamp;
+ int sysctl_udp_wmem_min;
+ int sysctl_udp_rmem_min;
+
#ifdef CONFIG_NET_L3_MASTER_DEV
int sysctl_udp_l3mdev_accept;
#endif
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 987cc4569cb8..c29f09cfc9d7 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -28,6 +28,7 @@ struct netns_sysctl_ipv6 {
int ip6_rt_gc_elasticity;
int ip6_rt_mtu_expires;
int ip6_rt_min_advmss;
+ int multipath_hash_policy;
int flowlabel_consistency;
int auto_flowlabels;
int icmpv6_time;
@@ -71,7 +72,8 @@ struct netns_ipv6 {
unsigned int ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- bool fib6_has_custom_rules;
+ unsigned int fib6_rules_require_fldissect;
+ bool fib6_has_custom_rules;
struct rt6_info *ip6_prohibit_entry;
struct rt6_info *ip6_blk_hole_entry;
struct fib6_table *fib6_local_tbl;
@@ -84,7 +86,7 @@ struct netns_ipv6 {
struct sock *mc_autojoin_sk;
#ifdef CONFIG_IPV6_MROUTE
#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
- struct mr6_table *mrt6;
+ struct mr_table *mrt6;
#else
struct list_head mr6_tables;
struct fib_rules_ops *mr6_rules_ops;
@@ -94,6 +96,8 @@ struct netns_ipv6 {
atomic_t fib6_sernum;
struct seg6_pernet_data *seg6_data;
struct fib_notifier_ops *notifier_ops;
+ struct fib_notifier_ops *ip6mr_notifier_ops;
+ unsigned int ipmr_seq; /* protected by rtnl_mutex */
struct {
struct hlist_head head;
spinlock_t lock;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index cc00af2ac2d7..ca043342c0eb 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -17,7 +17,17 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
- struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
+ struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS];
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
+#endif
+#if IS_ENABLED(CONFIG_DECNET)
+ struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
+#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
bool defrag_ipv4;
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 4109b5f3010f..48134353411d 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,14 +7,8 @@
struct nft_af_info;
struct netns_nftables {
- struct list_head af_info;
+ struct list_head tables;
struct list_head commit_list;
- struct nft_af_info *ipv4;
- struct nft_af_info *ipv6;
- struct nft_af_info *inet;
- struct nft_af_info *arp;
- struct nft_af_info *bridge;
- struct nft_af_info *netdev;
unsigned int base_seq;
u8 gencursor;
};
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index ebc813277662..0db7fb3e4e15 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -122,9 +122,12 @@ struct netns_sctp {
/* Flag to indicate if PR-CONFIG is enabled. */
int reconf_enable;
- /* Flag to idicate if SCTP-AUTH is enabled */
+ /* Flag to indicate if SCTP-AUTH is enabled */
int auth_enable;
+ /* Flag to indicate if stream interleave is enabled */
+ int intl_enable;
+
/*
* Policy to control SCTP IPv4 address scoping
* 0 - Disable IPv4 address scoping
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 36bb794f5cd6..902ff382a6dc 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -7,7 +7,7 @@
static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
{
- return remaining >= sizeof(*rtnh) &&
+ return remaining >= (int)sizeof(*rtnh) &&
rtnh->rtnh_len >= sizeof(*rtnh) &&
rtnh->rtnh_len <= remaining;
}
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 753ac9361154..e828d31be5da 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -29,6 +29,7 @@ struct tcf_block_ext_info {
enum tcf_block_binder_type binder_type;
tcf_chain_head_change_t *chain_head_change;
void *chain_head_change_priv;
+ u32 block_index;
};
struct tcf_block_cb;
@@ -38,16 +39,25 @@ bool tcf_queue_work(struct work_struct *work);
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
bool create);
void tcf_chain_put(struct tcf_chain *chain);
+void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
- struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
- struct tcf_block_ext_info *ei);
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei);
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+ return block->index;
+}
+
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
+ WARN_ON(tcf_block_shared(block));
return block->q;
}
@@ -77,14 +87,16 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
#else
static inline
int tcf_block_get(struct tcf_block **p_block,
- struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+ struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+ struct netlink_ext_ack *extack)
{
return 0;
}
static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
- struct tcf_block_ext_info *ei)
+ struct tcf_block_ext_info *ei,
+ struct netlink_ext_ack *extack)
{
return 0;
}
@@ -364,7 +376,8 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
- struct tcf_exts *exts, bool ovr);
+ struct tcf_exts *exts, bool ovr,
+ struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
@@ -544,13 +557,16 @@ static inline int tcf_valid_offset(const struct sk_buff *skb,
#include <net/net_namespace.h>
static inline int
-tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
+tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
+ struct netlink_ext_ack *extack)
{
char indev[IFNAMSIZ];
struct net_device *dev;
- if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
+ if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
+ NL_SET_ERR_MSG(extack, "Interface name too long");
return -EINVAL;
+ }
dev = __dev_get_by_name(net, indev);
if (!dev)
return -ENODEV;
@@ -586,17 +602,9 @@ struct tc_cls_common_offload {
u32 chain_index;
__be16 protocol;
u32 prio;
+ struct netlink_ext_ack *extack;
};
-static inline void
-tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
- const struct tcf_proto *tp)
-{
- cls_common->chain_index = tp->chain->index;
- cls_common->protocol = tp->protocol;
- cls_common->prio = tp->prio;
-}
-
struct tc_cls_u32_knode {
struct tcf_exts *exts;
struct tc_u32_sel *sel;
@@ -637,6 +645,31 @@ static inline bool tc_can_offload(const struct net_device *dev)
return dev->features & NETIF_F_HW_TC;
}
+static inline bool tc_can_offload_extack(const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ bool can = tc_can_offload(dev);
+
+ if (!can)
+ NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
+
+ return can;
+}
+
+static inline bool
+tc_cls_can_offload_and_chain0(const struct net_device *dev,
+ struct tc_cls_common_offload *common)
+{
+ if (!tc_can_offload_extack(dev, common->extack))
+ return false;
+ if (common->chain_index) {
+ NL_SET_ERR_MSG(common->extack,
+ "Driver supports only offload of chain 0");
+ return false;
+ }
+ return true;
+}
+
static inline bool tc_skip_hw(u32 flags)
{
return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
@@ -664,6 +697,18 @@ static inline bool tc_in_hw(u32 flags)
return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
+static inline void
+tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
+ const struct tcf_proto *tp, u32 flags,
+ struct netlink_ext_ack *extack)
+{
+ cls_common->chain_index = tp->chain->index;
+ cls_common->protocol = tp->protocol;
+ cls_common->prio = tp->prio;
+ if (tc_skip_sw(flags))
+ cls_common->extack = extack;
+}
+
enum tc_fl_command {
TC_CLSFLOWER_REPLACE,
TC_CLSFLOWER_DESTROY,
@@ -706,7 +751,6 @@ struct tc_cls_bpf_offload {
struct bpf_prog *oldprog;
const char *name;
bool exts_integrated;
- u32 gen_flags;
};
struct tc_mqprio_qopt_offload {
@@ -727,6 +771,11 @@ struct tc_cookie {
u32 len;
};
+struct tc_qopt_offload_stats {
+ struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_queue *qstats;
+};
+
enum tc_red_command {
TC_RED_REPLACE,
TC_RED_DESTROY,
@@ -739,9 +788,6 @@ struct tc_red_qopt_offload_params {
u32 max;
u32 probability;
bool is_ecn;
-};
-struct tc_red_qopt_offload_stats {
- struct gnet_stats_basic_packed *bstats;
struct gnet_stats_queue *qstats;
};
@@ -751,9 +797,42 @@ struct tc_red_qopt_offload {
u32 parent;
union {
struct tc_red_qopt_offload_params set;
- struct tc_red_qopt_offload_stats stats;
+ struct tc_qopt_offload_stats stats;
struct red_stats *xstats;
};
};
+enum tc_prio_command {
+ TC_PRIO_REPLACE,
+ TC_PRIO_DESTROY,
+ TC_PRIO_STATS,
+ TC_PRIO_GRAFT,
+};
+
+struct tc_prio_qopt_offload_params {
+ int bands;
+ u8 priomap[TC_PRIO_MAX + 1];
+ /* In case a prio qdisc is offloaded and then changed to a
+ * non-offloadable config, it needs to update the backlog & qlen
+ * values to negate the HW backlog & qlen values (and only them).
+ */
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_prio_qopt_offload_graft_params {
+ u8 band;
+ u32 child_handle;
+};
+
+struct tc_prio_qopt_offload {
+ enum tc_prio_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_prio_qopt_offload_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ struct tc_prio_qopt_offload_graft_params graft_params;
+ };
+};
+
#endif
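On the driver side, the new helpers above let an offload callback refuse unsupported requests with a specific extack message. A minimal sketch, assuming the classifier offload struct (flower's, here) embeds struct tc_cls_common_offload as ->common; the example_* name is illustrative.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>

static int example_setup_cls_flower(struct net_device *dev,
				    struct tc_cls_flower_offload *f)
{
	/* Sets "TC offload is disabled..." or the chain-0 message on failure. */
	if (!tc_cls_can_offload_and_chain0(dev, &f->common))
		return -EOPNOTSUPP;

	/* ... translate the match and actions into hardware state ... */
	return 0;
}

On the classifier side, tc_cls_common_offload_init() above only passes the extack through when skip_sw is set, so drivers should expect a NULL extack for requests that can still fall back to software.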
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d1f413f06c72..815b92a23936 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,7 +89,8 @@ extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
- unsigned int limit);
+ unsigned int limit,
+ struct netlink_ext_ack *extack);
int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
@@ -99,22 +100,24 @@ int qdisc_set_default(const char *id);
void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
-struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
- struct nlattr *tab);
+ struct nlattr *tab,
+ struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
-int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
- struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock, bool validate);
+bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock, bool validate);
void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
- if (qdisc_run_begin(q))
+ if (qdisc_run_begin(q)) {
__qdisc_run(q);
+ qdisc_run_end(q);
+ }
}
static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebc5a2ed8631..60f8cc86a447 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -4,6 +4,7 @@
* regulatory support structures
*
* Copyright 2008-2009 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
+ * Copyright (C) 2018 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -78,7 +79,7 @@ struct regulatory_request {
int wiphy_idx;
enum nl80211_reg_initiator initiator;
enum nl80211_user_reg_hint_type user_reg_hint_type;
- char alpha2[2];
+ char alpha2[3];
enum nl80211_dfs_regions dfs_region;
bool intersect;
bool processed;
@@ -188,9 +189,35 @@ struct ieee80211_power_rule {
u32 max_eirp;
};
+/**
+ * struct ieee80211_wmm_ac - used to store per ac wmm regulatory limitation
+ *
+ * The information provided in this structure is required for QoS
+ * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29.
+ *
+ * @cw_min: minimum contention window [a value of the form
+ * 2^n-1 in the range 1..32767]
+ * @cw_max: maximum contention window [like @cw_min]
+ * @cot: maximum burst time in units of 32 usecs, 0 meaning disabled
+ * @aifsn: arbitration interframe space [0..255]
+ *
+ */
+struct ieee80211_wmm_ac {
+ u16 cw_min;
+ u16 cw_max;
+ u16 cot;
+ u8 aifsn;
+};
+
+struct ieee80211_wmm_rule {
+ struct ieee80211_wmm_ac client[IEEE80211_NUM_ACS];
+ struct ieee80211_wmm_ac ap[IEEE80211_NUM_ACS];
+};
+
struct ieee80211_reg_rule {
struct ieee80211_freq_range freq_range;
struct ieee80211_power_rule power_rule;
+ struct ieee80211_wmm_rule *wmm_rule;
u32 flags;
u32 dfs_cac_ms;
};
@@ -198,6 +225,7 @@ struct ieee80211_reg_rule {
struct ieee80211_regdomain {
struct rcu_head rcu_head;
u32 n_reg_rules;
+ u32 n_wmm_rules;
char alpha2[3];
enum nl80211_dfs_regions dfs_region;
struct ieee80211_reg_rule reg_rules[];
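For illustration only (the numbers are placeholders, not regulatory data), a rule carrying per-AC WMM limits might be laid out as below; only the first AC slot is filled in.

static struct ieee80211_wmm_rule example_wmm_rule = {
	/* AC 0 only; cw_min/cw_max must be of the form 2^n - 1 */
	.client[0] = { .cw_min = 15, .cw_max = 1023, .cot = 0, .aifsn = 3 },
	.ap[0]	   = { .cw_min = 15, .cw_max = 63,   .cot = 0, .aifsn = 3 },
};

static const struct ieee80211_reg_rule example_reg_rule = {
	.wmm_rule = &example_wmm_rule,
	/* freq_range, power_rule, flags, dfs_cac_ms as for any other rule */
};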
diff --git a/include/net/route.h b/include/net/route.h
index d538e6db1afe..dbb032d5921b 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -63,9 +63,8 @@ struct rtable {
__be32 rt_gateway;
/* Miscellaneous cached information */
- u32 rt_pmtu;
-
- u32 rt_table_id;
+ u32 rt_mtu_locked:1,
+ rt_pmtu:31;
struct list_head rt_uncached;
struct uncached_list *rt_uncached_list;
@@ -217,7 +216,7 @@ unsigned int inet_addr_type_dev_table(struct net *net,
const struct net_device *dev,
__be32 addr);
void ip_rt_multicast_event(struct in_device *);
-int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
+int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
struct rtable *rt_dst_alloc(struct net_device *dev,
unsigned int flags, u16 type,
@@ -227,6 +226,9 @@ struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void rt_add_uncached_list(struct rtable *rt);
+void rt_del_uncached_list(struct rtable *rt);
+
static inline void ip_rt_put(struct rtable *rt)
{
/* dst_release() accepts a NULL parameter.
diff --git a/include/net/rsi_91x.h b/include/net/rsi_91x.h
new file mode 100644
index 000000000000..040f07b47f1f
--- /dev/null
+++ b/include/net/rsi_91x.h
@@ -0,0 +1,56 @@
+/**
+ * Copyright (c) 2017 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_HEADER_H__
+#define __RSI_HEADER_H__
+
+#include <linux/skbuff.h>
+
+/* HAL queue information */
+#define RSI_COEX_Q 0x0
+#define RSI_BT_Q 0x2
+#define RSI_WLAN_Q 0x3
+#define RSI_WIFI_MGMT_Q 0x4
+#define RSI_WIFI_DATA_Q 0x5
+#define RSI_BT_MGMT_Q 0x6
+#define RSI_BT_DATA_Q 0x7
+
+enum rsi_coex_queues {
+ RSI_COEX_Q_INVALID = -1,
+ RSI_COEX_Q_COMMON = 0,
+ RSI_COEX_Q_BT,
+ RSI_COEX_Q_WLAN
+};
+
+enum rsi_host_intf {
+ RSI_HOST_INTF_SDIO = 0,
+ RSI_HOST_INTF_USB
+};
+
+struct rsi_proto_ops {
+ int (*coex_send_pkt)(void *priv, struct sk_buff *skb, u8 hal_queue);
+ enum rsi_host_intf (*get_host_intf)(void *priv);
+ void (*set_bt_context)(void *priv, void *context);
+};
+
+struct rsi_mod_ops {
+ int (*attach)(void *priv, struct rsi_proto_ops *ops);
+ void (*detach)(void *priv);
+ int (*recv_pkt)(void *priv, const u8 *msg);
+};
+
+extern const struct rsi_mod_ops rsi_bt_ops;
+#endif
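A sketch of the module side of this interface, using only the ops tables declared above. The example_* functions are placeholders; a real provider would export its table under the rsi_bt_ops name that the WLAN driver resolves.

#include <net/rsi_91x.h>

static struct rsi_proto_ops *example_proto_ops;
static void *example_wlan_priv;

static int example_bt_attach(void *priv, struct rsi_proto_ops *ops)
{
	/* keep the ops so BT traffic can go out via ops->coex_send_pkt() */
	example_wlan_priv = priv;
	example_proto_ops = ops;
	return 0;
}

static void example_bt_detach(void *priv)
{
	example_proto_ops = NULL;
	example_wlan_priv = NULL;
}

static int example_bt_recv_pkt(void *priv, const u8 *msg)
{
	/* hand the received frame to the BT stack */
	return 0;
}

static const struct rsi_mod_ops example_bt_ops = {
	.attach		= example_bt_attach,
	.detach		= example_bt_detach,
	.recv_pkt	= example_bt_recv_pkt,
};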
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index ead018744ff5..14b6b3af8918 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -13,10 +13,10 @@ enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = 1,
};
-int __rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
+int rtnl_register_module(struct module *owner, int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_unregister(int protocol, int msgtype);
void rtnl_unregister_all(int protocol);
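Built-in callers keep using rtnl_register(), while modules switch to rtnl_register_module() so the core can track the owning module and the caller can unwind on failure. A hedged sketch; the protocol/message pair is chosen only as a plausible example.

#include <linux/module.h>
#include <net/rtnetlink.h>

static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	return 0;	/* handle the request */
}

static int __init example_init(void)
{
	/* unwind on failure instead of panicking like the old API did */
	return rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
				    example_doit, NULL, 0);
}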
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index becf86aa4ac6..5154c8300262 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -30,6 +30,7 @@ struct qdisc_rate_table {
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_RUNNING,
};
struct qdisc_size_table {
@@ -71,6 +72,7 @@ struct Qdisc {
* qdisc_tree_decrease_qlen() should stop.
*/
#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
+#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
u32 limit;
const struct Qdisc_ops *ops;
@@ -88,14 +90,14 @@ struct Qdisc {
/*
* For performance sake on SMP, we put highly modified fields at the end
*/
- struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
+ struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
unsigned long state;
struct Qdisc *next_sched;
- struct sk_buff *skb_bad_txq;
+ struct sk_buff_head skb_bad_txq;
int padded;
refcount_t refcnt;
@@ -150,19 +152,23 @@ struct Qdisc_class_ops {
/* Child qdisc manipulation */
struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
int (*graft)(struct Qdisc *, unsigned long cl,
- struct Qdisc *, struct Qdisc **);
+ struct Qdisc *, struct Qdisc **,
+ struct netlink_ext_ack *extack);
struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
void (*qlen_notify)(struct Qdisc *, unsigned long);
/* Class manipulation routines */
unsigned long (*find)(struct Qdisc *, u32 classid);
int (*change)(struct Qdisc *, u32, u32,
- struct nlattr **, unsigned long *);
+ struct nlattr **, unsigned long *,
+ struct netlink_ext_ack *);
int (*delete)(struct Qdisc *, unsigned long);
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
- struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long);
+ struct tcf_block * (*tcf_block)(struct Qdisc *sch,
+ unsigned long arg,
+ struct netlink_ext_ack *extack);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -187,15 +193,26 @@ struct Qdisc_ops {
struct sk_buff * (*dequeue)(struct Qdisc *);
struct sk_buff * (*peek)(struct Qdisc *);
- int (*init)(struct Qdisc *, struct nlattr *arg);
+ int (*init)(struct Qdisc *sch, struct nlattr *arg,
+ struct netlink_ext_ack *extack);
void (*reset)(struct Qdisc *);
void (*destroy)(struct Qdisc *);
- int (*change)(struct Qdisc *, struct nlattr *arg);
- void (*attach)(struct Qdisc *);
+ int (*change)(struct Qdisc *sch,
+ struct nlattr *arg,
+ struct netlink_ext_ack *extack);
+ void (*attach)(struct Qdisc *sch);
+ int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
int (*dump)(struct Qdisc *, struct sk_buff *);
int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
+ void (*ingress_block_set)(struct Qdisc *sch,
+ u32 block_index);
+ void (*egress_block_set)(struct Qdisc *sch,
+ u32 block_index);
+ u32 (*ingress_block_get)(struct Qdisc *sch);
+ u32 (*egress_block_get)(struct Qdisc *sch);
+
struct module *owner;
};
@@ -218,14 +235,18 @@ struct tcf_proto_ops {
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- void (*destroy)(struct tcf_proto*);
+ void (*destroy)(struct tcf_proto *tp,
+ struct netlink_ext_ack *extack);
void* (*get)(struct tcf_proto*, u32 handle);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- void **, bool);
- int (*delete)(struct tcf_proto*, void *, bool*);
+ void **, bool,
+ struct netlink_ext_ack *);
+ int (*delete)(struct tcf_proto *tp, void *arg,
+ bool *last,
+ struct netlink_ext_ack *);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
void (*bind_class)(void *, u32, unsigned long);
@@ -247,8 +268,6 @@ struct tcf_proto {
/* All the rest */
u32 prio;
- u32 classid;
- struct Qdisc *q;
void *data;
const struct tcf_proto_ops *ops;
struct tcf_chain *chain;
@@ -267,8 +286,7 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
struct tcf_proto __rcu *filter_chain;
- tcf_chain_head_change_t *chain_head_change;
- void *chain_head_change_priv;
+ struct list_head filter_chain_list;
struct list_head list;
struct tcf_block *block;
u32 index; /* chain index */
@@ -277,12 +295,33 @@ struct tcf_chain {
struct tcf_block {
struct list_head chain_list;
+ u32 index; /* block index for shared blocks */
+ unsigned int refcnt;
struct net *net;
struct Qdisc *q;
struct list_head cb_list;
- struct work_struct work;
+ struct list_head owner_list;
+ bool keep_dst;
+ unsigned int offloadcnt; /* Number of offloaded filters */
+ unsigned int offloadcnt; /* Number of offloaded filters */
+ unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
};
+static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+{
+ if (*flags & TCA_CLS_FLAGS_IN_HW)
+ return;
+ *flags |= TCA_CLS_FLAGS_IN_HW;
+ block->offloadcnt++;
+}
+
+static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+{
+ if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+ return;
+ *flags &= ~TCA_CLS_FLAGS_IN_HW;
+ block->offloadcnt--;
+}
+
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
@@ -291,11 +330,31 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+ return this_cpu_ptr(q->cpu_qstats)->qlen;
+}
+
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
+{
+ __u32 qlen = 0;
+ int i;
+
+ if (q->flags & TCQ_F_NOLOCK) {
+ for_each_possible_cpu(i)
+ qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+ } else {
+ qlen = q->q.qlen;
+ }
+
+ return qlen;
+}
+
static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
return (struct qdisc_skb_cb *)skb->cb;
@@ -432,6 +491,7 @@ void qdisc_class_hash_remove(struct Qdisc_class_hash *,
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
@@ -444,10 +504,12 @@ void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- const struct Qdisc_ops *ops);
+ const struct Qdisc_ops *ops,
+ struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
- const struct Qdisc_ops *ops, u32 parentid);
+ const struct Qdisc_ops *ops, u32 parentid,
+ struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);
@@ -479,7 +541,7 @@ static inline bool skb_skip_tc_classify(struct sk_buff *skb)
return false;
}
-/* Reset all TX qdiscs greater then index of a device. */
+/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
struct Qdisc *qdisc;
@@ -633,12 +695,39 @@ static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
sch->qstats.backlog -= qdisc_pkt_len(skb);
}
+static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
const struct sk_buff *skb)
{
sch->qstats.backlog += qdisc_pkt_len(skb);
}
+static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
+ const struct sk_buff *skb)
+{
+ this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+{
+ this_cpu_inc(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+{
+ this_cpu_dec(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
+{
+ this_cpu_inc(sch->cpu_qstats->requeues);
+}
+
static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
sch->qstats.drops += count;
@@ -736,6 +825,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
*to_free = skb;
}
+static inline void __qdisc_drop_all(struct sk_buff *skb,
+ struct sk_buff **to_free)
+{
+ if (skb->prev)
+ skb->prev->next = *to_free;
+ else
+ skb->next = *to_free;
+ *to_free = skb;
+}
+
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct qdisc_skb_head *qh,
struct sk_buff **to_free)
@@ -769,26 +868,30 @@ static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
+ struct sk_buff *skb = skb_peek(&sch->gso_skb);
+
/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
- if (!sch->gso_skb) {
- sch->gso_skb = sch->dequeue(sch);
- if (sch->gso_skb) {
+ if (!skb) {
+ skb = sch->dequeue(sch);
+
+ if (skb) {
+ __skb_queue_head(&sch->gso_skb, skb);
/* it's still part of the queue */
- qdisc_qstats_backlog_inc(sch, sch->gso_skb);
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
}
}
- return sch->gso_skb;
+ return skb;
}
/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
- struct sk_buff *skb = sch->gso_skb;
+ struct sk_buff *skb = skb_peek(&sch->gso_skb);
if (skb) {
- sch->gso_skb = NULL;
+ skb = __skb_dequeue(&sch->gso_skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
@@ -846,6 +949,14 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
qdisc_qstats_drop(sch);
}
+static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop(skb, to_free);
+ qdisc_qstats_cpu_drop(sch);
+
+ return NET_XMIT_DROP;
+}
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
@@ -856,6 +967,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop_all(skb, to_free);
+ qdisc_qstats_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
long it will take to send a packet given its size.
*/
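A sketch of how a lockless-capable qdisc is expected to account, assuming it was allocated with per-cpu stats; the enqueue body is illustrative and elides the actual queueing.

#include <linux/skbuff.h>
#include <net/sch_generic.h>

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	/* ... link skb into the qdisc's own queue here ... */

	if (sch->flags & TCQ_F_NOLOCK) {
		/* per-cpu counters, no qdisc lock held */
		qdisc_qstats_cpu_backlog_inc(sch, skb);
		qdisc_qstats_cpu_qlen_inc(sch);
	} else {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	}

	return NET_XMIT_SUCCESS;
}

Anything that reports queue length (dump or graft paths) should then go through qdisc_qlen_sum(), which folds the per-cpu qlen counters back together for TCQ_F_NOLOCK qdiscs.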
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index e5c57d0a082d..687e7f80037d 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -62,8 +62,10 @@ struct sctp_auth_bytes {
/* Definition for a shared key, whether endpoint or association */
struct sctp_shared_key {
struct list_head key_list;
- __u16 key_id;
struct sctp_auth_bytes *key;
+ refcount_t refcnt;
+ __u16 key_id;
+ __u8 deactivated;
};
#define key_for_each(__key, __list_head) \
@@ -103,21 +105,22 @@ int sctp_auth_send_cid(enum sctp_cid chunk,
int sctp_auth_recv_cid(enum sctp_cid chunk,
const struct sctp_association *asoc);
void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
- struct sk_buff *skb,
- struct sctp_auth_chunk *auth, gfp_t gfp);
+ struct sk_buff *skb, struct sctp_auth_chunk *auth,
+ struct sctp_shared_key *ep_key, gfp_t gfp);
+void sctp_auth_shkey_release(struct sctp_shared_key *sh_key);
+void sctp_auth_shkey_hold(struct sctp_shared_key *sh_key);
/* API Helpers */
int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id);
int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
struct sctp_hmacalgo *hmacs);
-int sctp_auth_set_key(struct sctp_endpoint *ep,
- struct sctp_association *asoc,
+int sctp_auth_set_key(struct sctp_endpoint *ep, struct sctp_association *asoc,
struct sctp_authkey *auth_key);
int sctp_auth_set_active_key(struct sctp_endpoint *ep,
- struct sctp_association *asoc,
- __u16 key_id);
+ struct sctp_association *asoc, __u16 key_id);
int sctp_auth_del_key_id(struct sctp_endpoint *ep,
- struct sctp_association *asoc,
- __u16 key_id);
+ struct sctp_association *asoc, __u16 key_id);
+int sctp_auth_deact_key_id(struct sctp_endpoint *ep,
+ struct sctp_association *asoc, __u16 key_id);
#endif
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index b55c6a48a206..6640f84fe536 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -100,6 +100,7 @@ enum sctp_verb {
SCTP_CMD_SET_SK_ERR, /* Set sk_err */
SCTP_CMD_ASSOC_CHANGE, /* generate and send assoc_change event */
SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */
+ SCTP_CMD_PEER_NO_AUTH, /* generate and send authentication event */
SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index deaafa9b09cb..20ff237c5eb2 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -145,12 +145,13 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other)
SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
-#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
+#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
+ a->chunk_hdr->type == SCTP_CID_I_DATA)
/* Calculate the actual data size in a data chunk */
-#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end)\
- - (unsigned long)(c->chunk_hdr)\
- - sizeof(struct sctp_data_chunk)))
+#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
+ (unsigned long)(c->chunk_hdr) - \
+ sctp_datachk_len(&c->asoc->stream)))
/* Internal error codes */
enum sctp_ierror {
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 906a9c0efa71..28b996d63490 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -107,7 +107,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
-unsigned int sctp_poll(struct file *file, struct socket *sock,
+__poll_t sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
@@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
-int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_start(struct rhashtable_iter *iter);
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_next(struct net *net,
struct rhashtable_iter *iter);
@@ -180,14 +180,7 @@ struct sctp_transport *sctp_epaddr_lookup_transport(
/*
* sctp/proc.c
*/
-int sctp_snmp_proc_init(struct net *net);
-void sctp_snmp_proc_exit(struct net *net);
-int sctp_eps_proc_init(struct net *net);
-void sctp_eps_proc_exit(struct net *net);
-int sctp_assocs_proc_init(struct net *net);
-void sctp_assocs_proc_exit(struct net *net);
-int sctp_remaddr_proc_init(struct net *net);
-void sctp_remaddr_proc_exit(struct net *net);
+int __net_init sctp_proc_init(struct net *net);
/*
* sctp/offload.c
@@ -318,7 +311,6 @@ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
{.label= #name, .counter= &sctp_dbg_objcnt_## name}
void sctp_dbg_objcnt_init(struct net *);
-void sctp_dbg_objcnt_exit(struct net *);
#else
@@ -326,7 +318,6 @@ void sctp_dbg_objcnt_exit(struct net *);
#define SCTP_DBG_OBJCNT_DEC(name)
static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
-static inline void sctp_dbg_objcnt_exit(struct net *net) { return; }
#endif /* CONFIG_SCTP_DBG_OBJCOUNT */
@@ -441,16 +432,18 @@ static inline int sctp_list_single_entry(struct list_head *head)
static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
{
struct sctp_sock *sp = sctp_sk(asoc->base.sk);
+ struct sctp_af *af = sp->pf->af;
int frag = pmtu;
- frag -= sp->pf->af->net_header_len;
- frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
+ frag -= af->ip_options_len(asoc->base.sk);
+ frag -= af->net_header_len;
+ frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
- sizeof(struct sctp_data_chunk)));
+ sctp_datachk_len(&asoc->stream)));
return frag;
}
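As a worked example under simple assumptions (IPv4, no IP options, PMTU 1500, no user_frag cap): frag = 1500 - 0 - 20 - (12 + 16) = 1452 bytes of payload per DATA chunk, while an association that negotiated I-DATA gets 1500 - 0 - 20 - (12 + 20) = 1448, because sctp_datachk_len() then reports the larger I-DATA chunk header; both values already satisfy the final SCTP_TRUNC4 rounding.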
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 70fb397f65b0..2d0e782c9055 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -197,10 +197,14 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
const __u32 lowest_tsn,
const struct sctp_chunk *chunk);
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+ __u8 flags, int paylen, gfp_t gfp);
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_ifwdtsn_skip *skiplist);
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
- int len, const __u8 flags,
- __u16 ssn, gfp_t gfp);
+ int len, __u8 flags, gfp_t gfp);
struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
const __u32 lowest_tsn);
struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
@@ -259,7 +263,8 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_fwdtsn_skip *skiplist);
-struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc,
+ __u16 key_id);
struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
__u16 stream_num, __be16 *stream_list,
bool out, bool in);
@@ -342,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
__u16 size;
size = ntohs(chunk->chunk_hdr->length);
- size -= sizeof(struct sctp_data_chunk);
+ size -= sctp_datahdr_len(&chunk->asoc->stream);
return size;
}
@@ -358,6 +363,12 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
typecheck(__u32, b) && \
((__s32)((a) - (b)) <= 0))
+/* Compare two MIDs */
+#define MID_lt(a, b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
+
/* Compare two SSNs */
#define SSN_lt(a,b) \
(typecheck(__u16, a) && \
diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h
new file mode 100644
index 000000000000..6657711c8bc4
--- /dev/null
+++ b/include/net/sctp/stream_interleave.h
@@ -0,0 +1,61 @@
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used by the stream schedulers, defined in RFC
+ * draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Xin Long <lucien.xin@gmail.com>
+ */
+
+#ifndef __sctp_stream_interleave_h__
+#define __sctp_stream_interleave_h__
+
+struct sctp_stream_interleave {
+ __u16 data_chunk_len;
+ __u16 ftsn_chunk_len;
+ /* (I-)DATA process */
+ struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp);
+ void (*assign_number)(struct sctp_chunk *chunk);
+ bool (*validate_data)(struct sctp_chunk *chunk);
+ int (*ulpevent_data)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp);
+ int (*enqueue_event)(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event);
+ void (*renege_events)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp);
+ void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+ void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+ /* (I-)FORWARD-TSN process */
+ void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
+ bool (*validate_ftsn)(struct sctp_chunk *chunk);
+ void (*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
+ void (*handle_ftsn)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk);
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream);
+
+#endif /* __sctp_stream_interleave_h__ */
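The ops table is meant to be dereferenced through the association's stream at each (I-)DATA decision point. A minimal sketch, assuming the stream and chunk layout from structs.h; the caller name is illustrative.

#include <net/sctp/structs.h>

static int example_deliver_data(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_stream *stream = &chunk->asoc->stream;

	/* DATA vs I-DATA behaviour is picked by the ops table, not here */
	return stream->si->ulpevent_data(ulpq, chunk, gfp);
}

sctp_stream_interleave_init() is expected to point ->si at the appropriate table once the peer's (I-)DATA support is known.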
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 9a5ccf03a59b..a0ec462bc1a9 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -89,6 +89,7 @@ struct sctp_stream;
#include <net/sctp/tsnmap.h>
#include <net/sctp/ulpevent.h>
#include <net/sctp/ulpqueue.h>
+#include <net/sctp/stream_interleave.h>
/* Structures useful for managing bind/connect. */
@@ -202,12 +203,17 @@ struct sctp_sock {
/* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
__u32 param_flags;
- struct sctp_initmsg initmsg;
struct sctp_rtoinfo rtoinfo;
struct sctp_paddrparams paddrparam;
- struct sctp_event_subscribe subscribe;
struct sctp_assocparams assocparams;
+ /*
+ * These two structures must be grouped together for the usercopy
+ * whitelist region.
+ */
+ struct sctp_event_subscribe subscribe;
+ struct sctp_initmsg initmsg;
+
int user_frag;
__u32 autoclose;
@@ -217,6 +223,7 @@ struct sctp_sock {
disable_fragments:1,
v4mapped:1,
frag_interleave:1,
+ strm_interleave:1,
recvrcvinfo:1,
recvnxtinfo:1,
data_ready_signalled:1;
@@ -397,6 +404,28 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
#define sctp_ssn_skip(stream, type, sid, ssn) \
((stream)->type[sid].ssn = ssn + 1)
+/* What is the current MID number for this stream? */
+#define sctp_mid_peek(stream, type, sid) \
+ ((stream)->type[sid].mid)
+
+/* Return the next MID number for this stream. */
+#define sctp_mid_next(stream, type, sid) \
+ ((stream)->type[sid].mid++)
+
+/* Skip over this mid and all below. */
+#define sctp_mid_skip(stream, type, sid, mid) \
+ ((stream)->type[sid].mid = mid + 1)
+
+#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
+
+/* What is the current MID_uo number for this stream? */
+#define sctp_mid_uo_peek(stream, type, sid) \
+ ((stream)->type[sid].mid_uo)
+
+/* Return the next MID_uo number for this stream. */
+#define sctp_mid_uo_next(stream, type, sid) \
+ ((stream)->type[sid].mid_uo++)
+
/*
* Pointers to address related SCTP functions.
* (i.e. things that depend on the address family.)
@@ -462,6 +491,7 @@ struct sctp_af {
void (*ecn_capable)(struct sock *sk);
__u16 net_header_len;
int sockaddr_len;
+ int (*ip_options_len)(struct sock *sk);
sa_family_t sa_family;
struct list_head list;
};
@@ -486,6 +516,7 @@ struct sctp_pf {
int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
+ void (*copy_ip_options)(struct sock *sk, struct sock *newsk);
struct sctp_af *af;
};
@@ -548,8 +579,12 @@ struct sctp_chunk {
/* This points to the sk_buff containing the actual data. */
struct sk_buff *skb;
- /* In case of GSO packets, this will store the head one */
- struct sk_buff *head_skb;
+ union {
+ /* In case of GSO packets, this will store the head one */
+ struct sk_buff *head_skb;
+ /* In case of auth enabled, this will point to the shkey */
+ struct sctp_shared_key *shkey;
+ };
/* These are the SCTP headers by reverse order in a packet.
* Note that some of these may happen more than once. In that
@@ -574,6 +609,8 @@ struct sctp_chunk {
struct sctp_addiphdr *addip_hdr;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_authhdr *auth_hdr;
+ struct sctp_idatahdr *idata_hdr;
+ struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
} subh;
__u8 *chunk_end;
@@ -620,6 +657,7 @@ struct sctp_chunk {
__u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
has_tsn:1, /* Does this chunk have a TSN yet? */
has_ssn:1, /* Does this chunk have a SSN yet? */
+#define has_mid has_ssn
singleton:1, /* Only chunk in the packet? */
end_of_packet:1, /* Last chunk in the packet? */
ecn_ce_done:1, /* Have we processed the ECN CE bit? */
@@ -1073,6 +1111,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, int msg_len);
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
/* Uncork and flush an outqueue. */
static inline void sctp_outq_cork(struct sctp_outq *q)
{
@@ -1283,6 +1322,16 @@ struct sctp_endpoint {
reconf_enable:1;
__u8 strreset_enable;
+
+ /* Security identifiers from incoming (INIT). These are set by
+ * security_sctp_assoc_request(). These will only be used by
+ * SCTP TCP type sockets and peeled off connections as they
+ * cause a new socket to be generated. security_sctp_sk_clone()
+ * will then plug these into the new socket.
+ */
+
+ u32 secid;
+ u32 peer_secid;
};
/* Recover the outer endpoint structure. */
@@ -1304,12 +1353,12 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
const union sctp_addr *paddr,
struct sctp_transport **);
-int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
- const union sctp_addr *);
+bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
+ const union sctp_addr *paddr);
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
struct net *, const union sctp_addr *);
-int sctp_has_association(struct net *net, const union sctp_addr *laddr,
- const union sctp_addr *paddr);
+bool sctp_has_association(struct net *net, const union sctp_addr *laddr,
+ const union sctp_addr *paddr);
int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
@@ -1357,13 +1406,25 @@ struct sctp_stream_out_ext {
};
struct sctp_stream_out {
- __u16 ssn;
- __u8 state;
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ __u32 mid_uo;
struct sctp_stream_out_ext *ext;
+ __u8 state;
};
struct sctp_stream_in {
- __u16 ssn;
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ __u32 mid_uo;
+ __u32 fsn;
+ __u32 fsn_uo;
+ char pd_mode;
+ char pd_mode_uo;
};
struct sctp_stream {
@@ -1387,11 +1448,32 @@ struct sctp_stream {
struct sctp_stream_out_ext *rr_next;
};
};
+ struct sctp_stream_interleave *si;
};
#define SCTP_STREAM_CLOSED 0x00
#define SCTP_STREAM_OPEN 0x01
+static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
+{
+ return stream->si->data_chunk_len;
+}
+
+static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
+{
+ return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
+static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream)
+{
+ return stream->si->ftsn_chunk_len;
+}
+
+static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream)
+{
+ return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
/* SCTP_GET_ASSOC_STATS counters */
struct sctp_priv_assoc_stats {
/* Maximum observed rto in the association during subsequent
@@ -1929,6 +2011,7 @@ struct sctp_association {
* The currently generated association shared key (secret)
*/
struct sctp_auth_bytes *asoc_shared_key;
+ struct sctp_shared_key *shkey;
/* SCTP AUTH: hmac id of the first peer requested algorithm
* that we support.
@@ -1940,6 +2023,7 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
force_delay:1,
+ intl_enable:1,
prsctp_enable:1,
reconf_enable:1;
@@ -2045,6 +2129,9 @@ struct sctp_cmsgs {
struct sctp_initmsg *init;
struct sctp_sndrcvinfo *srinfo;
struct sctp_sndinfo *sinfo;
+ struct sctp_prinfo *prinfo;
+ struct sctp_authinfo *authinfo;
+ struct msghdr *addrs_msg;
};
/* Structure for tracking memory objects */
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 231dc42f1da6..51b4e0626c34 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -45,19 +45,29 @@
/* A structure to carry information to the ULP (e.g. Sockets API) */
/* Warning: This sits inside an skb.cb[] area. Be very careful of
* growing this structure as it is at the maximum limit now.
+ *
+ * sctp_ulpevent is saved in sk->cb (48 bytes), whose last 4 bytes
+ * have been taken by sock_skb_cb, so 'packed' has to be used here
+ * to make sctp_ulpevent fit into the remaining 44 bytes.
*/
struct sctp_ulpevent {
struct sctp_association *asoc;
struct sctp_chunk *chunk;
unsigned int rmem_len;
- __u32 ppid;
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ union {
+ __u32 ppid;
+ __u32 fsn;
+ };
__u32 tsn;
__u32 cumtsn;
__u16 stream;
- __u16 ssn;
__u16 flags;
__u16 msg_flags;
-};
+} __packed;
/* Retrieve the skb this event sits inside of. */
static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
@@ -112,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
const struct sctp_association *asoc,
- __u32 indication, gfp_t gfp);
+ __u32 indication, __u32 sid, __u32 seq,
+ __u32 flags, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
const struct sctp_association *asoc, gfp_t gfp);
@@ -140,6 +151,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
const struct sctp_association *asoc, __u16 flags,
__u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
+struct sctp_ulpevent *sctp_make_reassembled_event(
+ struct net *net, struct sk_buff_head *queue,
+ struct sk_buff *f_frag, struct sk_buff *l_frag);
+
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index e0dce07b8794..bb0ecba3db2b 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -45,6 +45,7 @@ struct sctp_ulpq {
char pd_mode;
struct sctp_association *asoc;
struct sk_buff_head reasm;
+ struct sk_buff_head reasm_uo;
struct sk_buff_head lobby;
};
@@ -76,11 +77,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
-#endif /* __sctp_ulpqueue_h__ */
-
-
-
-
-
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+ struct sk_buff_head *list, __u16 needed);
+#endif /* __sctp_ulpqueue_h__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 7a7b14e9628a..74d725fdbe0f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -72,6 +72,7 @@
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/smc.h>
+#include <net/l3mdev.h>
/*
* This structure really needs to be cleaned up.
@@ -416,6 +417,7 @@ struct sock {
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
+ netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
@@ -1024,6 +1026,9 @@ static inline void sk_prot_clear_nulls(struct sock *sk, int size)
struct proto {
void (*close)(struct sock *sk,
long timeout);
+ int (*pre_connect)(struct sock *sk,
+ struct sockaddr *uaddr,
+ int addr_len);
int (*connect)(struct sock *sk,
struct sockaddr *uaddr,
int addr_len);
@@ -1083,6 +1088,7 @@ struct proto {
#endif
bool (*stream_memory_free)(const struct sock *sk);
+ bool (*stream_memory_read)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
@@ -1108,6 +1114,8 @@ struct proto {
struct kmem_cache *slab;
unsigned int obj_size;
slab_flags_t slab_flags;
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
struct percpu_counter *orphan_count;
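The new useroffset/usersize fields describe a contiguous usercopy whitelist inside the per-protocol socket structure, which is why the sctp_sock hunk above groups subscribe and initmsg together. A hedged sketch of how such a region's offset and size can be computed follows; demo_sock is an illustrative stand-in, not the real layout.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the grouped region inside a protocol's
 * socket structure (the real fields live in struct sctp_sock).
 */
struct demo_sock {
        int other_state;
        /* usercopy whitelist region: fields copied to/from user space */
        struct { unsigned int events; }         subscribe;
        struct { unsigned short num_ostreams; } initmsg;
        /* end of whitelist region */
        int more_state;
};

int main(void)
{
        /* useroffset/usersize as struct proto now expects them: the
         * offset of the first whitelisted member and the combined size
         * of the contiguous whitelisted members.
         */
        size_t useroffset = offsetof(struct demo_sock, subscribe);
        size_t usersize   = offsetof(struct demo_sock, initmsg) +
                            sizeof(((struct demo_sock *)0)->initmsg) -
                            useroffset;

        printf("useroffset=%zu usersize=%zu\n", useroffset, usersize);
        return 0;
}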
@@ -1134,6 +1142,7 @@ struct proto {
int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
+int sock_load_diag_module(int family, int protocol);
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
@@ -1262,6 +1271,7 @@ proto_memory_pressure(struct proto *prot)
/* Called with local bh disabled */
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
+int sock_inuse_get(struct net *net);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
int inc)
@@ -1445,10 +1455,8 @@ do { \
} while (0)
#ifdef CONFIG_LOCKDEP
-static inline bool lockdep_sock_is_held(const struct sock *csk)
+static inline bool lockdep_sock_is_held(const struct sock *sk)
{
- struct sock *sk = (struct sock *)csk;
-
return lockdep_is_held(&sk->sk_lock) ||
lockdep_is_held(&sk->sk_lock.slock);
}
@@ -1582,8 +1590,8 @@ int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
-int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
-unsigned int sock_no_poll(struct file *, struct socket *,
+int sock_no_getname(struct socket *, struct sockaddr *, int);
+__poll_t sock_no_poll(struct file *, struct socket *,
struct poll_table_struct *);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
@@ -1860,15 +1868,6 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
sk->sk_route_caps &= ~flags;
}
-static inline bool sk_check_csum_caps(struct sock *sk)
-{
- return (sk->sk_route_caps & NETIF_F_HW_CSUM) ||
- (sk->sk_family == PF_INET &&
- (sk->sk_route_caps & NETIF_F_IP_CSUM)) ||
- (sk->sk_family == PF_INET6 &&
- (sk->sk_route_caps & NETIF_F_IPV6_CSUM));
-}
-
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, char *to,
int copy, int offset)
@@ -2147,6 +2146,10 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
+ int sg_start, int *sg_curr, unsigned int *sg_size,
+ int first_coalesce);
+
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
@@ -2337,31 +2340,6 @@ static inline bool sk_listener(const struct sock *sk)
return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
-/**
- * sk_state_load - read sk->sk_state for lockless contexts
- * @sk: socket pointer
- *
- * Paired with sk_state_store(). Used in places we do not hold socket lock :
- * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
- */
-static inline int sk_state_load(const struct sock *sk)
-{
- return smp_load_acquire(&sk->sk_state);
-}
-
-/**
- * sk_state_store - update sk->sk_state
- * @sk: socket pointer
- * @newstate: new state
- *
- * Paired with sk_state_load(). Should be used in contexts where
- * state change might impact lockless readers.
- */
-static inline void sk_state_store(struct sock *sk, int newstate)
-{
- smp_store_release(&sk->sk_state, newstate);
-}
-
void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
@@ -2412,4 +2390,34 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
return *proto->sysctl_rmem;
}
+/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
+ * Some wifi drivers need to tweak it to get more chunks.
+ * They can use this helper from their ndo_start_xmit()
+ */
+static inline void sk_pacing_shift_update(struct sock *sk, int val)
+{
+ if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+ return;
+ sk->sk_pacing_shift = val;
+}
+
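A hypothetical caller of the helper above, in the spirit of its comment: a wifi driver lowering the pacing shift from its transmit path to buffer roughly 8 ms of data per socket. Both the function name and the value 7 are illustrative.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical ndo_start_xmit() fragment: request roughly 8 ms of
 * queued data per socket (1 sec >> 7) instead of the ~1 ms default
 * (shift 10). The helper itself tolerates a NULL skb->sk.
 */
static netdev_tx_t demo_wifi_xmit(struct sk_buff *skb, struct net_device *dev)
{
        sk_pacing_shift_update(skb->sk, 7);

        /* ... hand the frame to the firmware queues here ... */
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}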
+/* if a socket is bound to a device, check that the given device
+ * index is either the same or that the socket is bound to an L3
+ * master device and the given device index is also enslaved to
+ * that L3 master
+ */
+static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
+{
+ int mdif;
+
+ if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
+ return true;
+
+ mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
+ if (mdif && mdif == sk->sk_bound_dev_if)
+ return true;
+
+ return false;
+}
+
#endif /* _SOCK_H */
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 781f3433a0be..9470fd7e4350 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -6,10 +6,16 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_csum.h>
+struct tcf_csum_params {
+ int action;
+ u32 update_flags;
+ struct rcu_head rcu;
+};
+
struct tcf_csum {
struct tc_action common;
- u32 update_flags;
+ struct tcf_csum_params __rcu *params;
};
#define to_tcf_csum(a) ((struct tcf_csum *)a)
@@ -24,7 +30,13 @@ static inline bool is_tcf_csum(const struct tc_action *a)
static inline u32 tcf_csum_update_flags(const struct tc_action *a)
{
- return to_tcf_csum(a)->update_flags;
+ u32 update_flags;
+
+ rcu_read_lock();
+ update_flags = rcu_dereference(to_tcf_csum(a)->params)->update_flags;
+ rcu_read_unlock();
+
+ return update_flags;
}
#endif /* __NET_TC_CSUM_H */
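The reader above pairs with a writer that swaps in a fresh tcf_csum_params and frees the old block only after an RCU grace period. A hedged sketch of that update side follows; it is simplified, not the actual act_csum.c code, and it assumes the caller holds the RTNL lock so rtnl_dereference() is valid.

#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/tc_act/tc_csum.h>

/* Sketch of the writer side of the RCU-managed parameter block:
 * allocate and fill a new tcf_csum_params, publish it, then free the
 * old copy once readers are done with it.
 */
static int demo_csum_update(struct tcf_csum *p, u32 new_flags, int action)
{
        struct tcf_csum_params *params_new, *params_old;

        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (!params_new)
                return -ENOMEM;

        params_new->update_flags = new_flags;
        params_new->action = action;

        /* Publish; readers use rcu_dereference() as in
         * tcf_csum_update_flags() above.
         */
        params_old = rtnl_dereference(p->params);
        rcu_assign_pointer(p->params, params_new);

        if (params_old)
                kfree_rcu(params_old, rcu);
        return 0;
}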
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 21d253c9a8c6..a2e9cbca5c9e 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,10 +8,8 @@
struct tcf_mirred {
struct tc_action common;
int tcfm_eaction;
- int tcfm_ifindex;
bool tcfm_mac_header_xmit;
struct net_device __rcu *tcfm_dev;
- struct net *net;
struct list_head tcfm_list;
};
#define to_mirred(a) ((struct tcf_mirred *)a)
@@ -34,9 +32,9 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
return false;
}
-static inline int tcf_mirred_ifindex(const struct tc_action *a)
+static inline struct net_device *tcf_mirred_dev(const struct tc_action *a)
{
- return to_mirred(a)->tcfm_ifindex;
+ return rtnl_dereference(to_mirred(a)->tcfm_dev);
}
#endif /* __NET_TC_MIR_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6da880d2f022..9c9b3768b350 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -374,7 +374,8 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb,
const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req, bool fastopen);
+ struct request_sock *req, bool fastopen,
+ bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
@@ -387,7 +388,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
-unsigned int tcp_poll(struct file *file, struct socket *sock,
+__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
@@ -510,8 +511,6 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
-u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
- int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
@@ -953,6 +952,7 @@ struct rate_sample {
u32 prior_in_flight; /* in flight before this ACK */
bool is_app_limited; /* is sample from packet with bubble in pipe? */
bool is_retrans; /* is sample from retransmission? */
+ bool is_ack_delayed; /* is this (likely) a delayed ACK? */
};
struct tcp_congestion_ops {
@@ -979,8 +979,8 @@ struct tcp_congestion_ops {
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
- /* suggest number of segments for each skb to transmit (optional) */
- u32 (*tso_segs_goal)(struct sock *sk);
+ /* override sysctl_tcp_min_tso_segs */
+ u32 (*min_tso_segs)(struct sock *sk);
/* returns the multiplier used in tcp_sndbuf_expand (optional) */
u32 (*sndbuf_expand)(struct sock *sk);
/* call when packets are delivered to update cwnd and pacing rate,
@@ -1507,8 +1507,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
- struct tcp_fastopen_cookie *cookie, int *syn_loss,
- unsigned long *last_syn_loss);
+ struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
struct tcp_fastopen_cookie *cookie, bool syn_lost,
u16 try_exp);
@@ -1546,7 +1545,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
-void tcp_fastopen_active_timeout_reset(void);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
/* Latencies incurred by various limits for a sender. They are
* chronograph-like stats that are mutually exclusive.
@@ -1983,6 +1982,11 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
#define TCP_ULP_MAX 128
#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
+enum {
+ TCP_ULP_TLS,
+ TCP_ULP_BPF,
+};
+
struct tcp_ulp_ops {
struct list_head list;
@@ -1991,12 +1995,15 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
+ int uid;
char name[TCP_ULP_NAME_MAX];
+ bool user_visible;
struct module *owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
+int tcp_set_ulp_id(struct sock *sk, const int ulp);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
@@ -2006,17 +2013,21 @@ void tcp_cleanup_ulp(struct sock *sk);
* program loaded).
*/
#ifdef CONFIG_BPF
-static inline int tcp_call_bpf(struct sock *sk, int op)
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
struct bpf_sock_ops_kern sock_ops;
int ret;
- if (sk_fullsock(sk))
+ memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+ if (sk_fullsock(sk)) {
+ sock_ops.is_fullsock = 1;
sock_owned_by_me(sk);
+ }
- memset(&sock_ops, 0, sizeof(sock_ops));
sock_ops.sk = sk;
sock_ops.op = op;
+ if (nargs > 0)
+ memcpy(sock_ops.args, args, nargs * sizeof(*args));
ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
if (ret == 0)
@@ -2025,18 +2036,46 @@ static inline int tcp_call_bpf(struct sock *sk, int op)
ret = -1;
return ret;
}
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+ u32 args[2] = {arg1, arg2};
+
+ return tcp_call_bpf(sk, op, 2, args);
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ u32 args[3] = {arg1, arg2, arg3};
+
+ return tcp_call_bpf(sk, op, 3, args);
+}
+
#else
-static inline int tcp_call_bpf(struct sock *sk, int op)
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
+{
+ return -EPERM;
+}
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
return -EPERM;
}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ return -EPERM;
+}
+
#endif
static inline u32 tcp_timeout_init(struct sock *sk)
{
int timeout;
- timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT);
+ timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
if (timeout <= 0)
timeout = TCP_TIMEOUT_INIT;
@@ -2047,7 +2086,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
int rwnd;
- rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT);
+ rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
if (rwnd < 0)
rwnd = 0;
@@ -2056,7 +2095,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
- return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1);
+ return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}
#if IS_ENABLED(CONFIG_SMC)
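The new 2-arg/3-arg wrappers only pack their scalars into a stack array before making the single tcp_call_bpf() call. A stand-alone mock of that packing pattern (toy mock_* types, not the real bpf_sock_ops_kern):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Toy stand-in for the args[] handling in struct bpf_sock_ops_kern. */
struct mock_sock_ops {
        int op;
        uint32_t args[4];
};

static int mock_call_bpf(int op, uint32_t nargs, const uint32_t *args)
{
        struct mock_sock_ops ops;

        memset(&ops, 0, sizeof(ops));
        ops.op = op;
        if (nargs > 0)
                memcpy(ops.args, args, nargs * sizeof(*args));

        printf("op=%d args=%u,%u,%u\n", ops.op,
               (unsigned int)ops.args[0], (unsigned int)ops.args[1],
               (unsigned int)ops.args[2]);
        return 0;
}

/* Mirrors tcp_call_bpf_2arg(): build the array on the caller's stack. */
static int mock_call_bpf_2arg(int op, uint32_t arg1, uint32_t arg2)
{
        uint32_t args[2] = { arg1, arg2 };

        return mock_call_bpf(op, 2, args);
}

int main(void)
{
        return mock_call_bpf_2arg(42, 100, 200);
}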
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
index 50e78a74d0df..2875e169d744 100644
--- a/include/net/tcp_states.h
+++ b/include/net/tcp_states.h
@@ -32,21 +32,21 @@ enum {
#define TCP_STATE_MASK 0xF
-#define TCP_ACTION_FIN (1 << 7)
+#define TCP_ACTION_FIN (1 << TCP_CLOSE)
enum {
- TCPF_ESTABLISHED = (1 << 1),
- TCPF_SYN_SENT = (1 << 2),
- TCPF_SYN_RECV = (1 << 3),
- TCPF_FIN_WAIT1 = (1 << 4),
- TCPF_FIN_WAIT2 = (1 << 5),
- TCPF_TIME_WAIT = (1 << 6),
- TCPF_CLOSE = (1 << 7),
- TCPF_CLOSE_WAIT = (1 << 8),
- TCPF_LAST_ACK = (1 << 9),
- TCPF_LISTEN = (1 << 10),
- TCPF_CLOSING = (1 << 11),
- TCPF_NEW_SYN_RECV = (1 << 12),
+ TCPF_ESTABLISHED = (1 << TCP_ESTABLISHED),
+ TCPF_SYN_SENT = (1 << TCP_SYN_SENT),
+ TCPF_SYN_RECV = (1 << TCP_SYN_RECV),
+ TCPF_FIN_WAIT1 = (1 << TCP_FIN_WAIT1),
+ TCPF_FIN_WAIT2 = (1 << TCP_FIN_WAIT2),
+ TCPF_TIME_WAIT = (1 << TCP_TIME_WAIT),
+ TCPF_CLOSE = (1 << TCP_CLOSE),
+ TCPF_CLOSE_WAIT = (1 << TCP_CLOSE_WAIT),
+ TCPF_LAST_ACK = (1 << TCP_LAST_ACK),
+ TCPF_LISTEN = (1 << TCP_LISTEN),
+ TCPF_CLOSING = (1 << TCP_CLOSING),
+ TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV),
};
#endif /* _LINUX_TCP_STATES_H */
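Deriving each TCPF_* flag from its TCP_* state number keeps `(1 << sk_state) & mask` membership tests correct by construction, which is what sk_listener() in sock.h relies on. A small stand-alone check of that idiom, with state values copied from the enum above:

#include <assert.h>

/* Same construction as the header: flag = 1 << state. The three state
 * values are copied from the enum in tcp_states.h.
 */
enum {
        DEMO_TCP_ESTABLISHED = 1,
        DEMO_TCP_CLOSE       = 7,
        DEMO_TCP_LISTEN      = 10,
};
#define DEMO_TCPF(state) (1 << (state))

static int demo_is_listener(int sk_state)
{
        return (1 << sk_state) & DEMO_TCPF(DEMO_TCP_LISTEN);
}

int main(void)
{
        assert(demo_is_listener(DEMO_TCP_LISTEN));
        assert(!demo_is_listener(DEMO_TCP_ESTABLISHED));
        assert(!demo_is_listener(DEMO_TCP_CLOSE));
        return 0;
}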
diff --git a/include/net/tls.h b/include/net/tls.h
index 9185e53a743c..3da8e13a6d96 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -36,9 +36,11 @@
#include <linux/types.h>
#include <asm/byteorder.h>
+#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <net/tcp.h>
+#include <net/strparser.h>
#include <uapi/linux/tls.h>
@@ -54,9 +56,46 @@
#define TLS_RECORD_TYPE_DATA 0x17
#define TLS_AAD_SPACE_SIZE 13
+#define TLS_DEVICE_NAME_MAX 32
+
+/*
+ * This structure defines the routines for an Inline TLS driver.
+ * The following routines are optional and filled with a
+ * null pointer if not defined.
+ *
+ * @name: The name of the registered Inline TLS device
+ * @dev_list: Inline TLS device list
+ * int (*feature)(struct tls_device *device);
+ *     Called to return the Inline TLS driver capability
+ *
+ * int (*hash)(struct tls_device *device, struct sock *sk);
+ *     This function sets up the Inline TLS driver for listen and programs
+ *     device-specific functionality as required
+ *
+ * void (*unhash)(struct tls_device *device, struct sock *sk);
+ *     This function cleans up the listen state set by the Inline TLS driver
+ */
+struct tls_device {
+ char name[TLS_DEVICE_NAME_MAX];
+ struct list_head dev_list;
+ int (*feature)(struct tls_device *device);
+ int (*hash)(struct tls_device *device, struct sock *sk);
+ void (*unhash)(struct tls_device *device, struct sock *sk);
+};
struct tls_sw_context {
struct crypto_aead *aead_send;
+ struct crypto_aead *aead_recv;
+ struct crypto_wait async_wait;
+
+ /* Receive context */
+ struct strparser strp;
+ void (*saved_data_ready)(struct sock *sk);
+ unsigned int (*sk_poll)(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+ struct sk_buff *recv_pkt;
+ u8 control;
+ bool decrypted;
/* Sending context */
char aad_space[TLS_AAD_SPACE_SIZE];
@@ -79,23 +118,32 @@ enum {
TLS_PENDING_CLOSED_RECORD
};
+struct cipher_context {
+ u16 prepend_size;
+ u16 tag_size;
+ u16 overhead_size;
+ u16 iv_size;
+ char *iv;
+ u16 rec_seq_size;
+ char *rec_seq;
+};
+
struct tls_context {
union {
struct tls_crypto_info crypto_send;
struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
};
+ union {
+ struct tls_crypto_info crypto_recv;
+ struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
+ };
void *priv_ctx;
- u8 tx_conf:2;
+ u8 conf:3;
- u16 prepend_size;
- u16 tag_size;
- u16 overhead_size;
- u16 iv_size;
- char *iv;
- u16 rec_seq_size;
- char *rec_seq;
+ struct cipher_context tx;
+ struct cipher_context rx;
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
@@ -113,6 +161,8 @@ struct tls_context {
int (*getsockopt)(struct sock *sk, int level,
int optname, char __user *optval,
int __user *optlen);
+ int (*hash)(struct sock *sk);
+ void (*unhash)(struct sock *sk);
};
int wait_on_pending_writer(struct sock *sk, long *timeo);
@@ -122,12 +172,19 @@ int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
-int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx);
+int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
void tls_sw_close(struct sock *sk, long timeout);
-void tls_sw_free_tx_resources(struct sock *sk);
+void tls_sw_free_resources(struct sock *sk);
+int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len);
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
void tls_icsk_clean_acked(struct sock *sk);
@@ -168,9 +225,9 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
-static inline void tls_err_abort(struct sock *sk)
+static inline void tls_err_abort(struct sock *sk, int err)
{
- sk->sk_err = EBADMSG;
+ sk->sk_err = err;
sk->sk_error_report(sk);
}
@@ -188,10 +245,10 @@ static inline bool tls_bigint_increment(unsigned char *seq, int len)
}
static inline void tls_advance_record_sn(struct sock *sk,
- struct tls_context *ctx)
+ struct cipher_context *ctx)
{
if (tls_bigint_increment(ctx->rec_seq, ctx->rec_seq_size))
- tls_err_abort(sk);
+ tls_err_abort(sk, EBADMSG);
tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
ctx->iv_size);
}
@@ -201,9 +258,9 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
size_t plaintext_len,
unsigned char record_type)
{
- size_t pkt_len, iv_size = ctx->iv_size;
+ size_t pkt_len, iv_size = ctx->tx.iv_size;
- pkt_len = plaintext_len + iv_size + ctx->tag_size;
+ pkt_len = plaintext_len + iv_size + ctx->tx.tag_size;
/* we cover nonce explicit here as well, so buf should be of
* size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
@@ -215,7 +272,7 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
buf[3] = pkt_len >> 8;
buf[4] = pkt_len & 0xFF;
memcpy(buf + TLS_NONCE_OFFSET,
- ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
+ ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
}
static inline void tls_make_aad(char *buf,
@@ -254,5 +311,7 @@ static inline struct tls_offload_context *tls_offload_ctx(
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
+void tls_register_device(struct tls_device *device);
+void tls_unregister_device(struct tls_device *device);
#endif /* _TLS_OFFLOAD_H */
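tls_advance_record_sn() now bumps two big-endian counters held in the per-direction cipher_context. A stand-alone sketch of the carry-propagating increment it relies on (same idea as tls_bigint_increment(), simplified):

#include <stdio.h>
#include <string.h>

/* Increment a big-endian byte counter in place; return 1 if it
 * wrapped, which is the condition tls_advance_record_sn() treats as
 * fatal and reports via tls_err_abort().
 */
static int demo_bigint_increment(unsigned char *seq, int len)
{
        int i;

        for (i = len - 1; i >= 0; i--) {
                if (++seq[i] != 0)
                        return 0;       /* no carry out of this byte */
        }
        return 1;                       /* wrapped all the way around */
}

int main(void)
{
        unsigned char rec_seq[8];

        memset(rec_seq, 0xff, sizeof(rec_seq));
        rec_seq[7] = 0xfe;

        printf("wrap=%d\n", demo_bigint_increment(rec_seq, 8));  /* 0 */
        printf("wrap=%d\n", demo_bigint_increment(rec_seq, 8));  /* 1 */
        return 0;
}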
diff --git a/include/net/udp.h b/include/net/udp.h
index 6c759c8594e2..0676b272f6ac 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -273,9 +273,10 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
+int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
-unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6);
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 81bdbf97319b..9185e45b997f 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
UDP_SKB_CB(skb)->cscov = cscov;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_valid = 0;
}
return 0;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index f96391e84a8a..ad73d8b3fcc2 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -301,7 +301,7 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
default:
- return features;;
+ return features;
}
if ((l4_hdr == IPPROTO_UDP) &&
diff --git a/include/net/wext.h b/include/net/wext.h
index e51f067fdb3a..aa192a670304 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -7,7 +7,7 @@
struct net;
#ifdef CONFIG_WEXT_CORE
-int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, unsigned int cmd,
void __user *arg);
int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
unsigned long arg);
@@ -15,7 +15,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
struct iw_statistics *get_wireless_stats(struct net_device *dev);
int call_commit_handler(struct net_device *dev);
#else
-static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
+static inline int wext_handle_ioctl(struct net *net, unsigned int cmd,
void __user *arg)
{
return -EINVAL;
diff --git a/include/net/xdp.h b/include/net/xdp.h
new file mode 100644
index 000000000000..b2362ddfa694
--- /dev/null
+++ b/include/net/xdp.h
@@ -0,0 +1,48 @@
+/* include/net/xdp.h
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2. See COPYING.
+ */
+#ifndef __LINUX_NET_XDP_H__
+#define __LINUX_NET_XDP_H__
+
+/**
+ * DOC: XDP RX-queue information
+ *
+ * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
+ * level RX-ring queues. It is information that is specific to how
+ * the driver has configured a given RX-ring queue.
+ *
+ * Each xdp_buff frame received in the driver carries a (pointer)
+ * reference to this xdp_rxq_info structure. This provides the XDP
+ * data-path read-access to RX-info for both kernel and bpf-side
+ * (limited subset).
+ *
+ * For now, direct access is only safe while running in NAPI/softirq
+ * context. Contents are read-mostly and must not be updated during
+ * driver NAPI/softirq poll.
+ *
+ * The driver usage API is a register and unregister API.
+ *
+ * The struct is not directly tied to the XDP prog. A new XDP prog
+ * can be attached as long as it doesn't change the underlying
+ * RX-ring. If the RX-ring does change significantly, the NIC driver
+ * naturally needs to stop the RX-ring before purging and reallocating
+ * memory. In that process the driver MUST call unregister (which
+ * also applies to driver shutdown and unload). The register API is
+ * also mandatory during RX-ring setup.
+ */
+
+struct xdp_rxq_info {
+ struct net_device *dev;
+ u32 queue_index;
+ u32 reg_state;
+} ____cacheline_aligned; /* perf critical, avoid false-sharing */
+
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index);
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
+
+#endif /* __LINUX_NET_XDP_H__ */
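A hedged sketch of the driver-side lifecycle the DOC comment describes: register the xdp_rxq_info when the RX-ring is set up and unregister it before the ring memory goes away. The demo_rx_ring structure is illustrative; real drivers embed xdp_rxq_info in their own ring descriptor.

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Illustrative per-ring structure; real drivers embed xdp_rxq_info in
 * their own RX-ring descriptor.
 */
struct demo_rx_ring {
        struct net_device *netdev;
        u32 queue_index;
        struct xdp_rxq_info xdp_rxq;
};

static int demo_ring_setup(struct demo_rx_ring *ring)
{
        /* Mandatory during RX-ring setup. */
        return xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                                ring->queue_index);
}

static void demo_ring_teardown(struct demo_rx_ring *ring)
{
        /* Must run before the ring memory is purged or reallocated,
         * and also on driver shutdown and unload.
         */
        xdp_rxq_info_unreg(&ring->xdp_rxq);
}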
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ae35991b5877..a872379b69da 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -968,7 +968,7 @@ static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_c
/* A struct encoding bundle of transformations to apply to some set of flow.
*
- * dst->child points to the next element of bundle.
+ * xdst->child points to the next element of bundle.
* dst->xfrm points to an instance of a transformer.
*
* Due to unfortunate limitations of current routing cache, which we
@@ -984,6 +984,8 @@ struct xfrm_dst {
struct rt6_info rt6;
} u;
struct dst_entry *route;
+ struct dst_entry *child;
+ struct dst_entry *path;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols, num_xfrms;
u32 xfrm_genid;
@@ -994,7 +996,35 @@ struct xfrm_dst {
u32 path_cookie;
};
+static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+ if (dst->xfrm) {
+ const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
+
+ return xdst->path;
+ }
+#endif
+ return (struct dst_entry *) dst;
+}
+
+static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+ if (dst->xfrm) {
+ struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+ return xdst->child;
+ }
+#endif
+ return NULL;
+}
+
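With the child pointer now stored in xfrm_dst, walking a bundle is a loop over xfrm_dst_child(). A hedged sketch that counts the stacked transformations on a bundle; it assumes CONFIG_XFRM, since it reads dst->xfrm.

#include <net/xfrm.h>

/* Sketch: walk a bundle from its head dst towards the path dst,
 * counting the stacked transformations. Uses only the helper added
 * above; dst->xfrm is available because CONFIG_XFRM is assumed.
 */
static int demo_bundle_depth(struct dst_entry *dst)
{
        int depth = 0;

        while (dst && dst->xfrm) {
                depth++;
                dst = xfrm_dst_child(dst);
        }
        return depth;
}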
#ifdef CONFIG_XFRM
+static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
+{
+ xdst->child = child;
+}
+
static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
{
xfrm_pols_put(xdst->pols, xdst->num_pols);
@@ -1021,6 +1051,7 @@ struct xfrm_offload {
#define XFRM_GSO_SEGMENT 16
#define XFRM_GRO 32
#define XFRM_ESP_NO_TRAILER 64
+#define XFRM_DEV_RESUME 128
__u32 status;
#define CRYPTO_SUCCESS 1
@@ -1236,12 +1267,12 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
static inline void xfrm_sk_free_policy(struct sock *sk) {}
static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
-static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
-static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
+static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
+static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
-{
- return 1;
-}
+{
+ return 1;
+}
static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
return 1;
@@ -1325,7 +1356,7 @@ __xfrm6_state_addr_check(const struct xfrm_state *x,
{
if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
(ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
- ipv6_addr_any((struct in6_addr *)saddr) ||
+ ipv6_addr_any((struct in6_addr *)saddr) ||
ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
return 1;
return 0;
@@ -1635,7 +1666,7 @@ int xfrm_user_policy(struct sock *sk, int optname,
static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
return -ENOPROTOOPT;
-}
+}
static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
@@ -1847,34 +1878,53 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
return skb->sp->xvec[skb->sp->len - 1];
}
+#endif
+
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
+#ifdef CONFIG_XFRM
struct sec_path *sp = skb->sp;
if (!sp || !sp->olen || sp->len != sp->olen)
return NULL;
return &sp->ovec[sp->olen - 1];
-}
+#else
+ return NULL;
#endif
+}
-void __net_init xfrm_dev_init(void);
+void __init xfrm_dev_init(void);
#ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_user_offload *xuo);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+{
+ struct xfrm_state_offload *xso = &x->xso;
+
+ if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
+ xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
+}
+
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
struct xfrm_state *x = dst->xfrm;
+ struct xfrm_dst *xdst;
if (!x || !x->type_offload)
return false;
- if (x->xso.offload_handle && (x->xso.dev == dst->path->dev) &&
- !dst->child->xfrm)
+ xdst = (struct xfrm_dst *) dst;
+ if (!x->xso.offload_handle && !xdst->child->xfrm)
+ return true;
+ if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
+ !xdst->child->xfrm)
return true;
return false;
@@ -1894,15 +1944,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
- dev->xfrmdev_ops->xdo_dev_state_free(x);
+ if (dev->xfrmdev_ops->xdo_dev_state_free)
+ dev->xfrmdev_ops->xdo_dev_state_free(x);
xso->dev = NULL;
dev_put(dev);
}
}
#else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
+{
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
- return 0;
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+ return skb;
}
static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
@@ -1923,6 +1982,10 @@ static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x
return false;
}
+static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+{
+}
+
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
return false;
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 18c564f60e93..a08cc7278980 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -94,7 +94,7 @@ struct rdma_dev_addr {
* The dev_addr->net field must be initialized.
*/
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr, u16 *vlan_id);
+ struct rdma_dev_addr *dev_addr);
/**
* rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -119,10 +119,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
struct rdma_dev_addr *addr, void *context),
void *context);
-int rdma_resolve_ip_route(struct sockaddr *src_addr,
- const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr);
-
void rdma_addr_cancel(struct rdma_dev_addr *addr);
void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
@@ -130,12 +126,8 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
const unsigned char *dst_dev_addr);
int rdma_addr_size(struct sockaddr *addr);
-
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
-int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
- const union ib_gid *dgid,
- u8 *smac, u16 *vlan_id, int *if_index,
- int *hoplimit);
+int rdma_addr_size_in6(struct sockaddr_in6 *addr);
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
{
@@ -198,34 +190,15 @@ static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
}
}
-static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
-{
- struct net_device *dev;
- struct in_device *ip4;
-
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (dev) {
- ip4 = in_dev_get(dev);
- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
- ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
- (struct in6_addr *)gid);
-
- if (ip4)
- in_dev_put(ip4);
-
- dev_put(dev);
- }
-}
-
+/*
+ * rdma_get/set_sgid/dgid() APIs are applicable to IB and iWARP.
+ * They are not applicable to RoCE.
+ * RoCE GIDs are derived from the IP addresses.
+ */
static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- if (dev_addr->transport == RDMA_TRANSPORT_IB &&
- dev_addr->dev_type != ARPHRD_INFINIBAND)
- iboe_addr_get_sgid(dev_addr, gid);
- else
- memcpy(gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof *gid);
+ memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr),
+ sizeof(*gid));
}
static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 385ec88ee9e5..eb49cc8d1f95 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -55,20 +55,6 @@ int ib_get_cached_gid(struct ib_device *device,
union ib_gid *gid,
struct ib_gid_attr *attr);
-/**
- * ib_find_cached_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
- * @device: The device to query.
- * @gid: The GID value to search for.
- * @gid_type: The GID type to search for.
- * @ndev: In RoCE, the net device of the device. NULL means ignore.
- * @port_num: The port number of the device where the GID value was found.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
- *
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
- */
int ib_find_cached_gid(struct ib_device *device,
const union ib_gid *gid,
enum ib_gid_type gid_type,
@@ -76,21 +62,6 @@ int ib_find_cached_gid(struct ib_device *device,
u8 *port_num,
u16 *index);
-/**
- * ib_find_cached_gid_by_port - Returns the GID table index where a specified
- * GID value occurs
- * @device: The device to query.
- * @gid: The GID value to search for.
- * @gid_type: The GID type to search for.
- * @port_num: The port number of the device where the GID value sould be
- * searched.
- * @ndev: In RoCE, the net device of the device. Null means ignore.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
- *
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
- */
int ib_find_cached_gid_by_port(struct ib_device *device,
const union ib_gid *gid,
enum ib_gid_type gid_type,
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index c124d515f7d5..6e35416170a3 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -313,16 +313,14 @@ static inline u32 ib_bth_get_qpn(struct ib_other_headers *ohdr)
return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK);
}
-static inline u8 ib_bth_get_becn(struct ib_other_headers *ohdr)
+static inline bool ib_bth_get_becn(struct ib_other_headers *ohdr)
{
- return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) &
- IB_BECN_MASK);
+ return (ohdr->bth[1]) & cpu_to_be32(IB_BECN_SMASK);
}
-static inline u8 ib_bth_get_fecn(struct ib_other_headers *ohdr)
+static inline bool ib_bth_get_fecn(struct ib_other_headers *ohdr)
{
- return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) &
- IB_FECN_MASK);
+ return (ohdr->bth[1]) & cpu_to_be32(IB_FECN_SMASK);
}
static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr)
@@ -331,4 +329,13 @@ static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr)
IB_BTH_TVER_MASK);
}
+static inline bool ib_bth_is_solicited(struct ib_other_headers *ohdr)
+{
+ return ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED);
+}
+
+static inline bool ib_bth_is_migration(struct ib_other_headers *ohdr)
+{
+ return ohdr->bth[0] & cpu_to_be32(IB_BTH_MIG_REQ);
+}
#endif /* IB_HDRS_H */
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 1f7f604db5aa..bacb144f7780 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -163,7 +163,15 @@ struct sa_path_rec_ib {
u8 raw_traffic;
};
+/**
+ * struct sa_path_rec_roce - RoCE specific portion of the path record entry
+ * @route_resolved: When set, it indicates that this route is already
+ * resolved for this path record entry.
+ * @dmac: Destination mac address for the given DGID entry
+ * of the path record entry.
+ */
struct sa_path_rec_roce {
+ bool route_resolved;
u8 dmac[ETH_ALEN];
/* ignored in IB */
int ifindex;
@@ -549,12 +557,12 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
struct rdma_ah_attr *ah_attr);
/**
- * ib_init_ah_from_path - Initialize address handle attributes based on an SA
- * path record.
+ * ib_init_ah_attr_from_path - Initialize address handle attributes based on
+ * an SA path record.
*/
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
@@ -590,6 +598,11 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
}
+static inline bool sa_path_is_opa(struct sa_path_rec *rec)
+{
+ return (rec->rec_type == SA_PATH_REC_TYPE_OPA);
+}
+
static inline void sa_path_set_slid(struct sa_path_rec *rec, u32 slid)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fd84cda5ed7c..9fc8a825aa28 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -63,6 +63,9 @@
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
+#include <rdma/restrack.h>
+#include <uapi/rdma/rdma_user_ioctl.h>
+#include <uapi/rdma/ib_user_ioctl_verbs.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -89,8 +92,11 @@ enum ib_gid_type {
#define ROCE_V2_UDP_DPORT 4791
struct ib_gid_attr {
- enum ib_gid_type gid_type;
struct net_device *ndev;
+ struct ib_device *device;
+ enum ib_gid_type gid_type;
+ u16 index;
+ u8 port_num;
};
enum rdma_node_type {
@@ -300,11 +306,6 @@ struct ib_tm_caps {
u32 max_sge;
};
-enum ib_cq_creation_flags {
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
- IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
-};
-
struct ib_cq_init_attr {
unsigned int cqe;
int comp_vector;
@@ -320,6 +321,18 @@ struct ib_cq_caps {
u16 max_cq_moderation_period;
};
+struct ib_dm_mr_attr {
+ u64 length;
+ u64 offset;
+ u32 access_flags;
+};
+
+struct ib_dm_alloc_attr {
+ u64 length;
+ u32 alignment;
+ u32 flags;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -371,6 +384,7 @@ struct ib_device_attr {
u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
struct ib_tm_caps tm_caps;
struct ib_cq_caps cq_caps;
+ u64 max_dm_size;
};
enum ib_mtu {
@@ -473,6 +487,9 @@ enum ib_port_speed {
/**
* struct rdma_hw_stats
+ * @lock - Mutex to protect parallel write access to lifespan and values
+ * of counters, which are 64 bits and not guaranteed to be written
+ * atomically on 32-bit systems.
* @timestamp - Used by the core code to track when the last update was
* @lifespan - Used by the core code to determine how old the counters
* should be before being updated again. Stored in jiffies, defaults
@@ -488,6 +505,7 @@ enum ib_port_speed {
* filled in by the drivers get_stats routine
*/
struct rdma_hw_stats {
+ struct mutex lock; /* Protect lifespan and values[] */
unsigned long timestamp;
unsigned long lifespan;
const char * const *names;
@@ -878,6 +896,7 @@ struct ib_mr_status {
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
enum rdma_ah_attr_type {
+ RDMA_AH_ATTR_TYPE_UNDEFINED,
RDMA_AH_ATTR_TYPE_IB,
RDMA_AH_ATTR_TYPE_ROCE,
RDMA_AH_ATTR_TYPE_OPA,
@@ -983,9 +1002,9 @@ struct ib_wc {
u32 invalidate_rkey;
} ex;
u32 src_qp;
+ u32 slid;
int wc_flags;
u16 pkey_index;
- u32 slid;
u8 sl;
u8 dlid_path_bits;
u8 port_num; /* valid only for DR SMPs on switches */
@@ -1082,6 +1101,7 @@ enum ib_qp_type {
IB_QPT_XRC_INI = 9,
IB_QPT_XRC_TGT,
IB_QPT_MAX,
+ IB_QPT_DRIVER = 0xFF,
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer, so the
* IB_QPT_MAX usages should not be affected in the core layer
@@ -1529,6 +1549,7 @@ struct ib_pd {
* Implementation details of the RDMA core, don't use in drivers:
*/
struct ib_mr *__internal_mr;
+ struct rdma_restrack_entry res;
};
struct ib_xrcd {
@@ -1569,6 +1590,10 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_srq {
@@ -1745,6 +1770,19 @@ struct ib_qp {
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
u8 port;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
+};
+
+struct ib_dm {
+ struct ib_device *device;
+ u32 length;
+ u32 flags;
+ struct ib_uobject *uobject;
+ atomic_t usecnt;
};
struct ib_mr {
@@ -1760,6 +1798,13 @@ struct ib_mr {
struct ib_uobject *uobject; /* user */
struct list_head qp_entry; /* FR */
};
+
+ struct ib_dm *dm;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_mw {
@@ -1802,6 +1847,7 @@ enum ib_flow_spec_type {
/* L3 header*/
IB_FLOW_SPEC_IPV4 = 0x30,
IB_FLOW_SPEC_IPV6 = 0x31,
+ IB_FLOW_SPEC_ESP = 0x34,
/* L4 headers*/
IB_FLOW_SPEC_TCP = 0x40,
IB_FLOW_SPEC_UDP = 0x41,
@@ -1810,6 +1856,7 @@ enum ib_flow_spec_type {
/* Actions */
IB_FLOW_SPEC_ACTION_TAG = 0x1000,
IB_FLOW_SPEC_ACTION_DROP = 0x1001,
+ IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
@@ -1827,7 +1874,8 @@ enum ib_flow_domain {
enum ib_flow_flags {
IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
- IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */
+ IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
+ IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
};
struct ib_flow_eth_filter {
@@ -1932,6 +1980,20 @@ struct ib_flow_spec_tunnel {
struct ib_flow_tunnel_filter mask;
};
+struct ib_flow_esp_filter {
+ __be32 spi;
+ __be32 seq;
+ /* Must be last */
+ u8 real_sz[0];
+};
+
+struct ib_flow_spec_esp {
+ u32 type;
+ u16 size;
+ struct ib_flow_esp_filter val;
+ struct ib_flow_esp_filter mask;
+};
+
struct ib_flow_spec_action_tag {
enum ib_flow_spec_type type;
u16 size;
@@ -1943,6 +2005,12 @@ struct ib_flow_spec_action_drop {
u16 size;
};
+struct ib_flow_spec_action_handle {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_flow_action *act;
+};
+
union ib_flow_spec {
struct {
u32 type;
@@ -1954,8 +2022,10 @@ union ib_flow_spec {
struct ib_flow_spec_tcp_udp tcp_udp;
struct ib_flow_spec_ipv6 ipv6;
struct ib_flow_spec_tunnel tunnel;
+ struct ib_flow_spec_esp esp;
struct ib_flow_spec_action_tag flow_tag;
struct ib_flow_spec_action_drop drop;
+ struct ib_flow_spec_action_handle action;
};
struct ib_flow_attr {
@@ -1976,6 +2046,64 @@ struct ib_flow {
struct ib_uobject *uobject;
};
+enum ib_flow_action_type {
+ IB_FLOW_ACTION_UNSPECIFIED,
+ IB_FLOW_ACTION_ESP = 1,
+};
+
+struct ib_flow_action_attrs_esp_keymats {
+ enum ib_uverbs_flow_action_esp_keymat protocol;
+ union {
+ struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
+ } keymat;
+};
+
+struct ib_flow_action_attrs_esp_replays {
+ enum ib_uverbs_flow_action_esp_replay protocol;
+ union {
+ struct ib_uverbs_flow_action_esp_replay_bmp bmp;
+ } replay;
+};
+
+enum ib_flow_action_attrs_esp_flags {
+ /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
+ * This is done in order to share the same flags between user-space and
+ * kernel and spare an unnecessary translation.
+ */
+
+ /* Kernel flags */
+ IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
+ IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
+};
+
+struct ib_flow_spec_list {
+ struct ib_flow_spec_list *next;
+ union ib_flow_spec spec;
+};
+
+struct ib_flow_action_attrs_esp {
+ struct ib_flow_action_attrs_esp_keymats *keymat;
+ struct ib_flow_action_attrs_esp_replays *replay;
+ struct ib_flow_spec_list *encap;
+ /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
+ * Value of 0 is a valid value.
+ */
+ u32 esn;
+ u32 spi;
+ u32 seq;
+ u32 tfc_pad;
+ /* Use enum ib_flow_action_attrs_esp_flags */
+ u64 flags;
+ u64 hard_limit_pkts;
+};
+
+struct ib_flow_action {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ enum ib_flow_action_type type;
+ atomic_t usecnt;
+};
+
struct ib_mad_hdr;
struct ib_grh;
@@ -2052,6 +2180,8 @@ struct ib_port_pkey_list {
struct list_head pkey_list;
};
+struct uverbs_attr_bundle;
+
struct ib_device {
/* Do not access @dma_device directly from ULP nor from HW drivers. */
struct device *dma_device;
@@ -2114,42 +2244,41 @@ struct ib_device {
* net device of device @device at port @port_num or NULL if such
* a net device doesn't exist. The vendor driver should call dev_hold
* on this net device. The HW vendor's device driver must guarantee
- * that this function returns NULL before the net device reaches
- * NETDEV_UNREGISTER_FINAL state.
+ * that this function returns NULL before the net device has finished
+ * NETDEV_UNREGISTER state.
*/
struct net_device *(*get_netdev)(struct ib_device *device,
u8 port_num);
+ /* query_gid should return the GID value for @device when the
+ * @port_num link layer is either IB or iWARP. It is a no-op if the
+ * @port_num port link layer is RoCE.
+ */
int (*query_gid)(struct ib_device *device,
u8 port_num, int index,
union ib_gid *gid);
- /* When calling add_gid, the HW vendor's driver should
- * add the gid of device @device at gid index @index of
- * port @port_num to be @gid. Meta-info of that gid (for example,
- * the network device related to this gid is available
- * at @attr. @context allows the HW vendor driver to store extra
- * information together with a GID entry. The HW vendor may allocate
- * memory to contain this information and store it in @context when a
- * new GID entry is written to. Params are consistent until the next
- * call of add_gid or delete_gid. The function should return 0 on
+ /* When calling add_gid, the HW vendor's driver should add the gid
+ * of the device's port at the gid index available at @attr. Meta-info of
+ * that gid (for example, the network device related to this gid) is
+ * available at @attr. @context allows the HW vendor driver to store
+ * extra information together with a GID entry. The HW vendor driver may
+ * allocate memory to contain this information and store it in @context
+ * when a new GID entry is written to. Params are consistent until the
+ * next call of add_gid or delete_gid. The function should return 0 on
* success or error otherwise. The function could be called
- * concurrently for different ports. This function is only called
- * when roce_gid_table is used.
+ * concurrently for different ports. This function is only called when
+ * roce_gid_table is used.
*/
- int (*add_gid)(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- const union ib_gid *gid,
+ int (*add_gid)(const union ib_gid *gid,
const struct ib_gid_attr *attr,
void **context);
/* When calling del_gid, the HW vendor's driver should delete the
- * gid of device @device at gid index @index of port @port_num.
+ * gid of device @device at gid index gid_index of port port_num
+ * available in @attr.
* Upon the deletion of a GID entry, the HW vendor must free any
* allocated memory. The caller will clear @context afterwards.
* This function is only called when roce_gid_table is used.
*/
- int (*del_gid)(struct ib_device *device,
- u8 port_num,
- unsigned int index,
+ int (*del_gid)(const struct ib_gid_attr *attr,
void **context);
int (*query_pkey)(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey);
@@ -2307,6 +2436,21 @@ struct ib_device {
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
+ struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device,
+ const struct ib_flow_action_attrs_esp *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*destroy_flow_action)(struct ib_flow_action *action);
+ int (*modify_flow_action_esp)(struct ib_flow_action *action,
+ const struct ib_flow_action_attrs_esp *attr,
+ struct uverbs_attr_bundle *attrs);
+ struct ib_dm * (*alloc_dm)(struct ib_device *device,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*dealloc_dm)(struct ib_dm *dm);
+ struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs);
/**
* rdma netdev operation
*
@@ -2351,6 +2495,10 @@ struct ib_device {
#endif
u32 index;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers
+ */
+ struct rdma_restrack_root res;
/**
* The following mandatory functions are used only at device
@@ -2364,6 +2512,7 @@ struct ib_device {
int comp_vector);
struct uverbs_root_spec *specs_root;
+ enum rdma_driver_id driver_id;
};
struct ib_client {
@@ -2423,11 +2572,9 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
-static inline bool ib_is_udata_cleared(struct ib_udata *udata,
- size_t offset,
- size_t len)
+static inline bool ib_is_buffer_cleared(const void __user *p,
+ size_t len)
{
- const void __user *p = udata->inbuf + offset;
bool ret;
u8 *buf;
@@ -2443,6 +2590,13 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
return ret;
}
+static inline bool ib_is_udata_cleared(struct ib_udata *udata,
+ size_t offset,
+ size_t len)
+{
+ return ib_is_buffer_cleared(udata->inbuf + offset, len);
+}
+
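
As a hedged usage sketch (the cmd structure is hypothetical), a driver can keep using the wrapper to reject requests whose trailing, unknown-to-this-kernel input bytes are not zero:

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;
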
/**
* ib_modify_qp_is_ok - Check that the supplied attribute mask
* contains all required attributes and no attributes not allowed for
@@ -2459,9 +2613,9 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
* transition from cur_state to next_state is allowed by the IB spec,
* and that the attribute mask supplied is allowed for the transition.
*/
-int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll);
+bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
+ enum ib_qp_type type, enum ib_qp_attr_mask mask,
+ enum rdma_link_layer ll);
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
@@ -2836,7 +2990,6 @@ int ib_modify_port(struct ib_device *device,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
u8 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
@@ -2858,7 +3011,7 @@ enum ib_pd_flags {
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
const char *caller);
#define ib_alloc_pd(device, flags) \
- __ib_alloc_pd((device), (flags), __func__)
+ __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);
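
Call sites are unchanged by the switch from __func__ to KBUILD_MODNAME; a kernel ULP still writes, for example:

	struct ib_pd *pd = ib_alloc_pd(device, 0);	/* caller is now recorded as the module name */

	if (IS_ERR(pd))
		return PTR_ERR(pd);
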
/**
@@ -2905,7 +3058,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
/**
- * ib_init_ah_from_wc - Initializes address handle attributes from a
+ * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
* work completion.
* @device: Device on which the received message arrived.
* @port_num: Port on which the received message arrived.
@@ -2915,9 +3068,9 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -3135,8 +3288,12 @@ static inline int ib_post_recv(struct ib_qp *qp,
return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller);
+#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
+ __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
+
void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
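
The CQ allocator follows the same pattern; a minimal, hedged example of an unchanged call site:

	struct ib_cq *cq = ib_alloc_cq(device, NULL, 128, 0, IB_POLL_SOFTIRQ);

	if (IS_ERR(cq))
		return PTR_ERR(cq);
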
@@ -3202,18 +3359,6 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
}
/**
- * ib_peek_cq - Returns the number of unreaped completions currently
- * on the specified CQ.
- * @cq: The CQ to peek.
- * @wc_cnt: A minimum number of unreaped completions to check for.
- *
- * If the number of unreaped completions is greater than or equal to wc_cnt,
- * this function returns wc_cnt, otherwise, it returns the actual number of
- * unreaped completions.
- */
-int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-/**
* ib_req_notify_cq - Request completion notification on a CQ.
* @cq: The CQ to generate an event for.
* @flags:
@@ -3560,8 +3705,11 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
/**
* ib_alloc_xrcd - Allocates an XRC domain.
* @device: The device on which to allocate the XRC domain.
+ * @caller: Module name for kernel consumers
*/
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
+#define ib_alloc_xrcd(device) \
+ __ib_alloc_xrcd((device), KBUILD_MODNAME)
/**
* ib_dealloc_xrcd - Deallocates an XRC domain.
@@ -3789,18 +3937,24 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
grh->traffic_class = traffic_class;
}
-/*Get AH type */
+/**
+ * rdma_ah_find_type - Return address handle type.
+ *
+ * @dev: Device to be checked
+ * @port_num: Port number
+ */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
- u32 port_num)
+ u8 port_num)
{
- if ((rdma_protocol_roce(dev, port_num)) ||
- (rdma_protocol_iwarp(dev, port_num)))
+ if (rdma_protocol_roce(dev, port_num))
return RDMA_AH_ATTR_TYPE_ROCE;
- else if ((rdma_protocol_ib(dev, port_num)) &&
- (rdma_cap_opa_ah(dev, port_num)))
- return RDMA_AH_ATTR_TYPE_OPA;
- else
+ if (rdma_protocol_ib(dev, port_num)) {
+ if (rdma_cap_opa_ah(dev, port_num))
+ return RDMA_AH_ATTR_TYPE_OPA;
return RDMA_AH_ATTR_TYPE_IB;
+ }
+
+ return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
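
A brief usage sketch: callers can now handle the new undefined case explicitly (the error handling shown is illustrative only):

	switch (rdma_ah_find_type(dev, port_num)) {
	case RDMA_AH_ATTR_TYPE_ROCE:
	case RDMA_AH_ATTR_TYPE_IB:
	case RDMA_AH_ATTR_TYPE_OPA:
		break;				/* supported fabric types */
	case RDMA_AH_ATTR_TYPE_UNDEFINED:
		return -EINVAL;
	}
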
/**
@@ -3850,4 +4004,12 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
}
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices.
+ *
+ * @ibdev: the rdma device
+ */
+void rdma_roce_rescan_device(struct ib_device *ibdev);
+
#endif /* IB_VERBS_H */
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index f68fca296631..2bbb7a67e643 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -114,4 +114,20 @@ static inline u32 opa_get_mcast_base(u32 nr_top_bits)
return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits));
}
+/* Check for a valid unicast LID for non-SM traffic types */
+static inline bool rdma_is_valid_unicast_lid(struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ be32_to_cpu(IB_MULTICAST_LID_BASE))
+ return false;
+ } else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ return false;
+ }
+ return true;
+}
#endif /* OPA_ADDR_H */
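
An illustrative caller-side check before transmitting on an address handle (ah_attr is assumed from the surrounding code):

	if (!rdma_is_valid_unicast_lid(&ah_attr))
		return -EINVAL;
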
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 3d2eed3c4e75..690934733ba7 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -38,6 +38,7 @@
#include <linux/in6.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_sa.h>
+#include <uapi/rdma/rdma_user_cm.h>
/*
* Upon receiving a device removal event, users must destroy the associated
@@ -64,14 +65,6 @@ enum rdma_cm_event_type {
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event);
-enum rdma_port_space {
- RDMA_PS_SDP = 0x0001,
- RDMA_PS_IPOIB = 0x0002,
- RDMA_PS_IB = 0x013F,
- RDMA_PS_TCP = 0x0106,
- RDMA_PS_UDP = 0x0111,
-};
-
#define RDMA_IB_IP_PS_MASK 0xFFFFFFFFFFFF0000ULL
#define RDMA_IB_IP_PS_TCP 0x0000000001060000ULL
#define RDMA_IB_IP_PS_UDP 0x0000000001110000ULL
@@ -120,20 +113,6 @@ struct rdma_cm_event {
} param;
};
-enum rdma_cm_state {
- RDMA_CM_IDLE,
- RDMA_CM_ADDR_QUERY,
- RDMA_CM_ADDR_RESOLVED,
- RDMA_CM_ROUTE_QUERY,
- RDMA_CM_ROUTE_RESOLVED,
- RDMA_CM_CONNECT,
- RDMA_CM_DISCONNECT,
- RDMA_CM_ADDR_BOUND,
- RDMA_CM_LISTEN,
- RDMA_CM_DEVICE_REMOVAL,
- RDMA_CM_DESTROYING
-};
-
struct rdma_cm_id;
/**
@@ -152,11 +131,17 @@ struct rdma_cm_id {
struct ib_qp *qp;
rdma_cm_event_handler event_handler;
struct rdma_route route;
- enum rdma_port_space ps;
+ enum rdma_ucm_port_space ps;
enum ib_qp_type qp_type;
u8 port_num;
};
+struct rdma_cm_id *__rdma_create_id(struct net *net,
+ rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type,
+ const char *caller);
+
/**
* rdma_create_id - Create an RDMA identifier.
*
@@ -169,10 +154,9 @@ struct rdma_cm_id {
*
* The id holds a reference on the network namespace until it is destroyed.
*/
-struct rdma_cm_id *rdma_create_id(struct net *net,
- rdma_cm_event_handler event_handler,
- void *context, enum rdma_port_space ps,
- enum ib_qp_type qp_type);
+#define rdma_create_id(net, event_handler, context, ps, qp_type) \
+ __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \
+ KBUILD_MODNAME)
/**
* rdma_destroy_id - Destroys an RDMA identifier.
@@ -284,6 +268,9 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
*/
int rdma_listen(struct rdma_cm_id *id, int backlog);
+int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ const char *caller);
+
/**
* rdma_accept - Called to accept a connection request or response.
* @id: Connection identifier associated with the request.
@@ -299,7 +286,8 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
* state of the qp associated with the id is modified to error, such that any
* previously posted receive buffers would be flushed.
*/
-int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+#define rdma_accept(id, conn_param) \
+ __rdma_accept((id), (conn_param), KBUILD_MODNAME)
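
From a kernel ULP's point of view the API is unchanged; the macros only record the module name. A hedged example, where my_cm_handler, my_ctx and conn_param are assumptions from the surrounding ULP code:

	struct rdma_cm_id *id;

	id = rdma_create_id(&init_net, my_cm_handler, my_ctx,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* ... later, on the passive side, accepting is also unchanged: */
	return rdma_accept(id, &conn_param);
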
/**
* rdma_notify - Notifies the RDMA CM of an asynchronous event that has
@@ -413,4 +401,23 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len);
+/**
+ * rdma_read_gids - Return the SGID and DGID used for establishing
+ * connection. This can be used after rdma_resolve_addr()
+ * on the client side, or on a new connection on the server
+ * side. This is applicable to IB, RoCE and iWARP.
+ * If the cm_id is not yet bound to an RDMA device, it doesn't
+ * copy an SGID or DGID to the given pointers.
+ * @id: Communication identifier whose GIDs are queried.
+ * @sgid: Pointer to SGID where SGID will be returned. It is optional.
+ * @dgid: Pointer to DGID where DGID will be returned. It is optional.
+ * Note: This API should not be used by any new ULPs or new code.
+ * Instead, users interested in querying GIDs should refer to path record
+ * of the rdma_cm_id to query the GIDs.
+ * This API is provided for compatibility for existing users.
+ */
+
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid);
+
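
A short usage sketch for an existing consumer on the server side (cm_id is assumed to be an already accepted connection):

	union ib_gid sgid, dgid;

	rdma_read_gids(cm_id, &sgid, &dgid);	/* either pointer may be NULL */
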
#endif /* RDMA_CM_H */
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 6947a6ba2557..6a69d71a21a5 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -36,17 +36,17 @@
#include <rdma/rdma_cm.h>
/**
- * rdma_set_ib_paths - Manually sets the path records used to establish a
+ * rdma_set_ib_path - Manually sets the path record used to establish a
* connection.
* @id: Connection identifier associated with the request.
* @path_rec: Reference to the path record
*
* This call permits a user to specify routing information for rdma_cm_id's
- * bound to Infiniband devices. It is called on the client side of a
+ * bound to InfiniBand devices. It is called on the client side of a
* connection and replaces the call to rdma_resolve_route.
*/
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths);
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec);
/* Global qkey for UDP QPs and multicast groups. */
#define RDMA_UDP_QKEY 0x01234567
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 1ba84a78f1c5..3f4c187e435d 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -228,13 +228,6 @@ struct rvt_driver_provided {
int (*port_callback)(struct ib_device *, u8, struct kobject *);
/*
- * Returns a string to represent the device for which is being
- * registered. This is primarily used for error and debug messages on
- * the console.
- */
- const char * (*get_card_name)(struct rvt_dev_info *rdi);
-
- /*
* Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
* in an output message
@@ -419,6 +412,30 @@ struct rvt_dev_info {
};
+/**
+ * rvt_set_ibdev_name - Craft an IB device name from client info
+ * @rdi: pointer to the client rvt_dev_info structure
+ * @fmt: format string used to build the name from @name and @unit
+ * @name: client specific name
+ * @unit: client specific unit number.
+ */
+static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
+ const char *fmt, const char *name,
+ const int unit)
+{
+ snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
+}
+
+/**
+ * rvt_get_ibdev_name - return the IB name
+ * @rdi: rdmavt device
+ *
+ * Return the registered name of the device.
+ */
+static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
+{
+ return rdi->ibdev.name;
+}
+
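
A hedged example of how an rdmavt-based driver might use the new helpers (the "foo_ib" prefix and the unit variable are made up for illustration):

	rvt_set_ibdev_name(rdi, "%s%d", "foo_ib", unit);	/* e.g. "foo_ib0" */
	pr_info("registered %s\n", rvt_get_ibdev_name(rdi));
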
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct rvt_pd, ibpd);
@@ -521,7 +538,7 @@ static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
-int rvt_register_device(struct rvt_dev_info *rvd);
+int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
new file mode 100644
index 000000000000..f3b3e3576f6a
--- /dev/null
+++ b/include/rdma/restrack.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_RESTRACK_H_
+#define _RDMA_RESTRACK_H_
+
+#include <linux/typecheck.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+#include <linux/sched/task.h>
+
+/**
+ * enum rdma_restrack_type - HW objects to track
+ */
+enum rdma_restrack_type {
+ /**
+ * @RDMA_RESTRACK_PD: Protection domain (PD)
+ */
+ RDMA_RESTRACK_PD,
+ /**
+ * @RDMA_RESTRACK_CQ: Completion queue (CQ)
+ */
+ RDMA_RESTRACK_CQ,
+ /**
+ * @RDMA_RESTRACK_QP: Queue pair (QP)
+ */
+ RDMA_RESTRACK_QP,
+ /**
+ * @RDMA_RESTRACK_CM_ID: Connection Manager ID (CM_ID)
+ */
+ RDMA_RESTRACK_CM_ID,
+ /**
+ * @RDMA_RESTRACK_MR: Memory Region (MR)
+ */
+ RDMA_RESTRACK_MR,
+ /**
+ * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
+ */
+ RDMA_RESTRACK_MAX
+};
+
+#define RDMA_RESTRACK_HASH_BITS 8
+/**
+ * struct rdma_restrack_root - main resource tracking management
+ * entity, per-device
+ */
+struct rdma_restrack_root {
+ /**
+ * @rwsem: Read/write lock to protect lists
+ */
+ struct rw_semaphore rwsem;
+ /**
+ * @hash: global database for all resources per-device
+ */
+ DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
+};
+
+/**
+ * struct rdma_restrack_entry - metadata per-entry
+ */
+struct rdma_restrack_entry {
+ /**
+ * @valid: validity indicator
+ *
+ * The entries are filled during rdma_restrack_add and
+ * can be freed during rdma_restrack_del.
+ *
+ * As an example for that, see mlx5 QPs with type MLX5_IB_QPT_HW_GSI
+ */
+ bool valid;
+ /**
+ * @kref: Protects destruction of the resource
+ */
+ struct kref kref;
+ /**
+ * @comp: Signals that all consumers of the resource have completed their work
+ */
+ struct completion comp;
+ /**
+ * @task: owner of resource tracking entity
+ *
+ * There are two types of entities: created by user and created
+ * by kernel.
+ *
+ * This is relevant for the entities created by users.
+ * For the entities created by kernel, this pointer will be NULL.
+ */
+ struct task_struct *task;
+ /**
+ * @kern_name: name of owner for the kernel created entities.
+ */
+ const char *kern_name;
+ /**
+ * @node: hash table entry
+ */
+ struct hlist_node node;
+ /**
+ * @type: type of the object tracked in the restrack database
+ */
+ enum rdma_restrack_type type;
+};
+
+/**
+ * rdma_restrack_init() - initialize resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_init(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_clean() - clean resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_clean(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_count() - return the current usage of a specific object type
+ * @res: resource tracking root
+ * @type: actual type of object to count
+ * @ns: PID namespace
+ */
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns);
+
+/**
+ * rdma_restrack_add() - add object to the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_add(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_del() - delete object from the reource tracking database
+ * @res: resource entry
+ * @type: actual type of object to operate
+ */
+void rdma_restrack_del(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_is_kernel_res() - check the owner of resource
+ * @res: resource entry
+ */
+static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
+{
+ return !res->task;
+}
+
+/**
+ * rdma_restrack_get() - take a reference to protect the resource from release
+ * @res: resource entry
+ */
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_put() - release resource
+ * @res: resource entry
+ */
+int rdma_restrack_put(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_set_task() - set the task for this resource
+ * @res: resource entry
+ * @task: task struct
+ */
+static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ struct task_struct *task)
+{
+ if (res->task)
+ put_task_struct(res->task);
+ get_task_struct(task);
+ res->task = task;
+}
+
+#endif /* _RDMA_RESTRACK_H_ */
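
To sketch how the core is expected to use this API, assuming the tracked object embeds a struct rdma_restrack_entry the same way struct ib_device embeds the root above (readers of the table take a temporary reference with rdma_restrack_get()/rdma_restrack_put()):

	/* At creation time, e.g. for a kernel-owned QP: */
	qp->res.type = RDMA_RESTRACK_QP;
	rdma_restrack_add(&qp->res);

	/* ... and at destroy time: */
	rdma_restrack_del(&qp->res);
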
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 6da44079aa58..4a4201d997a7 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <rdma/rdma_user_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>
+#include <rdma/ib_user_ioctl_cmds.h>
/*
* =======================================
@@ -50,6 +51,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_PTR_OUT,
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
+ UVERBS_ATTR_TYPE_ENUM_IN,
};
enum uverbs_obj_access {
@@ -61,15 +63,32 @@ enum uverbs_obj_access {
enum {
UVERBS_ATTR_SPEC_F_MANDATORY = 1U << 0,
- /* Support extending attributes by length */
- UVERBS_ATTR_SPEC_F_MIN_SZ = 1U << 1,
+ /* Support extending attributes by length; all trailing bytes unknown to the kernel must be zero */
+ UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO = 1U << 1,
};
+/* Specification of a single attribute inside the ioctl message */
struct uverbs_attr_spec {
- enum uverbs_attr_type type;
union {
- u16 len;
+ /* Header shared by all following union members - to reduce space. */
struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ };
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ /* Current known size to kernel */
+ u16 len;
+ /* User isn't allowed to provide something < min_len */
+ u16 min_len;
+ } ptr;
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
/*
* higher bits mean the namespace and lower bits mean
* the type id within the namespace.
@@ -77,9 +96,19 @@ struct uverbs_attr_spec {
u16 obj_type;
u8 access;
} obj;
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ u8 num_elems;
+ /*
+ * The enum attribute can select one of the attributes
+ * contained in the ids array. Currently only PTR_IN
+ * attributes are supported in the ids array.
+ */
+ const struct uverbs_attr_spec *ids;
+ } enum_def;
};
- /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
- u8 flags;
};
struct uverbs_attr_spec_hash {
@@ -164,30 +193,45 @@ struct uverbs_object_tree_def {
};
#define UA_FLAGS(_flags) .flags = _flags
-#define __UVERBS_ATTR0(_id, _len, _type, ...) \
+#define __UVERBS_ATTR0(_id, _type, _fld, _attr, ...) \
+ ((const struct uverbs_attr_def) \
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, .flags = 0, } }, } })
+#define __UVERBS_ATTR1(_id, _type, _fld, _attr, _extra1, ...) \
((const struct uverbs_attr_def) \
- {.id = _id, .attr = {.type = _type, {.len = _len}, .flags = 0, } })
-#define __UVERBS_ATTR1(_id, _len, _type, _flags) \
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1 } },} })
+#define __UVERBS_ATTR2(_id, _type, _fld, _attr, _extra1, _extra2) \
((const struct uverbs_attr_def) \
- {.id = _id, .attr = {.type = _type, {.len = _len}, _flags, } })
-#define __UVERBS_ATTR(_id, _len, _type, _flags, _n, ...) \
- __UVERBS_ATTR##_n(_id, _len, _type, _flags)
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1, _extra2 } },} })
+#define __UVERBS_ATTR(_id, _type, _fld, _attr, _extra1, _extra2, _n, ...) \
+ __UVERBS_ATTR##_n(_id, _type, _fld, _attr, _extra1, _extra2)
+
+#define UVERBS_ATTR_TYPE(_type) \
+ .min_len = sizeof(_type), .len = sizeof(_type)
+#define UVERBS_ATTR_STRUCT(_type, _last) \
+ .min_len = ((uintptr_t)(&((_type *)0)->_last + 1)), .len = sizeof(_type)
+#define UVERBS_ATTR_SIZE(_min_len, _len) \
+ .min_len = _min_len, .len = _len
+
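
These size helpers are meant to feed the attribute macros below; an illustrative use (the attribute id and command struct are hypothetical):

	UVERBS_ATTR_PTR_IN(MY_METHOD_ATTR_CMD,
			   UVERBS_ATTR_TYPE(struct my_cmd),
			   UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY))
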
/*
* In new compiler, UVERBS_ATTR could be simplified by declaring it as
* [_id] = {.type = _type, .len = _len, ##__VA_ARGS__}
* But since we support older compilers too, we need the more complex code.
*/
-#define UVERBS_ATTR(_id, _len, _type, ...) \
- __UVERBS_ATTR(_id, _len, _type, ##__VA_ARGS__, 1, 0)
+#define UVERBS_ATTR(_id, _type, _fld, _attr, ...) \
+ __UVERBS_ATTR(_id, _type, _fld, _attr, ##__VA_ARGS__, 2, 1, 0)
#define UVERBS_ATTR_PTR_IN_SZ(_id, _len, ...) \
- UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_IN, ##__VA_ARGS__)
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_IN, ptr, _len, ##__VA_ARGS__)
/* If sizeof(_type) <= sizeof(u64), this will be inlined rather than a pointer */
#define UVERBS_ATTR_PTR_IN(_id, _type, ...) \
- UVERBS_ATTR_PTR_IN_SZ(_id, sizeof(_type), ##__VA_ARGS__)
+ UVERBS_ATTR_PTR_IN_SZ(_id, _type, ##__VA_ARGS__)
#define UVERBS_ATTR_PTR_OUT_SZ(_id, _len, ...) \
- UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_OUT, ##__VA_ARGS__)
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_OUT, ptr, _len, ##__VA_ARGS__)
#define UVERBS_ATTR_PTR_OUT(_id, _type, ...) \
- UVERBS_ATTR_PTR_OUT_SZ(_id, sizeof(_type), ##__VA_ARGS__)
+ UVERBS_ATTR_PTR_OUT_SZ(_id, _type, ##__VA_ARGS__)
+#define UVERBS_ATTR_ENUM_IN(_id, _enum_arr, ...) \
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_ENUM_IN, enum_def, \
+ .ids = (_enum_arr), \
+ .num_elems = ARRAY_SIZE(_enum_arr), ##__VA_ARGS__)
/*
* In new compiler, UVERBS_ATTR_IDR (and FD) could be simplified by declaring
@@ -202,15 +246,13 @@ struct uverbs_object_tree_def {
#define ___UVERBS_ATTR_OBJ0(_id, _obj_class, _obj_type, _access, ...)\
((const struct uverbs_attr_def) \
{.id = _id, \
- .attr = {.type = _obj_class, \
- {.obj = {.obj_type = _obj_type, .access = _access } },\
- .flags = 0} })
+ .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \
+ .access = _access, .flags = 0 } }, } })
#define ___UVERBS_ATTR_OBJ1(_id, _obj_class, _obj_type, _access, _flags)\
((const struct uverbs_attr_def) \
{.id = _id, \
- .attr = {.type = _obj_class, \
- {.obj = {.obj_type = _obj_type, .access = _access} }, \
- _flags} })
+ .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \
+ .access = _access, _flags} }, } })
#define ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, _flags, \
_n, ...) \
___UVERBS_ATTR_OBJ##_n(_id, _obj_class, _obj_type, _access, _flags)
@@ -229,6 +271,11 @@ struct uverbs_object_tree_def {
#define DECLARE_UVERBS_ATTR_SPEC(_name, ...) \
const struct uverbs_attr_def _name = __VA_ARGS__
+#define DECLARE_UVERBS_ENUM(_name, ...) \
+ const struct uverbs_enum_spec _name = { \
+ .len = ARRAY_SIZE(((struct uverbs_attr_spec[]){__VA_ARGS__})),\
+ .ids = {__VA_ARGS__}, \
+ }
#define _UVERBS_METHOD_ATTRS_SZ(...) \
(sizeof((const struct uverbs_attr_def * const []){__VA_ARGS__}) /\
sizeof(const struct uverbs_attr_def *))
@@ -276,13 +323,11 @@ struct uverbs_object_tree_def {
*/
struct uverbs_ptr_attr {
- union {
- u64 data;
- void __user *ptr;
- };
+ u64 data;
u16 len;
/* Combination of bits from enum UVERBS_ATTR_F_XXXX */
u16 flags;
+ u8 enum_id;
};
struct uverbs_obj_attr {
@@ -339,6 +384,8 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b
idx & ~UVERBS_ID_NS_MASK);
}
+#define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT)
+
static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle,
u16 idx)
{
@@ -350,39 +397,112 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr
return &attrs_bundle->hash[idx_bucket].attrs[idx & ~UVERBS_ID_NS_MASK];
}
+static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ return attr->ptr_attr.enum_id;
+}
+
+static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject;
+
+ if (IS_ERR(uobj))
+ return uobj;
+
+ return uobj->object;
+}
+
static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
- size_t idx, const void *from)
+ size_t idx, const void *from, size_t size)
{
const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
u16 flags;
+ size_t min_size;
if (IS_ERR(attr))
return PTR_ERR(attr);
+ min_size = min_t(size_t, attr->ptr_attr.len, size);
+ if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
+ return -EFAULT;
+
flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT;
- return (!copy_to_user(attr->ptr_attr.ptr, from, attr->ptr_attr.len) &&
- !put_user(flags, &attr->uattr->flags)) ? 0 : -EFAULT;
+ if (put_user(flags, &attr->uattr->flags))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
+{
+ return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
}
-static inline int _uverbs_copy_from(void *to, size_t to_size,
+static inline int _uverbs_copy_from(void *to,
const struct uverbs_attr_bundle *attrs_bundle,
- size_t idx)
+ size_t idx,
+ size_t size)
{
const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
if (IS_ERR(attr))
return PTR_ERR(attr);
- if (to_size <= sizeof(((struct ib_uverbs_attr *)0)->data))
+ /*
+ * Validation ensures attr->ptr_attr.len >= size. If the caller is
+ * using UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO then it must call
+ * uverbs_copy_from_or_zero.
+ */
+ if (unlikely(size < attr->ptr_attr.len))
+ return -EINVAL;
+
+ if (uverbs_attr_ptr_is_inline(attr))
memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len);
- else if (copy_from_user(to, attr->ptr_attr.ptr, attr->ptr_attr.len))
+ else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
+ attr->ptr_attr.len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int _uverbs_copy_from_or_zero(void *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx,
+ size_t size)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+ size_t min_size;
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ min_size = min_t(size_t, size, attr->ptr_attr.len);
+
+ if (uverbs_attr_ptr_is_inline(attr))
+ memcpy(to, &attr->ptr_attr.data, min_size);
+ else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
+ min_size))
return -EFAULT;
+ if (size > min_size)
+ memset(to + min_size, 0, size - min_size);
+
return 0;
}
#define uverbs_copy_from(to, attrs_bundle, idx) \
- _uverbs_copy_from(to, sizeof(*(to)), attrs_bundle, idx)
+ _uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to))
+
+#define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \
+ _uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to))
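
A hedged handler fragment tying the copy helpers together (the structs and attribute ids are hypothetical, attrs is the bundle passed to the handler):

	struct my_cmd cmd;
	struct my_resp resp = {};
	int ret;

	ret = uverbs_copy_from_or_zero(&cmd, attrs, MY_ATTR_CMD_IN);
	if (ret)
		return ret;

	/* ... handle the command, fill resp ... */

	ret = uverbs_copy_to(attrs, MY_ATTR_RESP_OUT, &resp, sizeof(resp));
	if (IS_UVERBS_COPY_ERR(ret))
		return ret;

	return 0;
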
/* =================================================
* Definitions -> Specs infrastructure
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
new file mode 100644
index 000000000000..c5bb4ebdb0b0
--- /dev/null
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _UVERBS_NAMED_IOCTL_
+#define _UVERBS_NAMED_IOCTL_
+
+#include <rdma/uverbs_ioctl.h>
+
+#ifndef UVERBS_MODULE_NAME
+#error "Please #define UVERBS_MODULE_NAME before including rdma/uverbs_named_ioctl.h"
+#endif
+
+#define _UVERBS_PASTE(x, y) x ## y
+#define _UVERBS_NAME(x, y) _UVERBS_PASTE(x, y)
+#define UVERBS_METHOD(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _method_##id)
+#define UVERBS_HANDLER(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _handler_##id)
+
+#define DECLARE_UVERBS_NAMED_METHOD(id, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, UVERBS_HANDLER(id), ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(id, handler, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, handler, ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_METHOD_NO_OVERRIDE(id, handler, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, NULL, ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_OBJECT(id, ...) \
+ DECLARE_UVERBS_OBJECT(UVERBS_OBJECT(id), id, ##__VA_ARGS__)
+
+#define _UVERBS_COMP_NAME(x, y, z) _UVERBS_NAME(_UVERBS_NAME(x, y), z)
+
+#define UVERBS_NO_OVERRIDE NULL
+
+/* This declares a parsing tree with one object and one method. This is usually
+ * used for merging driver attributes into the common attributes. The driver has
+ * a chance to override the handler and type attrs of the original object.
+ * The __VA_ARGS__ just contains a list of attributes.
+ */
+#define ADD_UVERBS_ATTRIBUTES(_name, _object, _method, _type_attrs, _handler, ...) \
+static DECLARE_UVERBS_METHOD(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _method_, _name), \
+ _method, _handler, ##__VA_ARGS__); \
+ \
+static DECLARE_UVERBS_OBJECT(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _object_, _name), \
+ _object, _type_attrs, \
+ &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _method_, _name)); \
+ \
+static DECLARE_UVERBS_OBJECT_TREE(_name, \
+ &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _object_, _name))
+
+/* A very common use case is that the driver doesn't override the handler and
+ * type_attrs. Therefore, we provide a simplified macro for this common case.
+ */
+#define ADD_UVERBS_ATTRIBUTES_SIMPLE(_name, _object, _method, ...) \
+ ADD_UVERBS_ATTRIBUTES(_name, _object, _method, UVERBS_NO_OVERRIDE, \
+ UVERBS_NO_OVERRIDE, ##__VA_ARGS__)
+
+#endif
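
A driver is expected to pick its prefix before including the header, so the generated method/handler symbols are namespaced per module; a minimal sketch:

	#define UVERBS_MODULE_NAME my_driver
	#include <rdma/uverbs_named_ioctl.h>

	/* UVERBS_METHOD(FOO) now expands to my_driver_method_FOO and
	 * UVERBS_HANDLER(FOO) to my_driver_handler_FOO. */
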
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 5f8e20bbd67c..9d56cdb84655 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -37,26 +37,10 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>
+#define UVERBS_OBJECT(id) uverbs_object_##id
+
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-extern const struct uverbs_object_def uverbs_object_comp_channel;
-extern const struct uverbs_object_def uverbs_object_cq;
-extern const struct uverbs_object_def uverbs_object_qp;
-extern const struct uverbs_object_def uverbs_object_rwq_ind_table;
-extern const struct uverbs_object_def uverbs_object_wq;
-extern const struct uverbs_object_def uverbs_object_srq;
-extern const struct uverbs_object_def uverbs_object_ah;
-extern const struct uverbs_object_def uverbs_object_flow;
-extern const struct uverbs_object_def uverbs_object_mr;
-extern const struct uverbs_object_def uverbs_object_mw;
-extern const struct uverbs_object_def uverbs_object_pd;
-extern const struct uverbs_object_def uverbs_object_xrcd;
-extern const struct uverbs_object_def uverbs_object_device;
-
-extern const struct uverbs_object_tree_def uverbs_default_objects;
-static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
-{
- return &uverbs_default_objects;
-}
+const struct uverbs_object_tree_def *uverbs_default_get_objects(void);
#else
static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
{
@@ -72,22 +56,22 @@ static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type,
return rdma_lookup_get_uobject(type, ucontext, id, write);
}
-#define uobj_get_type(_object) uverbs_object_##_object.type_attrs
+#define uobj_get_type(_object) UVERBS_OBJECT(_object).type_attrs
#define uobj_get_read(_type, _id, _ucontext) \
- __uobj_get(_type, false, _ucontext, _id)
+ __uobj_get(uobj_get_type(_type), false, _ucontext, _id)
-#define uobj_get_obj_read(_object, _id, _ucontext) \
+#define uobj_get_obj_read(_object, _type, _id, _ucontext) \
({ \
struct ib_uobject *__uobj = \
- __uobj_get(uverbs_object_##_object.type_attrs, \
+ __uobj_get(uobj_get_type(_type), \
false, _ucontext, _id); \
\
(struct ib_##_object *)(IS_ERR(__uobj) ? NULL : __uobj->object);\
})
#define uobj_get_write(_type, _id, _ucontext) \
- __uobj_get(_type, true, _ucontext, _id)
+ __uobj_get(uobj_get_type(_type), true, _ucontext, _id)
static inline void uobj_put_read(struct ib_uobject *uobj)
{
@@ -124,7 +108,7 @@ static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type
}
#define uobj_alloc(_type, ucontext) \
- __uobj_alloc(_type, ucontext)
+ __uobj_alloc(uobj_get_type(_type), ucontext)
#endif
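
With the UVERBS_OBJECT() indirection, the lookup macros now take the object id rather than raw type_attrs; a hedged fragment of a command handler (cmd.pd_handle and ucontext are assumed from the surrounding handler):

	struct ib_pd *pd;

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, ucontext);
	if (!pd)
		return -EINVAL;
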
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 6df6fe0c2198..225ab7783dfd 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -75,16 +75,15 @@ enum phy_event {
PHYE_OOB_ERROR,
PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
PHYE_RESUME_TIMEOUT,
+ PHYE_SHUTDOWN,
PHY_NUM_EVENTS,
};
enum discover_event {
DISCE_DISCOVER_DOMAIN = 0U,
DISCE_REVALIDATE_DOMAIN,
- DISCE_PROBE,
DISCE_SUSPEND,
DISCE_RESUME,
- DISCE_DESTRUCT,
DISC_NUM_EVENTS,
};
@@ -261,6 +260,7 @@ struct asd_sas_port {
struct list_head dev_list;
struct list_head disco_list;
struct list_head destroy_list;
+ struct list_head sas_port_del_list;
enum sas_linkrate linkrate;
struct sas_work work;
@@ -292,6 +292,7 @@ struct asd_sas_port {
struct asd_sas_event {
struct sas_work work;
struct asd_sas_phy *phy;
+ int event;
};
static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
@@ -301,17 +302,24 @@ static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
return ev;
}
+static inline void INIT_SAS_EVENT(struct asd_sas_event *ev,
+ void (*fn)(struct work_struct *),
+ struct asd_sas_phy *phy, int event)
+{
+ INIT_SAS_WORK(&ev->work, fn);
+ ev->phy = phy;
+ ev->event = event;
+}
+
+#define SAS_PHY_SHUTDOWN_THRES 1024
+
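
A sketch of how libsas might now build a dynamically allocated event before queueing it (the work function name is made up; the real queueing path is in libsas proper):

	struct asd_sas_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;
	INIT_SAS_EVENT(ev, my_phy_event_worker, phy, PHYE_OOB_ERROR);
	/* queue ev->work on ha->event_q and bump phy->event_nr */
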
/* The phy pretty much is controlled by the LLDD.
* The class only reads those fields.
*/
struct asd_sas_phy {
/* private: */
- struct asd_sas_event port_events[PORT_NUM_EVENTS];
- struct asd_sas_event phy_events[PHY_NUM_EVENTS];
-
- unsigned long port_events_pending;
- unsigned long phy_events_pending;
-
+ atomic_t event_nr;
+ int in_shutdown;
int error;
int suspended;
@@ -380,6 +388,9 @@ struct sas_ha_struct {
struct device *dev; /* should be set */
struct module *lldd_module; /* should be set */
+ struct workqueue_struct *event_q;
+ struct workqueue_struct *disco_q;
+
u8 *sas_addr; /* must be set */
u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE];
@@ -399,6 +410,8 @@ struct sas_ha_struct {
struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
struct list_head eh_ata_q; /* scmds to promote from sas to ata eh */
+
+ int event_thres;
};
#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -670,6 +683,7 @@ extern int sas_bios_param(struct scsi_device *,
sector_t capacity, int *hsc);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
+extern struct device_attribute dev_attr_phy_event_threshold;
int sas_discover_root_expander(struct domain_device *);
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index cb85eddb47ea..eb7853c1a23b 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -47,6 +47,8 @@ static inline int scsi_status_is_good(int status)
*/
status &= 0xfe;
return ((status == SAM_STAT_GOOD) ||
+ (status == SAM_STAT_CONDITION_MET) ||
+ /* Next two "intermediate" statuses are obsolete in SAM-4 */
(status == SAM_STAT_INTERMEDIATE) ||
(status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
/* FIXME: this is obsolete in SAM-3 */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 7fb57e905526..aaf1e971c6a3 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -58,8 +58,7 @@ struct scsi_pointer {
/* for scmd->flags */
#define SCMD_TAGGED (1 << 0)
#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
-#define SCMD_ZONE_WRITE_LOCK (1 << 2)
-#define SCMD_INITIALIZED (1 << 3)
+#define SCMD_INITIALIZED (1 << 2)
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
@@ -69,6 +68,9 @@ struct scsi_cmnd {
struct list_head list; /* scsi_cmnd participates in queue lists */
struct list_head eh_entry; /* entry for the host eh_cmd_q */
struct delayed_work abort_work;
+
+ struct rcu_head rcu;
+
int eh_eflags; /* Used by error handler */
/*
@@ -171,10 +173,14 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
extern void scsi_kunmap_atomic_sg(void *virt);
extern int scsi_init_io(struct scsi_cmnd *cmd);
-extern void scsi_initialize_rq(struct request *rq);
+#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
+#else /* !CONFIG_SCSI_DMA */
+static inline int scsi_dma_map(struct scsi_cmnd *cmd) { return -ENOSYS; }
+static inline void scsi_dma_unmap(struct scsi_cmnd *cmd) { }
+#endif /* !CONFIG_SCSI_DMA */
static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
{
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a8b7bf879ced..12f454cb6f61 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -52,21 +52,6 @@ struct scsi_host_template {
const char *name;
/*
- * Used to initialize old-style drivers. For new-style drivers
- * just perform all work in your module initialization function.
- *
- * Status: OBSOLETE
- */
- int (* detect)(struct scsi_host_template *);
-
- /*
- * Used as unload callback for hosts with old-style drivers.
- *
- * Status: OBSOLETE
- */
- int (* release)(struct Scsi_Host *);
-
- /*
* The info function will return whatever useful information the
* developer sees fit. If not provided, then the name field will
* be used instead.
@@ -452,6 +437,9 @@ struct scsi_host_template {
/* True if the controller does not support WRITE SAME */
unsigned no_write_same:1;
+ /* True if the low-level driver supports blk-mq only */
+ unsigned force_blk_mq:1;
+
/*
* Countdown for host blocking with no commands outstanding.
*/
@@ -477,13 +465,10 @@ struct scsi_host_template {
struct device_attribute **sdev_attrs;
/*
- * List of hosts per template.
- *
- * This is only for use by scsi_module.c for legacy templates.
- * For these access to it is synchronized implicitly by
- * module_init/module_exit.
+ * Pointer to the SCSI device attribute groups for this host,
+ * NULL terminated.
*/
- struct list_head legacy_hosts;
+ const struct attribute_group **sdev_groups;
/*
* Vendor Identifier associated with the host
@@ -706,15 +691,6 @@ struct Scsi_Host {
struct device shost_gendev, shost_dev;
/*
- * List of hosts per template.
- *
- * This is only for use by scsi_module.c for legacy templates.
- * For these access to it is synchronized implicitly by
- * module_init/module_exit.
- */
- struct list_head sht_legacy_list;
-
- /*
* Points to the transport data (if any) which is allocated
* separately
*/
@@ -914,9 +890,6 @@ static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
return shost->prot_guard_type;
}
-/* legacy interfaces */
-extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
-extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
#endif /* _SCSI_SCSI_HOST_H */
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 1df8efb0ee01..c36860111932 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -236,6 +236,7 @@ struct scsi_varlen_cdb_hdr {
#define UNIT_ATTENTION 0x06
#define DATA_PROTECT 0x07
#define BLANK_CHECK 0x08
+#define VENDOR_SPECIFIC 0x09
#define COPY_ABORTED 0x0a
#define ABORTED_COMMAND 0x0b
#define VOLUME_OVERFLOW 0x0d
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 8cf30215c177..15da45dc2a5d 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -139,8 +139,8 @@ enum fc_vport_state {
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
#define FC_PORTSPEED_25GBIT 0x800
-#define FC_PORTSPEED_64BIT 0x1000
-#define FC_PORTSPEED_128BIT 0x2000
+#define FC_PORTSPEED_64GBIT 0x1000
+#define FC_PORTSPEED_128GBIT 0x2000
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 62895b405933..05ec927a3c72 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -156,6 +156,7 @@ struct sas_port {
struct mutex phy_list_mutex;
struct list_head phy_list;
+ struct list_head del_list; /* libsas only */
};
#define dev_to_sas_port(d) \
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 5be834de491a..c16a3c9a4d9b 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -129,6 +129,23 @@ struct srp_login_req {
u8 target_port_id[16];
};
+/**
+ * struct srp_login_req_rdma - RDMA/CM login parameters.
+ *
+ * RDMA/CM over InfiniBand can only carry 92 - 36 = 56 bytes of private
+ * data. The %srp_login_req_rdma structure contains the same information as
+ * %srp_login_req but with the reserved data removed.
+ */
+struct srp_login_req_rdma {
+ u64 tag;
+ __be16 req_buf_fmt;
+ u8 req_flags;
+ u8 opcode;
+ __be32 req_it_iu_len;
+ u8 initiator_port_id[16];
+ u8 target_port_id[16];
+};
+
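
Given the 56-byte budget stated above, a consumer could assert the layout at build time (a hedged sanity check, not part of this patch):

	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) > 56);
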
/*
* The SRP spec defines the size of the LOGIN_RSP structure to be 52
* bytes, so it needs to be packed to avoid having it padded to 56
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index c2d1b15da136..a91f25151a5b 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -15,6 +15,7 @@
#define ARC_REG_MCIP_BCR 0x0d0
#define ARC_REG_MCIP_IDU_BCR 0x0D5
+#define ARC_REG_GFRC_BUILD 0x0D6
#define ARC_REG_MCIP_CMD 0x600
#define ARC_REG_MCIP_WDATA 0x601
#define ARC_REG_MCIP_READBACK 0x602
@@ -36,10 +37,14 @@ struct mcip_cmd {
#define CMD_SEMA_RELEASE 0x12
#define CMD_DEBUG_SET_MASK 0x34
+#define CMD_DEBUG_READ_MASK 0x35
#define CMD_DEBUG_SET_SELECT 0x36
+#define CMD_DEBUG_READ_SELECT 0x37
#define CMD_GFRC_READ_LO 0x42
#define CMD_GFRC_READ_HI 0x43
+#define CMD_GFRC_SET_CORE 0x47
+#define CMD_GFRC_READ_CORE 0x48
#define CMD_IDU_ENABLE 0x71
#define CMD_IDU_DISABLE 0x72
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index cb979ad90401..50df5b28d2c9 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -63,6 +63,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_GET_MIN_VOLTAGE = 0x00030008,
RPI_FIRMWARE_GET_TURBO = 0x00030009,
RPI_FIRMWARE_GET_MAX_TEMPERATURE = 0x0003000a,
+ RPI_FIRMWARE_GET_STC = 0x0003000b,
RPI_FIRMWARE_ALLOCATE_MEMORY = 0x0003000c,
RPI_FIRMWARE_LOCK_MEMORY = 0x0003000d,
RPI_FIRMWARE_UNLOCK_MEMORY = 0x0003000e,
@@ -72,12 +73,22 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_SET_ENABLE_QPU = 0x00030012,
RPI_FIRMWARE_GET_DISPMANX_RESOURCE_MEM_HANDLE = 0x00030014,
RPI_FIRMWARE_GET_EDID_BLOCK = 0x00030020,
+ RPI_FIRMWARE_GET_CUSTOMER_OTP = 0x00030021,
RPI_FIRMWARE_GET_DOMAIN_STATE = 0x00030030,
RPI_FIRMWARE_SET_CLOCK_STATE = 0x00038001,
RPI_FIRMWARE_SET_CLOCK_RATE = 0x00038002,
RPI_FIRMWARE_SET_VOLTAGE = 0x00038003,
RPI_FIRMWARE_SET_TURBO = 0x00038009,
+ RPI_FIRMWARE_SET_CUSTOMER_OTP = 0x00038021,
RPI_FIRMWARE_SET_DOMAIN_STATE = 0x00038030,
+ RPI_FIRMWARE_GET_GPIO_STATE = 0x00030041,
+ RPI_FIRMWARE_SET_GPIO_STATE = 0x00038041,
+ RPI_FIRMWARE_SET_SDHOST_CLOCK = 0x00038042,
+ RPI_FIRMWARE_GET_GPIO_CONFIG = 0x00030043,
+ RPI_FIRMWARE_SET_GPIO_CONFIG = 0x00038043,
+ RPI_FIRMWARE_GET_PERIPH_REG = 0x00030045,
+ RPI_FIRMWARE_SET_PERIPH_REG = 0x00038045,
+
/* Dispmanx TAGS */
RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001,
@@ -91,6 +102,8 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_FRAMEBUFFER_GET_VIRTUAL_OFFSET = 0x00040009,
RPI_FIRMWARE_FRAMEBUFFER_GET_OVERSCAN = 0x0004000a,
RPI_FIRMWARE_FRAMEBUFFER_GET_PALETTE = 0x0004000b,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_TOUCHBUF = 0x0004000f,
+ RPI_FIRMWARE_FRAMEBUFFER_GET_GPIOVIRTBUF = 0x00040010,
RPI_FIRMWARE_FRAMEBUFFER_RELEASE = 0x00048001,
RPI_FIRMWARE_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT = 0x00044003,
RPI_FIRMWARE_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT = 0x00044004,
@@ -100,6 +113,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_FRAMEBUFFER_TEST_VIRTUAL_OFFSET = 0x00044009,
RPI_FIRMWARE_FRAMEBUFFER_TEST_OVERSCAN = 0x0004400a,
RPI_FIRMWARE_FRAMEBUFFER_TEST_PALETTE = 0x0004400b,
+ RPI_FIRMWARE_FRAMEBUFFER_TEST_VSYNC = 0x0004400e,
RPI_FIRMWARE_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT = 0x00048003,
RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT = 0x00048004,
RPI_FIRMWARE_FRAMEBUFFER_SET_DEPTH = 0x00048005,
@@ -108,6 +122,10 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_OFFSET = 0x00048009,
RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN = 0x0004800a,
RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE = 0x0004800b,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF = 0x0004801f,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_GPIOVIRTBUF = 0x00048020,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_VSYNC = 0x0004800e,
+ RPI_FIRMWARE_FRAMEBUFFER_SET_BACKLIGHT = 0x0004800f,
RPI_FIRMWARE_VCHIQ_INIT = 0x00048010,
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index aeae4466dd25..e69e4c4d80ae 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -75,8 +75,8 @@ struct tegra_bpmp {
struct mbox_chan *channel;
} mbox;
- struct tegra_bpmp_channel *channels;
- unsigned int num_channels;
+ spinlock_t atomic_tx_lock;
+ struct tegra_bpmp_channel *tx_channel, *rx_channel, *threaded_channels;
struct {
unsigned long *allocated;
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 44202ff897fd..233bae954970 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -51,6 +51,12 @@ struct tegra_smmu_swgroup {
unsigned int reg;
};
+struct tegra_smmu_group_soc {
+ const char *name;
+ const unsigned int *swgroups;
+ unsigned int num_swgroups;
+};
+
struct tegra_smmu_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
@@ -58,6 +64,9 @@ struct tegra_smmu_soc {
const struct tegra_smmu_swgroup *swgroups;
unsigned int num_swgroups;
+ const struct tegra_smmu_group_soc *groups;
+ unsigned int num_groups;
+
bool supports_round_robin_arbitration;
bool supports_request_limit;
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 1c3982bc558f..c32bf91c23e6 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -83,6 +83,7 @@ enum tegra_io_pad {
TEGRA_IO_PAD_BB,
TEGRA_IO_PAD_CAM,
TEGRA_IO_PAD_COMP,
+ TEGRA_IO_PAD_CONN,
TEGRA_IO_PAD_CSIA,
TEGRA_IO_PAD_CSIB,
TEGRA_IO_PAD_CSIC,
@@ -92,31 +93,42 @@ enum tegra_io_pad {
TEGRA_IO_PAD_DBG,
TEGRA_IO_PAD_DEBUG_NONAO,
TEGRA_IO_PAD_DMIC,
+ TEGRA_IO_PAD_DMIC_HV,
TEGRA_IO_PAD_DP,
TEGRA_IO_PAD_DSI,
TEGRA_IO_PAD_DSIB,
TEGRA_IO_PAD_DSIC,
TEGRA_IO_PAD_DSID,
+ TEGRA_IO_PAD_EDP,
TEGRA_IO_PAD_EMMC,
TEGRA_IO_PAD_EMMC2,
TEGRA_IO_PAD_GPIO,
TEGRA_IO_PAD_HDMI,
+ TEGRA_IO_PAD_HDMI_DP0,
+ TEGRA_IO_PAD_HDMI_DP1,
TEGRA_IO_PAD_HSIC,
TEGRA_IO_PAD_HV,
TEGRA_IO_PAD_LVDS,
TEGRA_IO_PAD_MIPI_BIAS,
TEGRA_IO_PAD_NAND,
TEGRA_IO_PAD_PEX_BIAS,
+ TEGRA_IO_PAD_PEX_CLK_BIAS,
TEGRA_IO_PAD_PEX_CLK1,
TEGRA_IO_PAD_PEX_CLK2,
+ TEGRA_IO_PAD_PEX_CLK3,
TEGRA_IO_PAD_PEX_CNTRL,
TEGRA_IO_PAD_SDMMC1,
+ TEGRA_IO_PAD_SDMMC1_HV,
+ TEGRA_IO_PAD_SDMMC2,
+ TEGRA_IO_PAD_SDMMC2_HV,
TEGRA_IO_PAD_SDMMC3,
+ TEGRA_IO_PAD_SDMMC3_HV,
TEGRA_IO_PAD_SDMMC4,
TEGRA_IO_PAD_SPI,
TEGRA_IO_PAD_SPI_HV,
TEGRA_IO_PAD_SYS_DDC,
TEGRA_IO_PAD_UART,
+ TEGRA_IO_PAD_UFS,
TEGRA_IO_PAD_USB0,
TEGRA_IO_PAD_USB1,
TEGRA_IO_PAD_USB2,
diff --git a/include/sound/ac97/regs.h b/include/sound/ac97/regs.h
index 4bb86d379bd5..9a4fa0c3264a 100644
--- a/include/sound/ac97/regs.h
+++ b/include/sound/ac97/regs.h
@@ -31,7 +31,7 @@
#define AC97_HEADPHONE 0x04 /* Headphone Volume (optional) */
#define AC97_MASTER_MONO 0x06 /* Master Volume Mono (optional) */
#define AC97_MASTER_TONE 0x08 /* Master Tone (Bass & Treble) (optional) */
-#define AC97_PC_BEEP 0x0a /* PC Beep Volume (optinal) */
+#define AC97_PC_BEEP 0x0a /* PC Beep Volume (optional) */
#define AC97_PHONE 0x0c /* Phone Volume (optional) */
#define AC97_MIC 0x0e /* MIC Volume */
#define AC97_LINE 0x10 /* Line In Volume */
diff --git a/include/sound/da7219.h b/include/sound/da7219.h
index 409ef1397fd3..1bfcb16f2d10 100644
--- a/include/sound/da7219.h
+++ b/include/sound/da7219.h
@@ -36,6 +36,8 @@ struct da7219_aad_pdata;
struct da7219_pdata {
bool wakeup_source;
+ const char *dai_clks_name;
+
/* Mic */
enum da7219_micbias_voltage micbias_lvl;
enum da7219_mic_amp_in_sel mic_amp_in_sel;
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 67be2445941a..e3481eebdd98 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -118,6 +118,8 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
* PCM substream. Will be called from the PCM drivers hwparams callback.
* @compat_request_channel: Callback to request a DMA channel for platforms
* which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
* @compat_filter_fn: Will be used as the filter function when requesting a
* channel for platforms which do not use devicetree. The filter parameter
* will be the DAI's DMA data.
@@ -140,6 +142,9 @@ struct snd_dmaengine_pcm_config {
struct dma_chan *(*compat_request_channel)(
struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
dma_filter_fn compat_filter_fn;
struct device *dma_dev;
const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
@@ -161,4 +166,6 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct dma_slave_config *slave_config);
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
#endif
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 4f42affe777c..5ebcc51c0a6a 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1710,6 +1710,7 @@ struct snd_emu10k1 {
unsigned int ecard_ctrl; /* ecard control bits */
unsigned int address_mode; /* address mode */
unsigned long dma_mask; /* PCI DMA mask */
+ bool iommu_workaround; /* IOMMU workaround needed */
unsigned int delay_pcm_irq; /* in samples */
int max_cache_pages; /* max memory size / PAGE_SIZE */
struct snd_dma_buffer silent_page; /* silent page */
@@ -1718,7 +1719,6 @@ struct snd_emu10k1 {
struct snd_dma_buffer p16v_buffer;
struct snd_util_memhdr *memhdr; /* page allocation list */
- struct snd_emu10k1_memblk *reserved_page; /* reserved page */
struct list_head mapped_link_head;
struct list_head mapped_order_link_head;
@@ -1878,6 +1878,8 @@ void snd_p16v_resume(struct snd_emu10k1 *emu);
/* memory allocation */
struct snd_util_memblk *snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream);
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk);
+int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
+ struct snd_dma_buffer *dmab);
struct snd_util_memblk *snd_emu10k1_synth_alloc(struct snd_emu10k1 *emu, unsigned int size);
int snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *blk);
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, int size);
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 68169e3749de..06536e01ed94 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -146,6 +146,8 @@ int snd_hdac_codec_write(struct hdac_device *hdac, hda_nid_t nid,
int flags, unsigned int verb, unsigned int parm);
bool snd_hdac_check_power_state(struct hdac_device *hdac,
hda_nid_t nid, unsigned int target_state);
+unsigned int snd_hdac_sync_power_state(struct hdac_device *hdac,
+ hda_nid_t nid, unsigned int target_state);
/**
* snd_hdac_read_parm - read a codec parameter
* @codec: the codec object
@@ -227,9 +229,6 @@ struct hdac_io_ops {
#define HDA_UNSOL_QUEUE_SIZE 64
#define HDA_MAX_CODECS 8 /* limit by controller side */
-/* HD Audio class code */
-#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
-
/*
* CORB/RIRB
*
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index ca00130cb028..9c14e21dda85 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -193,7 +193,7 @@ struct hda_dai_map {
* @pvt_data - private data, for asoc contains asoc codec object
*/
struct hdac_ext_device {
- struct hdac_device hdac;
+ struct hdac_device hdev;
struct hdac_ext_bus *ebus;
/* soc-dai to nid map */
@@ -213,7 +213,7 @@ struct hdac_ext_dma_params {
u8 stream_tag;
};
#define to_ehdac_device(dev) (container_of((dev), \
- struct hdac_ext_device, hdac))
+ struct hdac_ext_device, hdev))
/*
* HD-audio codec base driver
*/
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index ab9fcb2f97f0..afeca593188a 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -37,7 +37,7 @@ struct snd_hwdep_ops {
long count, loff_t *offset);
int (*open)(struct snd_hwdep *hw, struct file * file);
int (*release)(struct snd_hwdep *hw, struct file * file);
- unsigned int (*poll)(struct snd_hwdep *hw, struct file *file,
+ __poll_t (*poll)(struct snd_hwdep *hw, struct file *file,
poll_table *wait);
int (*ioctl)(struct snd_hwdep *hw, struct file *file,
unsigned int cmd, unsigned long arg);
diff --git a/include/sound/info.h b/include/sound/info.h
index 67390ee846aa..becdf66d2825 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -62,7 +62,7 @@ struct snd_info_entry_ops {
loff_t (*llseek)(struct snd_info_entry *entry,
void *file_private_data, struct file *file,
loff_t offset, int orig);
- unsigned int (*poll)(struct snd_info_entry *entry,
+ __poll_t (*poll)(struct snd_info_entry *entry,
void *file_private_data, struct file *file,
poll_table *wait);
int (*ioctl)(struct snd_info_entry *entry, void *file_private_data,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 24febf9e177c..e054c583d3b3 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -169,6 +169,10 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_IMA_ADPCM _SNDRV_PCM_FMTBIT(IMA_ADPCM)
#define SNDRV_PCM_FMTBIT_MPEG _SNDRV_PCM_FMTBIT(MPEG)
#define SNDRV_PCM_FMTBIT_GSM _SNDRV_PCM_FMTBIT(GSM)
+#define SNDRV_PCM_FMTBIT_S20_LE _SNDRV_PCM_FMTBIT(S20_LE)
+#define SNDRV_PCM_FMTBIT_U20_LE _SNDRV_PCM_FMTBIT(U20_LE)
+#define SNDRV_PCM_FMTBIT_S20_BE _SNDRV_PCM_FMTBIT(S20_BE)
+#define SNDRV_PCM_FMTBIT_U20_BE _SNDRV_PCM_FMTBIT(U20_BE)
#define SNDRV_PCM_FMTBIT_SPECIAL _SNDRV_PCM_FMTBIT(SPECIAL)
#define SNDRV_PCM_FMTBIT_S24_3LE _SNDRV_PCM_FMTBIT(S24_3LE)
#define SNDRV_PCM_FMTBIT_U24_3LE _SNDRV_PCM_FMTBIT(U24_3LE)
@@ -202,6 +206,8 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_LE
#define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_LE
#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_LE
+#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_BE
@@ -213,6 +219,8 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_BE
#define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_BE
#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_BE
+#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE
#endif
struct snd_pcm_file {
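
The new 20-bit format bits above follow the header's existing convention: the endian-neutral SNDRV_PCM_FMTBIT_S20/U20 aliases resolve to the _LE or _BE variant depending on whether SNDRV_LITTLE_ENDIAN or SNDRV_BIG_ENDIAN is defined. A hedged sketch of how a driver might advertise them in its hardware descriptor; every value other than .formats is a placeholder, not taken from any real driver:

#include <sound/pcm.h>

/* hypothetical hardware descriptor; .formats is the point of this sketch */
static const struct snd_pcm_hardware example_pcm_hw = {
	.info		= SNDRV_PCM_INFO_INTERLEAVED,
	/* 16-bit plus the new 20-bit-in-32-bit samples, in native endianness */
	.formats	= SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
	.channels_min	= 2,
	.channels_max	= 2,
};
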
diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
index 760c969d885d..12bbf8c81112 100644
--- a/include/sound/pcm_oss.h
+++ b/include/sound/pcm_oss.h
@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
char *buffer; /* vmallocated period */
size_t buffer_used; /* used length from period buffer */
struct mutex params_lock;
+ atomic_t rw_ref; /* concurrent read/write accesses */
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
struct snd_pcm_plugin *plugin_first;
struct snd_pcm_plugin *plugin_last;
diff --git a/include/sound/rt5514.h b/include/sound/rt5514.h
index ef18494769ee..64d027dbaaca 100644
--- a/include/sound/rt5514.h
+++ b/include/sound/rt5514.h
@@ -14,6 +14,8 @@
struct rt5514_platform_data {
unsigned int dmic_init_delay;
+ const char *dsp_calib_clk_name;
+ unsigned int dsp_calib_clk_rate;
};
#endif
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index d0c33a9972b9..f218c742f08e 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -25,6 +25,9 @@ struct rt5645_platform_data {
bool level_trigger_irq;
/* Invert JD1_1 status polarity */
bool inv_jd1_1;
+
+ /* Value to assign to snd_soc_card.long_name */
+ const char *long_name;
};
#endif
diff --git a/include/sound/rt5651.h b/include/sound/rt5651.h
deleted file mode 100644
index 18b79a761f10..000000000000
--- a/include/sound/rt5651.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * linux/sound/rt286.h -- Platform data for RT286
- *
- * Copyright 2013 Realtek Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_SND_RT5651_H
-#define __LINUX_SND_RT5651_H
-
-enum rt5651_jd_src {
- RT5651_JD_NULL,
- RT5651_JD1_1,
- RT5651_JD1_2,
- RT5651_JD2,
-};
-
-struct rt5651_platform_data {
- /* IN2 can optionally be differential */
- bool in2_diff;
-
- bool dmic_en;
- enum rt5651_jd_src jd_src;
-};
-
-#endif
diff --git a/include/sound/rt5659.h b/include/sound/rt5659.h
index 656c4d58948d..9012e2b25360 100644
--- a/include/sound/rt5659.h
+++ b/include/sound/rt5659.h
@@ -30,6 +30,7 @@ enum rt5659_dmic2_data_pin {
enum rt5659_jd_src {
RT5659_JD_NULL,
RT5659_JD3,
+ RT5659_JD_HDA_HEADER,
};
struct rt5659_platform_data {
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 1a9191cd4bb3..9da6388c20a1 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -16,6 +16,7 @@
#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
#define __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
+#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/acpi.h>
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index a7d8d335b043..082224275f52 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -17,6 +17,7 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -26,17 +27,13 @@ struct snd_soc_acpi_package_context {
bool data_valid;
};
+/* codec name used in DAIs is i2c-<HID>:00 with HID being 8 chars */
+#define SND_ACPI_I2C_ID_LEN (4 + ACPI_ID_LEN + 3 + 1)
+
#if IS_ENABLED(CONFIG_ACPI)
-/* translation fron HID to I2C name, needed for DAI codec_name */
-const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx);
#else
-static inline const char *
-snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
- return NULL;
-}
static inline bool
snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx)
@@ -49,9 +46,6 @@ snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
-/* acpi check hid */
-bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN]);
-
/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
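
The SND_ACPI_I2C_ID_LEN arithmetic introduced above is just the length of the "i2c-<HID>:00" codec name the comment describes: 4 bytes for "i2c-", ACPI_ID_LEN for the HID, 3 for ":00" and 1 for the terminating NUL. A small userspace sketch of composing such a name; the HID value is invented, and ACPI_ID_LEN is assumed to be 9 (eight characters plus NUL) as in linux/mod_devicetable.h:

#include <stdio.h>

#define ACPI_ID_LEN		9	/* assumed: 8-char HID plus NUL */
#define SND_ACPI_I2C_ID_LEN	(4 + ACPI_ID_LEN + 3 + 1)	/* "i2c-" + HID + ":00" + NUL */

int main(void)
{
	const char *hid = "10EC5651";	/* invented 8-character ACPI HID */
	char codec_name[SND_ACPI_I2C_ID_LEN];

	/* compose the DAI codec name in the format described above */
	snprintf(codec_name, sizeof(codec_name), "i2c-%s:00", hid);
	printf("%s (buffer of %zu bytes)\n", codec_name, sizeof(codec_name));
	return 0;
}
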
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 58acd00cae19..8ad11669e4d8 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -102,6 +102,8 @@ struct snd_compr_stream;
SNDRV_PCM_FMTBIT_S16_BE |\
SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S20_3BE |\
+ SNDRV_PCM_FMTBIT_S20_LE |\
+ SNDRV_PCM_FMTBIT_S20_BE |\
SNDRV_PCM_FMTBIT_S24_3LE |\
SNDRV_PCM_FMTBIT_S24_3BE |\
SNDRV_PCM_FMTBIT_S32_LE |\
@@ -294,9 +296,6 @@ struct snd_soc_dai {
/* DAI runtime info */
unsigned int capture_active:1; /* stream is in use */
unsigned int playback_active:1; /* stream is in use */
- unsigned int symmetric_rates:1;
- unsigned int symmetric_channels:1;
- unsigned int symmetric_samplebits:1;
unsigned int probed:1;
unsigned int active;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 344b96c206a3..a6ce2de4e20a 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -269,6 +269,13 @@ struct device;
.reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
.on_val = wflags}
+#define SND_SOC_DAPM_PINCTRL(wname, active, sleep) \
+{ .id = snd_soc_dapm_pinctrl, .name = wname, \
+ .priv = (&(struct snd_soc_dapm_pinctrl_priv) \
+ { .active_state = active, .sleep_state = sleep,}), \
+ .reg = SND_SOC_NOPM, .event = dapm_pinctrl_event, \
+ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
+
/* dapm kcontrol types */
@@ -374,6 +381,8 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
int dapm_clock_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
+int dapm_pinctrl_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
/* dapm controls */
int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
@@ -500,6 +509,7 @@ enum snd_soc_dapm_type {
snd_soc_dapm_pre, /* machine specific pre widget - exec first */
snd_soc_dapm_post, /* machine specific post widget - exec last */
snd_soc_dapm_supply, /* power/clock supply */
+ snd_soc_dapm_pinctrl, /* pinctrl */
snd_soc_dapm_regulator_supply, /* external regulator */
snd_soc_dapm_clock_supply, /* external clock */
snd_soc_dapm_aif_in, /* audio interface input */
@@ -581,6 +591,7 @@ struct snd_soc_dapm_widget {
void *priv; /* widget specific data */
struct regulator *regulator; /* attached regulator */
+ struct pinctrl *pinctrl; /* attached pinctrl */
const struct snd_soc_pcm_stream *params; /* params for dai links */
unsigned int num_params; /* number of params for dai links */
unsigned int params_select; /* currently selected param for dai link */
@@ -683,6 +694,11 @@ struct snd_soc_dapm_stats {
int neighbour_checks;
};
+struct snd_soc_dapm_pinctrl_priv {
+ const char *active_state;
+ const char *sleep_state;
+};
+
/**
* snd_soc_dapm_init_bias_level() - Initialize DAPM bias level
* @dapm: The DAPM context to initialize
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1a7323238c49..ad266d7e9553 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -494,6 +494,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
#endif
+void snd_soc_disconnect_sync(struct device *dev);
+
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream);
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
@@ -584,10 +586,17 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
unsigned int mask, unsigned int value);
#ifdef CONFIG_SND_SOC_AC97_BUS
-struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+#define snd_soc_alloc_ac97_codec(codec) \
+ snd_soc_alloc_ac97_component(&codec->component)
+#define snd_soc_new_ac97_codec(codec, id, id_mask) \
+ snd_soc_new_ac97_component(&codec->component, id, id_mask)
+#define snd_soc_free_ac97_codec(ac97) \
+ snd_soc_free_ac97_component(ac97)
+
+struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
+struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
unsigned int id, unsigned int id_mask);
-void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
+void snd_soc_free_ac97_component(struct snd_ac97 *ac97);
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
@@ -802,6 +811,9 @@ struct snd_soc_component_driver {
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
+ unsigned int (*read)(struct snd_soc_component *, unsigned int);
+ int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
+
/* pcm creation and destruction */
int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
@@ -836,7 +848,7 @@ struct snd_soc_component_driver {
/* bits */
unsigned int idle_bias_on:1;
unsigned int suspend_bias_off:1;
- unsigned int pmdown_time:1; /* care pmdown_time at stop */
+ unsigned int use_pmdown_time:1; /* care pmdown_time at stop */
unsigned int endianness:1;
unsigned int non_legacy_dai_naming:1;
};
@@ -858,12 +870,10 @@ struct snd_soc_component {
struct list_head card_aux_list; /* for auxiliary bound components */
struct list_head card_list;
- struct snd_soc_dai_driver *dai_drv;
- int num_dai;
-
const struct snd_soc_component_driver *driver;
struct list_head dai_list;
+ int num_dai;
int (*read)(struct snd_soc_component *, unsigned int, unsigned int *);
int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
@@ -1804,6 +1814,7 @@ int snd_soc_of_get_dai_name(struct device_node *of_node,
int snd_soc_of_get_dai_link_codecs(struct device *dev,
struct device_node *of_node,
struct snd_soc_dai_link *dai_link);
+void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link);
int snd_soc_add_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index b6b3fb444a92..34a15d59ed88 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -53,6 +53,7 @@ struct target_backend_ops {
void (*free_prot)(struct se_device *);
struct configfs_attribute **tb_dev_attrib_attrs;
+ struct configfs_attribute **tb_dev_action_attrs;
};
struct sbc_ops {
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 2c8d8115469d..9f9f5902af38 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -183,6 +183,7 @@ enum tcm_sense_reason_table {
TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b),
TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
+ TCM_LUN_BUSY = R(0x1e),
#undef R
};
@@ -808,6 +809,7 @@ struct se_device {
/* T10 SPC-2 + SPC-3 Reservations */
struct t10_reservation t10_pr;
struct se_dev_attrib dev_attrib;
+ struct config_group dev_action_group;
struct config_group dev_group;
struct config_group dev_pr_group;
struct se_dev_stat_grps dev_stat_grps;
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
new file mode 100644
index 000000000000..505dae0bed80
--- /dev/null
+++ b/include/trace/bpf_probe.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM_VAR
+
+#ifdef CONFIG_BPF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+ ((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) ((char *)__get_dynamic_array(field))
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
+
+/* cast any integer, pointer, or small struct to u64 */
+#define UINTTYPE(size) \
+ __typeof__(__builtin_choose_expr(size == 1, (u8)1, \
+ __builtin_choose_expr(size == 2, (u16)2, \
+ __builtin_choose_expr(size == 4, (u32)3, \
+ __builtin_choose_expr(size == 8, (u64)4, \
+ (void)5)))))
+#define __CAST_TO_U64(x) ({ \
+ typeof(x) __src = (x); \
+ UINTTYPE(sizeof(x)) __dst; \
+ memcpy(&__dst, &__src, sizeof(__dst)); \
+ (u64)__dst; })
+
+#define __CAST1(a,...) __CAST_TO_U64(a)
+#define __CAST2(a,...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__)
+#define __CAST3(a,...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__)
+#define __CAST4(a,...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__)
+#define __CAST5(a,...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__)
+#define __CAST6(a,...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__)
+#define __CAST7(a,...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__)
+#define __CAST8(a,...) __CAST_TO_U64(a), __CAST7(__VA_ARGS__)
+#define __CAST9(a,...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__)
+#define __CAST10(a,...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__)
+#define __CAST11(a,...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__)
+#define __CAST12(a,...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__)
+/* tracepoints with more than 12 arguments will hit a build error */
+#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace void \
+__bpf_trace_##call(void *__data, proto) \
+{ \
+ struct bpf_prog *prog = __data; \
+ CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \
+}
+
+/*
+ * This part is compiled out; it is only here as a build-time check
+ * to make sure that if the tracepoint handling changes, the
+ * bpf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void bpf_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(__bpf_trace_##template); \
+} \
+static struct bpf_raw_event_map __used \
+ __attribute__((section("__bpf_raw_tp_map"))) \
+__bpf_trace_tp_map_##call = { \
+ .tp = &__tracepoint_##call, \
+ .bpf_func = (void *)__bpf_trace_##template, \
+ .num_args = COUNT_ARGS(args), \
+};
+
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_BPF_EVENTS */
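
The UINTTYPE()/__CAST_TO_U64() helpers above pick an unsigned integer type of the same width as the argument and memcpy() the value into it, so any scalar of up to 8 bytes can be widened to u64 without sign-extension or aliasing surprises before being handed to bpf_trace_run*(). A standalone sketch of the same trick outside the tracepoint machinery (gcc/clang only, since it relies on typeof, statement expressions and __builtin_choose_expr):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* choose an unsigned type whose size matches the argument, as in bpf_probe.h */
#define UINTTYPE(size) \
	__typeof__(__builtin_choose_expr(size == 1, (uint8_t)1,  \
		   __builtin_choose_expr(size == 2, (uint16_t)2, \
		   __builtin_choose_expr(size == 4, (uint32_t)3, \
		   __builtin_choose_expr(size == 8, (uint64_t)4, \
		   (void)5)))))

/* bit-copy the value into that type, then widen it to 64 bits */
#define CAST_TO_U64(x) ({				\
	__typeof__(x) __src = (x);			\
	UINTTYPE(sizeof(x)) __dst;			\
	memcpy(&__dst, &__src, sizeof(__dst));		\
	(uint64_t)__dst; })

int main(void)
{
	short s = -3;
	int val = 42;
	int *p = &val;

	/* the short is copied bit-for-bit (0xfffd), not sign-extended to 64 bits */
	printf("%llx %llx\n",
	       (unsigned long long)CAST_TO_U64(s),
	       (unsigned long long)CAST_TO_U64(p));
	return 0;
}
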
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index d9e3d4aa3f6e..cb30c5532144 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -95,6 +95,7 @@
#ifdef TRACEPOINTS_ENABLED
#include <trace/trace_events.h>
#include <trace/perf.h>
+#include <trace/bpf_probe.h>
#endif
#undef TRACE_EVENT
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 6b59c63a8e51..63815f66b274 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -133,8 +133,7 @@ TRACE_EVENT(afs_recv_data,
TP_ARGS(call, count, offset, want_more, ret),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, rxcall )
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_call_state, state )
__field(unsigned int, count )
__field(unsigned int, offset )
@@ -144,8 +143,7 @@ TRACE_EVENT(afs_recv_data,
),
TP_fast_assign(
- __entry->rxcall = call->rxcall;
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->state = call->state;
__entry->unmarshall = call->unmarshall;
__entry->count = count;
@@ -154,8 +152,7 @@ TRACE_EVENT(afs_recv_data,
__entry->ret = ret;
),
- TP_printk("c=%p ac=%p s=%u u=%u %u/%u wm=%u ret=%d",
- __entry->rxcall,
+ TP_printk("c=%08x s=%u u=%u %u/%u wm=%u ret=%d",
__entry->call,
__entry->state, __entry->unmarshall,
__entry->offset, __entry->count,
@@ -168,21 +165,18 @@ TRACE_EVENT(afs_notify_call,
TP_ARGS(rxcall, call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, rxcall )
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_call_state, state )
__field(unsigned short, unmarshall )
),
TP_fast_assign(
- __entry->rxcall = rxcall;
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->state = call->state;
__entry->unmarshall = call->unmarshall;
),
- TP_printk("c=%p ac=%p s=%u u=%u",
- __entry->rxcall,
+ TP_printk("c=%08x s=%u u=%u",
__entry->call,
__entry->state, __entry->unmarshall)
);
@@ -193,21 +187,18 @@ TRACE_EVENT(afs_cb_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, rxcall )
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(const char *, name )
__field(u32, op )
),
TP_fast_assign(
- __entry->rxcall = call->rxcall;
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->name = call->type->name;
__entry->op = call->operation_ID;
),
- TP_printk("c=%p ac=%p %s o=%u",
- __entry->rxcall,
+ TP_printk("c=%08x %s o=%u",
__entry->call,
__entry->name,
__entry->op)
@@ -220,7 +211,7 @@ TRACE_EVENT(afs_call,
TP_ARGS(call, op, usage, outstanding, where),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(int, op )
__field(int, usage )
__field(int, outstanding )
@@ -228,14 +219,14 @@ TRACE_EVENT(afs_call,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->outstanding = outstanding;
__entry->where = where;
),
- TP_printk("c=%p %s u=%d o=%d sp=%pSR",
+ TP_printk("c=%08x %s u=%d o=%d sp=%pSR",
__entry->call,
__print_symbolic(__entry->op, afs_call_traces),
__entry->usage,
@@ -249,13 +240,13 @@ TRACE_EVENT(afs_make_fs_call,
TP_ARGS(call, fid),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_fs_operation, op )
__field_struct(struct afs_fid, fid )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->op = call->operation_ID;
if (fid) {
__entry->fid = *fid;
@@ -266,7 +257,7 @@ TRACE_EVENT(afs_make_fs_call,
}
),
- TP_printk("c=%p %06x:%06x:%06x %s",
+ TP_printk("c=%08x %06x:%06x:%06x %s",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -280,16 +271,16 @@ TRACE_EVENT(afs_make_vl_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_vl_operation, op )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->op = call->operation_ID;
),
- TP_printk("c=%p %s",
+ TP_printk("c=%08x %s",
__entry->call,
__print_symbolic(__entry->op, afs_vl_operations))
);
@@ -300,20 +291,20 @@ TRACE_EVENT(afs_call_done,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(struct rxrpc_call *, rx_call )
__field(int, ret )
__field(u32, abort_code )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->rx_call = call->rxcall;
__entry->ret = call->error;
__entry->abort_code = call->abort_code;
),
- TP_printk(" c=%p ret=%d ab=%d [%p]",
+ TP_printk(" c=%08x ret=%d ab=%d [%p]",
__entry->call,
__entry->ret,
__entry->abort_code,
@@ -327,7 +318,7 @@ TRACE_EVENT(afs_send_pages,
TP_ARGS(call, msg, first, last, offset),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(pgoff_t, first )
__field(pgoff_t, last )
__field(unsigned int, nr )
@@ -337,7 +328,7 @@ TRACE_EVENT(afs_send_pages,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->first = first;
__entry->last = last;
__entry->nr = msg->msg_iter.nr_segs;
@@ -346,7 +337,7 @@ TRACE_EVENT(afs_send_pages,
__entry->flags = msg->msg_flags;
),
- TP_printk(" c=%p %lx-%lx-%lx b=%x o=%x f=%x",
+ TP_printk(" c=%08x %lx-%lx-%lx b=%x o=%x f=%x",
__entry->call,
__entry->first, __entry->first + __entry->nr - 1, __entry->last,
__entry->bytes, __entry->offset,
@@ -360,7 +351,7 @@ TRACE_EVENT(afs_sent_pages,
TP_ARGS(call, first, last, cursor, ret),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(pgoff_t, first )
__field(pgoff_t, last )
__field(pgoff_t, cursor )
@@ -368,14 +359,14 @@ TRACE_EVENT(afs_sent_pages,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->first = first;
__entry->last = last;
__entry->cursor = cursor;
__entry->ret = ret;
),
- TP_printk(" c=%p %lx-%lx c=%lx r=%d",
+ TP_printk(" c=%08x %lx-%lx c=%lx r=%d",
__entry->call,
__entry->first, __entry->last,
__entry->cursor, __entry->ret)
@@ -450,7 +441,7 @@ TRACE_EVENT(afs_call_state,
TP_ARGS(call, from, to, ret, remote_abort),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_call_state, from )
__field(enum afs_call_state, to )
__field(int, ret )
@@ -458,14 +449,14 @@ TRACE_EVENT(afs_call_state,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->from = from;
__entry->to = to;
__entry->ret = ret;
__entry->abort = remote_abort;
),
- TP_printk("c=%p %u->%u r=%d ab=%d",
+ TP_printk("c=%08x %u->%u r=%d ab=%d",
__entry->call,
__entry->from, __entry->to,
__entry->ret, __entry->abort)
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 1bee3e7fdf32..8ea966448b58 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -82,8 +82,8 @@ TRACE_EVENT(fdb_delete,
TP_fast_assign(
__assign_str(br_dev, br->dev->name);
__assign_str(dev, f->dst ? f->dst->dev->name : "null");
- memcpy(__entry->addr, f->addr.addr, ETH_ALEN);
- __entry->vid = f->vlan_id;
+ memcpy(__entry->addr, f->key.addr.addr, ETH_ALEN);
+ __entry->vid = f->key.vlan_id;
),
TP_printk("br_dev %s dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u",
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4342a329821f..965c650a5273 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -71,6 +71,12 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
{ BTRFS_FILE_EXTENT_REG, "REG" }, \
{ BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC"})
+#define show_qgroup_rsv_type(type) \
+ __print_symbolic(type, \
+ { BTRFS_QGROUP_RSV_DATA, "DATA" }, \
+ { BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS" }, \
+ { BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC" })
+
#define BTRFS_GROUP_FLAGS \
{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
{ BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
@@ -193,7 +199,6 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_flags(flag, "|", \
{ (1 << EXTENT_FLAG_PINNED), "PINNED" },\
{ (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
{ (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
{ (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
{ (1 << EXTENT_FLAG_FILLING), "FILLING" },\
@@ -249,6 +254,41 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->refs, __entry->compress_type)
);
+TRACE_EVENT(btrfs_handle_em_exist,
+
+ TP_PROTO(const struct extent_map *existing, const struct extent_map *map, u64 start, u64 len),
+
+ TP_ARGS(existing, map, start, len),
+
+ TP_STRUCT__entry(
+ __field( u64, e_start )
+ __field( u64, e_len )
+ __field( u64, map_start )
+ __field( u64, map_len )
+ __field( u64, start )
+ __field( u64, len )
+ ),
+
+ TP_fast_assign(
+ __entry->e_start = existing->start;
+ __entry->e_len = existing->len;
+ __entry->map_start = map->start;
+ __entry->map_len = map->len;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("start=%llu len=%llu "
+ "existing(start=%llu len=%llu) "
+ "em(start=%llu len=%llu)",
+ (unsigned long long)__entry->start,
+ (unsigned long long)__entry->len,
+ (unsigned long long)__entry->e_start,
+ (unsigned long long)__entry->e_len,
+ (unsigned long long)__entry->map_start,
+ (unsigned long long)__entry->map_len)
+);
+
/* file extent item */
DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
@@ -1599,28 +1639,52 @@ TRACE_EVENT(qgroup_update_counters,
TRACE_EVENT(qgroup_update_reserve,
TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup,
- s64 diff),
+ s64 diff, int type),
- TP_ARGS(fs_info, qgroup, diff),
+ TP_ARGS(fs_info, qgroup, diff, type),
TP_STRUCT__entry_btrfs(
__field( u64, qgid )
__field( u64, cur_reserved )
__field( s64, diff )
+ __field( int, type )
),
TP_fast_assign_btrfs(fs_info,
__entry->qgid = qgroup->qgroupid;
- __entry->cur_reserved = qgroup->reserved;
+ __entry->cur_reserved = qgroup->rsv.values[type];
__entry->diff = diff;
),
- TP_printk_btrfs("qgid=%llu cur_reserved=%llu diff=%lld",
- __entry->qgid, __entry->cur_reserved, __entry->diff)
+ TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
+ __entry->qgid, show_qgroup_rsv_type(__entry->type),
+ __entry->cur_reserved, __entry->diff)
);
TRACE_EVENT(qgroup_meta_reserve,
+ TP_PROTO(struct btrfs_root *root, s64 diff, int type),
+
+ TP_ARGS(root, diff, type),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, refroot )
+ __field( s64, diff )
+ __field( int, type )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->refroot = root->objectid;
+ __entry->diff = diff;
+ ),
+
+ TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
+ show_root_type(__entry->refroot),
+ show_qgroup_rsv_type(__entry->type), __entry->diff)
+);
+
+TRACE_EVENT(qgroup_meta_convert,
+
TP_PROTO(struct btrfs_root *root, s64 diff),
TP_ARGS(root, diff),
@@ -1628,6 +1692,7 @@ TRACE_EVENT(qgroup_meta_reserve,
TP_STRUCT__entry_btrfs(
__field( u64, refroot )
__field( s64, diff )
+ __field( int, type )
),
TP_fast_assign_btrfs(root->fs_info,
@@ -1635,8 +1700,36 @@ TRACE_EVENT(qgroup_meta_reserve,
__entry->diff = diff;
),
- TP_printk_btrfs("refroot=%llu(%s) diff=%lld",
- show_root_type(__entry->refroot), __entry->diff)
+ TP_printk_btrfs("refroot=%llu(%s) type=%s->%s diff=%lld",
+ show_root_type(__entry->refroot),
+ show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PREALLOC),
+ show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PERTRANS),
+ __entry->diff)
+);
+
+TRACE_EVENT(qgroup_meta_free_all_pertrans,
+
+ TP_PROTO(struct btrfs_root *root),
+
+ TP_ARGS(root),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, refroot )
+ __field( s64, diff )
+ __field( int, type )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->refroot = root->objectid;
+ spin_lock(&root->qgroup_meta_rsv_lock);
+ __entry->diff = -(s64)root->qgroup_meta_rsv_pertrans;
+ spin_unlock(&root->qgroup_meta_rsv_lock);
+ __entry->type = BTRFS_QGROUP_RSV_META_PERTRANS;
+ ),
+
+ TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
+ show_root_type(__entry->refroot),
+ show_qgroup_rsv_type(__entry->type), __entry->diff)
);
DECLARE_EVENT_CLASS(btrfs__prelim_ref,
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
new file mode 100644
index 000000000000..aa86e7dba511
--- /dev/null
+++ b/include/trace/events/cachefiles.h
@@ -0,0 +1,325 @@
+/* CacheFiles tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cachefiles
+
+#if !defined(_TRACE_CACHEFILES_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CACHEFILES_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#ifndef __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum cachefiles_obj_ref_trace {
+ cachefiles_obj_put_wait_retry = fscache_obj_ref__nr_traces,
+ cachefiles_obj_put_wait_timeo,
+ cachefiles_obj_ref__nr_traces
+};
+
+#endif
+
+/*
+ * Define enum -> string mappings for display.
+ */
+#define cachefiles_obj_kill_traces \
+ EM(FSCACHE_OBJECT_IS_STALE, "stale") \
+ EM(FSCACHE_OBJECT_NO_SPACE, "no_space") \
+ EM(FSCACHE_OBJECT_WAS_RETIRED, "was_retired") \
+ E_(FSCACHE_OBJECT_WAS_CULLED, "was_culled")
+
+#define cachefiles_obj_ref_traces \
+ EM(fscache_obj_get_add_to_deps, "GET add_to_deps") \
+ EM(fscache_obj_get_queue, "GET queue") \
+ EM(fscache_obj_put_alloc_fail, "PUT alloc_fail") \
+ EM(fscache_obj_put_attach_fail, "PUT attach_fail") \
+ EM(fscache_obj_put_drop_obj, "PUT drop_obj") \
+ EM(fscache_obj_put_enq_dep, "PUT enq_dep") \
+ EM(fscache_obj_put_queue, "PUT queue") \
+ EM(fscache_obj_put_work, "PUT work") \
+ EM(cachefiles_obj_put_wait_retry, "PUT wait_retry") \
+ E_(cachefiles_obj_put_wait_timeo, "PUT wait_timeo")
+
+/*
+ * Export enum symbols to userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+cachefiles_obj_kill_traces;
+cachefiles_obj_ref_traces;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+
+TRACE_EVENT(cachefiles_ref,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct fscache_cookie *cookie,
+ enum cachefiles_obj_ref_trace why,
+ int usage),
+
+ TP_ARGS(obj, cookie, why, usage),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct fscache_cookie *, cookie )
+ __field(enum cachefiles_obj_ref_trace, why )
+ __field(int, usage )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->cookie = cookie;
+ __entry->usage = usage;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%p o=%p u=%d %s",
+ __entry->cookie, __entry->obj, __entry->usage,
+ __print_symbolic(__entry->why, cachefiles_obj_ref_traces))
+ );
+
+TRACE_EVENT(cachefiles_lookup,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ struct inode *inode),
+
+ TP_ARGS(obj, de, inode),
+
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(struct inode *, inode )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->inode = inode;
+ ),
+
+ TP_printk("o=%p d=%p i=%p",
+ __entry->obj, __entry->de, __entry->inode)
+ );
+
+TRACE_EVENT(cachefiles_mkdir,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de, int ret),
+
+ TP_ARGS(obj, de, ret),
+
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("o=%p d=%p r=%u",
+ __entry->obj, __entry->de, __entry->ret)
+ );
+
+TRACE_EVENT(cachefiles_create,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de, int ret),
+
+ TP_ARGS(obj, de, ret),
+
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("o=%p d=%p r=%u",
+ __entry->obj, __entry->de, __entry->ret)
+ );
+
+TRACE_EVENT(cachefiles_unlink,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ enum fscache_why_object_killed why),
+
+ TP_ARGS(obj, de, why),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(enum fscache_why_object_killed, why )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%p d=%p w=%s",
+ __entry->obj, __entry->de,
+ __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ );
+
+TRACE_EVENT(cachefiles_rename,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ struct dentry *to,
+ enum fscache_why_object_killed why),
+
+ TP_ARGS(obj, de, to, why),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(struct dentry *, to )
+ __field(enum fscache_why_object_killed, why )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->to = to;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%p d=%p t=%p w=%s",
+ __entry->obj, __entry->de, __entry->to,
+ __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ );
+
+TRACE_EVENT(cachefiles_mark_active,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de),
+
+ TP_ARGS(obj, de),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ ),
+
+ TP_printk("o=%p d=%p",
+ __entry->obj, __entry->de)
+ );
+
+TRACE_EVENT(cachefiles_wait_active,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ struct cachefiles_object *xobj),
+
+ TP_ARGS(obj, de, xobj),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(struct cachefiles_object *, xobj )
+ __field(u16, flags )
+ __field(u16, fsc_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->xobj = xobj;
+ __entry->flags = xobj->flags;
+ __entry->fsc_flags = xobj->fscache.flags;
+ ),
+
+ TP_printk("o=%p d=%p wo=%p wf=%x wff=%x",
+ __entry->obj, __entry->de, __entry->xobj,
+ __entry->flags, __entry->fsc_flags)
+ );
+
+TRACE_EVENT(cachefiles_mark_inactive,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ struct inode *inode),
+
+ TP_ARGS(obj, de, inode),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(struct inode *, inode )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->inode = inode;
+ ),
+
+ TP_printk("o=%p d=%p i=%p",
+ __entry->obj, __entry->de, __entry->inode)
+ );
+
+TRACE_EVENT(cachefiles_mark_buried,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct dentry *de,
+ enum fscache_why_object_killed why),
+
+ TP_ARGS(obj, de, why),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(struct cachefiles_object *, obj )
+ __field(struct dentry *, de )
+ __field(enum fscache_why_object_killed, why )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->de = de;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%p d=%p w=%s",
+ __entry->obj, __entry->de,
+ __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ );
+
+#endif /* _TRACE_CACHEFILES_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
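
The EM()/E_() pattern used above (and again in fscache.h below) is an X-macro: a single list is expanded twice, first with EM() defined as TRACE_DEFINE_ENUM() so the enum values get exported, then with EM() producing { value, "string" } pairs for __print_symbolic(). A tiny userspace sketch of the same double-expansion technique, with made-up names:

#include <stddef.h>
#include <stdio.h>

/* one list, expanded twice with different definitions of EM()/E_() */
#define COLOR_LIST \
	EM(COLOR_RED,   "red")   \
	EM(COLOR_GREEN, "green") \
	E_(COLOR_BLUE,  "blue")

/* first expansion: build the enum itself */
#define EM(a, b) a,
#define E_(a, b) a
enum color { COLOR_LIST };
#undef EM
#undef E_

/* second expansion: build a value -> string table */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int val; const char *name; } color_names[] = { COLOR_LIST };
#undef EM
#undef E_

int main(void)
{
	for (size_t i = 0; i < sizeof(color_names) / sizeof(color_names[0]); i++)
		printf("%d -> %s\n", color_names[i].val, color_names[i].name);
	return 0;
}
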
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 4d0e3af4e561..0e31eb136c57 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -2585,6 +2585,49 @@ DEFINE_GETFSMAP_EVENT(ext4_getfsmap_low_key);
DEFINE_GETFSMAP_EVENT(ext4_getfsmap_high_key);
DEFINE_GETFSMAP_EVENT(ext4_getfsmap_mapping);
+TRACE_EVENT(ext4_shutdown,
+ TP_PROTO(struct super_block *sb, unsigned long flags),
+
+ TP_ARGS(sb, flags),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( unsigned, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev %d,%d flags %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->flags)
+);
+
+TRACE_EVENT(ext4_error,
+ TP_PROTO(struct super_block *sb, const char *function,
+ unsigned int line),
+
+ TP_ARGS(sb, function, line),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( const char *, function )
+ __field( unsigned, line )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->function = function;
+ __entry->line = line;
+ ),
+
+ TP_printk("dev %d,%d function %s line %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->function, __entry->line)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 8f8dd42fa57b..795698925d20 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -147,7 +147,8 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_NO_SPC_ROLL, "no space roll forward" }, \
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
- { CP_SPEC_LOG_NUM, "log type is 2" })
+ { CP_SPEC_LOG_NUM, "log type is 2" }, \
+ { CP_RECOVER_DIR, "dir needs recovery" })
struct victim_sel_policy;
struct f2fs_map_blocks;
@@ -490,7 +491,7 @@ DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_node,
TRACE_EVENT(f2fs_truncate_partial_nodes,
- TP_PROTO(struct inode *inode, nid_t nid[], int depth, int err),
+ TP_PROTO(struct inode *inode, nid_t *nid, int depth, int err),
TP_ARGS(inode, nid, depth, err),
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
new file mode 100644
index 000000000000..686cfe997ed2
--- /dev/null
+++ b/include/trace/events/fscache.h
@@ -0,0 +1,537 @@
+/* FS-Cache tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fscache
+
+#if !defined(_TRACE_FSCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSCACHE_H
+
+#include <linux/fscache.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#ifndef __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum fscache_cookie_trace {
+ fscache_cookie_collision,
+ fscache_cookie_discard,
+ fscache_cookie_get_acquire_parent,
+ fscache_cookie_get_attach_object,
+ fscache_cookie_get_reacquire,
+ fscache_cookie_get_register_netfs,
+ fscache_cookie_put_acquire_nobufs,
+ fscache_cookie_put_dup_netfs,
+ fscache_cookie_put_relinquish,
+ fscache_cookie_put_object,
+ fscache_cookie_put_parent,
+};
+
+enum fscache_page_trace {
+ fscache_page_cached,
+ fscache_page_inval,
+ fscache_page_maybe_release,
+ fscache_page_radix_clear_store,
+ fscache_page_radix_delete,
+ fscache_page_radix_insert,
+ fscache_page_radix_pend2store,
+ fscache_page_radix_set_pend,
+ fscache_page_uncache,
+ fscache_page_write,
+ fscache_page_write_end,
+ fscache_page_write_end_pend,
+ fscache_page_write_end_noc,
+ fscache_page_write_wait,
+ fscache_page_trace__nr
+};
+
+enum fscache_op_trace {
+ fscache_op_cancel,
+ fscache_op_cancel_all,
+ fscache_op_cancelled,
+ fscache_op_completed,
+ fscache_op_enqueue_async,
+ fscache_op_enqueue_mythread,
+ fscache_op_gc,
+ fscache_op_init,
+ fscache_op_put,
+ fscache_op_run,
+ fscache_op_signal,
+ fscache_op_submit,
+ fscache_op_submit_ex,
+ fscache_op_work,
+ fscache_op_trace__nr
+};
+
+enum fscache_page_op_trace {
+ fscache_page_op_alloc_one,
+ fscache_page_op_attr_changed,
+ fscache_page_op_check_consistency,
+ fscache_page_op_invalidate,
+ fscache_page_op_retr_multi,
+ fscache_page_op_retr_one,
+ fscache_page_op_write_one,
+ fscache_page_op_trace__nr
+};
+
+#endif
+
+/*
+ * Declare tracing information enums and their string mappings for display.
+ */
+#define fscache_cookie_traces \
+ EM(fscache_cookie_collision, "*COLLISION*") \
+ EM(fscache_cookie_discard, "DISCARD") \
+ EM(fscache_cookie_get_acquire_parent, "GET prn") \
+ EM(fscache_cookie_get_attach_object, "GET obj") \
+ EM(fscache_cookie_get_reacquire, "GET raq") \
+ EM(fscache_cookie_get_register_netfs, "GET net") \
+ EM(fscache_cookie_put_acquire_nobufs, "PUT nbf") \
+ EM(fscache_cookie_put_dup_netfs, "PUT dnt") \
+ EM(fscache_cookie_put_relinquish, "PUT rlq") \
+ EM(fscache_cookie_put_object, "PUT obj") \
+ E_(fscache_cookie_put_parent, "PUT prn")
+
+#define fscache_page_traces \
+ EM(fscache_page_cached, "Cached ") \
+ EM(fscache_page_inval, "InvalPg") \
+ EM(fscache_page_maybe_release, "MayRels") \
+ EM(fscache_page_uncache, "Uncache") \
+ EM(fscache_page_radix_clear_store, "RxCStr ") \
+ EM(fscache_page_radix_delete, "RxDel ") \
+ EM(fscache_page_radix_insert, "RxIns ") \
+ EM(fscache_page_radix_pend2store, "RxP2S ") \
+ EM(fscache_page_radix_set_pend, "RxSPend ") \
+ EM(fscache_page_write, "WritePg") \
+ EM(fscache_page_write_end, "EndPgWr") \
+ EM(fscache_page_write_end_pend, "EndPgWP") \
+ EM(fscache_page_write_end_noc, "EndPgNC") \
+ E_(fscache_page_write_wait, "WtOnWrt")
+
+#define fscache_op_traces \
+ EM(fscache_op_cancel, "Cancel1") \
+ EM(fscache_op_cancel_all, "CancelA") \
+ EM(fscache_op_cancelled, "Canclld") \
+ EM(fscache_op_completed, "Complet") \
+ EM(fscache_op_enqueue_async, "EnqAsyn") \
+ EM(fscache_op_enqueue_mythread, "EnqMyTh") \
+ EM(fscache_op_gc, "GC ") \
+ EM(fscache_op_init, "Init ") \
+ EM(fscache_op_put, "Put ") \
+ EM(fscache_op_run, "Run ") \
+ EM(fscache_op_signal, "Signal ") \
+ EM(fscache_op_submit, "Submit ") \
+ EM(fscache_op_submit_ex, "SubmitX") \
+ E_(fscache_op_work, "Work ")
+
+#define fscache_page_op_traces \
+ EM(fscache_page_op_alloc_one, "Alloc1 ") \
+ EM(fscache_page_op_attr_changed, "AttrChg") \
+ EM(fscache_page_op_check_consistency, "CheckCn") \
+ EM(fscache_page_op_invalidate, "Inval ") \
+ EM(fscache_page_op_retr_multi, "RetrMul") \
+ EM(fscache_page_op_retr_one, "Retr1 ") \
+ E_(fscache_page_op_write_one, "Write1 ")
+
+/*
+ * Export enum symbols to userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+fscache_cookie_traces;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+
+TRACE_EVENT(fscache_cookie,
+ TP_PROTO(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where,
+ int usage),
+
+ TP_ARGS(cookie, where, usage),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_cookie *, parent )
+ __field(enum fscache_cookie_trace, where )
+ __field(int, usage )
+ __field(int, n_children )
+ __field(int, n_active )
+ __field(u8, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->parent = cookie->parent;
+ __entry->where = where;
+ __entry->usage = usage;
+ __entry->n_children = atomic_read(&cookie->n_children);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ ),
+
+ TP_printk("%s c=%p u=%d p=%p Nc=%d Na=%d f=%02x",
+ __print_symbolic(__entry->where, fscache_cookie_traces),
+ __entry->cookie, __entry->usage,
+ __entry->parent, __entry->n_children, __entry->n_active,
+ __entry->flags)
+ );
+
+TRACE_EVENT(fscache_netfs,
+ TP_PROTO(struct fscache_netfs *netfs),
+
+ TP_ARGS(netfs),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __array(char, name, 8 )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = netfs->primary_index;
+ strncpy(__entry->name, netfs->name, 8);
+ __entry->name[7] = 0;
+ ),
+
+ TP_printk("c=%p n=%s",
+ __entry->cookie, __entry->name)
+ );
+
+TRACE_EVENT(fscache_acquire,
+ TP_PROTO(struct fscache_cookie *cookie),
+
+ TP_ARGS(cookie),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_cookie *, parent )
+ __array(char, name, 8 )
+ __field(int, p_usage )
+ __field(int, p_n_children )
+ __field(u8, p_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->parent = cookie->parent;
+ __entry->p_usage = atomic_read(&cookie->parent->usage);
+ __entry->p_n_children = atomic_read(&cookie->parent->n_children);
+ __entry->p_flags = cookie->parent->flags;
+ memcpy(__entry->name, cookie->def->name, 8);
+ __entry->name[7] = 0;
+ ),
+
+ TP_printk("c=%p p=%p pu=%d pc=%d pf=%02x n=%s",
+ __entry->cookie, __entry->parent, __entry->p_usage,
+ __entry->p_n_children, __entry->p_flags, __entry->name)
+ );
+
+TRACE_EVENT(fscache_relinquish,
+ TP_PROTO(struct fscache_cookie *cookie, bool retire),
+
+ TP_ARGS(cookie, retire),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_cookie *, parent )
+ __field(int, usage )
+ __field(int, n_children )
+ __field(int, n_active )
+ __field(u8, flags )
+ __field(bool, retire )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->parent = cookie->parent;
+ __entry->usage = atomic_read(&cookie->usage);
+ __entry->n_children = atomic_read(&cookie->n_children);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ __entry->retire = retire;
+ ),
+
+ TP_printk("c=%p u=%d p=%p Nc=%d Na=%d f=%02x r=%u",
+ __entry->cookie, __entry->usage,
+ __entry->parent, __entry->n_children, __entry->n_active,
+ __entry->flags, __entry->retire)
+ );
+
+TRACE_EVENT(fscache_enable,
+ TP_PROTO(struct fscache_cookie *cookie),
+
+ TP_ARGS(cookie),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(int, usage )
+ __field(int, n_children )
+ __field(int, n_active )
+ __field(u8, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->usage = atomic_read(&cookie->usage);
+ __entry->n_children = atomic_read(&cookie->n_children);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ ),
+
+ TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
+ __entry->cookie, __entry->usage,
+ __entry->n_children, __entry->n_active, __entry->flags)
+ );
+
+TRACE_EVENT(fscache_disable,
+ TP_PROTO(struct fscache_cookie *cookie),
+
+ TP_ARGS(cookie),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(int, usage )
+ __field(int, n_children )
+ __field(int, n_active )
+ __field(u8, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->usage = atomic_read(&cookie->usage);
+ __entry->n_children = atomic_read(&cookie->n_children);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ ),
+
+ TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x",
+ __entry->cookie, __entry->usage,
+ __entry->n_children, __entry->n_active, __entry->flags)
+ );
+
+TRACE_EVENT(fscache_osm,
+ TP_PROTO(struct fscache_object *object,
+ const struct fscache_state *state,
+ bool wait, bool oob, s8 event_num),
+
+ TP_ARGS(object, state, wait, oob, event_num),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_object *, object )
+ __array(char, state, 8 )
+ __field(bool, wait )
+ __field(bool, oob )
+ __field(s8, event_num )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = object->cookie;
+ __entry->object = object;
+ __entry->wait = wait;
+ __entry->oob = oob;
+ __entry->event_num = event_num;
+ memcpy(__entry->state, state->short_name, 8);
+ ),
+
+ TP_printk("c=%p o=%p %s %s%sev=%d",
+ __entry->cookie,
+ __entry->object,
+ __entry->state,
+ __print_symbolic(__entry->wait,
+ { true, "WAIT" },
+ { false, "WORK" }),
+ __print_symbolic(__entry->oob,
+ { true, " OOB " },
+ { false, " " }),
+ __entry->event_num)
+ );
+
+TRACE_EVENT(fscache_page,
+ TP_PROTO(struct fscache_cookie *cookie, struct page *page,
+ enum fscache_page_trace why),
+
+ TP_ARGS(cookie, page, why),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(pgoff_t, page )
+ __field(enum fscache_page_trace, why )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->page = page->index;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%p %s pg=%lx",
+ __entry->cookie,
+ __print_symbolic(__entry->why, fscache_page_traces),
+ __entry->page)
+ );
+
+TRACE_EVENT(fscache_check_page,
+ TP_PROTO(struct fscache_cookie *cookie, struct page *page,
+ void *val, int n),
+
+ TP_ARGS(cookie, page, val, n),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(void *, page )
+ __field(void *, val )
+ __field(int, n )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->page = page;
+ __entry->val = val;
+ __entry->n = n;
+ ),
+
+ TP_printk("c=%p pg=%p val=%p n=%d",
+ __entry->cookie, __entry->page, __entry->val, __entry->n)
+ );
+
+TRACE_EVENT(fscache_wake_cookie,
+ TP_PROTO(struct fscache_cookie *cookie),
+
+ TP_ARGS(cookie),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ ),
+
+ TP_printk("c=%p", __entry->cookie)
+ );
+
+TRACE_EVENT(fscache_op,
+ TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
+ enum fscache_op_trace why),
+
+ TP_ARGS(cookie, op, why),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_operation *, op )
+ __field(enum fscache_op_trace, why )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->op = op;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%p op=%p %s",
+ __entry->cookie, __entry->op,
+ __print_symbolic(__entry->why, fscache_op_traces))
+ );
+
+TRACE_EVENT(fscache_page_op,
+ TP_PROTO(struct fscache_cookie *cookie, struct page *page,
+ struct fscache_operation *op, enum fscache_page_op_trace what),
+
+ TP_ARGS(cookie, page, op, what),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(pgoff_t, page )
+ __field(struct fscache_operation *, op )
+ __field(enum fscache_page_op_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->page = page ? page->index : 0;
+ __entry->op = op;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%p %s pg=%lx op=%p",
+ __entry->cookie,
+ __print_symbolic(__entry->what, fscache_page_op_traces),
+ __entry->page, __entry->op)
+ );
+
+TRACE_EVENT(fscache_wrote_page,
+ TP_PROTO(struct fscache_cookie *cookie, struct page *page,
+ struct fscache_operation *op, int ret),
+
+ TP_ARGS(cookie, page, op, ret),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(pgoff_t, page )
+ __field(struct fscache_operation *, op )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->page = page->index;
+ __entry->op = op;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%p pg=%lx op=%p ret=%d",
+ __entry->cookie, __entry->page, __entry->op, __entry->ret)
+ );
+
+TRACE_EVENT(fscache_gang_lookup,
+ TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
+ void **results, int n, pgoff_t store_limit),
+
+ TP_ARGS(cookie, op, results, n, store_limit),
+
+ TP_STRUCT__entry(
+ __field(struct fscache_cookie *, cookie )
+ __field(struct fscache_operation *, op )
+ __field(pgoff_t, results0 )
+ __field(int, n )
+ __field(pgoff_t, store_limit )
+ ),
+
+ TP_fast_assign(
+ __entry->cookie = cookie;
+ __entry->op = op;
+ __entry->results0 = results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1;
+ __entry->n = n;
+ __entry->store_limit = store_limit;
+ ),
+
+ TP_printk("c=%p op=%p r0=%lx n=%d sl=%lx",
+ __entry->cookie, __entry->op, __entry->results0, __entry->n,
+ __entry->store_limit)
+ );
+
+#endif /* _TRACE_FSCACHE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/initcall.h b/include/trace/events/initcall.h
new file mode 100644
index 000000000000..8d6cf10d27c9
--- /dev/null
+++ b/include/trace/events/initcall.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM initcall
+
+#if !defined(_TRACE_INITCALL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INITCALL_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(initcall_level,
+
+ TP_PROTO(const char *level),
+
+ TP_ARGS(level),
+
+ TP_STRUCT__entry(
+ __string(level, level)
+ ),
+
+ TP_fast_assign(
+ __assign_str(level, level);
+ ),
+
+ TP_printk("level=%s", __get_str(level))
+);
+
+TRACE_EVENT(initcall_start,
+
+ TP_PROTO(initcall_t func),
+
+ TP_ARGS(func),
+
+ TP_STRUCT__entry(
+ __field(initcall_t, func)
+ ),
+
+ TP_fast_assign(
+ __entry->func = func;
+ ),
+
+ TP_printk("func=%pS", __entry->func)
+);
+
+TRACE_EVENT(initcall_finish,
+
+ TP_PROTO(initcall_t func, int ret),
+
+ TP_ARGS(func, ret),
+
+ TP_STRUCT__entry(
+ __field(initcall_t, func)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->func = func;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("func=%pS ret=%d", __entry->func, __entry->ret)
+);
+
+#endif /* if !defined(_TRACE_INITCALL_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
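
The three events above are meant to bracket init-call execution: one initcall_level per level string, then initcall_start/initcall_finish around each function pointer that gets invoked. A hedged sketch of that call pattern; the wrapper function is hypothetical, and only the trace_initcall_*() calls correspond to what this header generates:

#include <linux/init.h>
#include <trace/events/initcall.h>

/* hypothetical caller, kernel context assumed; error handling elided */
static int run_one_initcall(initcall_t fn)
{
	int ret;

	trace_initcall_start(fn);		/* "func=%pS" */
	ret = fn();
	trace_initcall_finish(fn, ret);		/* "func=%pS ret=%d" */
	return ret;
}
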
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index bcf4daccd6be..711372845945 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -20,7 +20,7 @@
EM( MR_SYSCALL, "syscall_or_cpuset") \
EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
EM( MR_NUMA_MISPLACED, "numa_misplaced") \
- EMe(MR_CMA, "cma")
+ EMe(MR_CONTIG_RANGE, "contig_range")
/*
* First define the enums in the above macros to be exported to userspace
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
index 200f731be557..7b706ff21335 100644
--- a/include/trace/events/mmc.h
+++ b/include/trace/events/mmc.h
@@ -86,8 +86,8 @@ TRACE_EVENT(mmc_request_start,
__entry->stop_flags, __entry->stop_retries,
__entry->sbc_opcode, __entry->sbc_arg,
__entry->sbc_flags, __entry->sbc_retries,
- __entry->blocks, __entry->blk_addr,
- __entry->blksz, __entry->data_flags, __entry->tag,
+ __entry->blocks, __entry->blksz,
+ __entry->blk_addr, __entry->data_flags, __entry->tag,
__entry->can_retune, __entry->doing_retune,
__entry->retune_now, __entry->need_retune,
__entry->hold_retune, __entry->retune_period)
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index dbe1bb058c09..a81cffb76d89 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -115,7 +115,7 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
-#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
+#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" }
diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h
new file mode 100644
index 000000000000..3930119cab08
--- /dev/null
+++ b/include/trace/events/net_probe_common.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_TRACE_NET_PROBE_COMMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_PROBE_COMMON_H
+
+#define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk) \
+ do { \
+ struct sockaddr_in *v4 = (void *)__entry->saddr; \
+ \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = inet->inet_sport; \
+ v4->sin_addr.s_addr = inet->inet_saddr; \
+ v4 = (void *)__entry->daddr; \
+ v4->sin_family = AF_INET; \
+ v4->sin_port = inet->inet_dport; \
+ v4->sin_addr.s_addr = inet->inet_daddr; \
+ } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \
+ do { \
+ if (sk->sk_family == AF_INET6) { \
+ struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
+ \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = inet->inet_sport; \
+ v6->sin6_addr = inet6_sk(sk)->saddr; \
+ v6 = (void *)__entry->daddr; \
+ v6->sin6_family = AF_INET6; \
+ v6->sin6_port = inet->inet_dport; \
+ v6->sin6_addr = sk->sk_v6_daddr; \
+ } else \
+ TP_STORE_ADDR_PORTS_V4(__entry, inet, sk); \
+ } while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \
+ TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);
+
+#endif
+
+#endif
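
The two helpers above let several networking probes share one convention for recording endpoint addresses: the caller provides byte arrays large enough for a sockaddr_in6, and the macro fills them with either an IPv4 or an IPv6 sockaddr. A hedged sketch of a consumer event follows; the event name is made up, but the field layout mirrors how a TCP- or SCTP-style probe would typically use it.

	/* Illustrative consumer of TP_STORE_ADDR_PORTS -- not an in-tree event. */
	TRACE_EVENT(example_sock_probe,

		TP_PROTO(struct sock *sk),

		TP_ARGS(sk),

		TP_STRUCT__entry(
			/* large enough for either address family */
			__array(__u8, saddr, sizeof(struct sockaddr_in6))
			__array(__u8, daddr, sizeof(struct sockaddr_in6))
		),

		TP_fast_assign(
			struct inet_sock *inet = inet_sk(sk);

			memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
			memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
			TP_STORE_ADDR_PORTS(__entry, inet, sk);
		),

		TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr)
	);
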
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 59d40c454aa0..d8c33298c153 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -179,6 +179,10 @@ TRACE_EVENT(rcu_grace_period_init,
*
* "snap": Captured snapshot of expedited grace period sequence number.
* "start": Started a real expedited grace period.
+ * "reset": Started resetting the tree
+ * "select": Started selecting the CPUs to wait on.
+ * "selectofl": Selected CPU partially offline.
+ * "startwait": Started waiting on selected CPUs.
* "end": Ended a real expedited grace period.
* "endwake": Woke piggybackers up.
* "done": Someone else did the expedited grace period for us.
@@ -243,6 +247,7 @@ TRACE_EVENT(rcu_exp_funnel_lock,
__entry->grphi, __entry->gpevent)
);
+#ifdef CONFIG_RCU_NOCB_CPU
/*
* Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
* to assist debugging of these handoffs.
@@ -285,6 +290,7 @@ TRACE_EVENT(rcu_nocb_wake,
TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
);
+#endif
/*
* Tracepoint for tasks blocking within preemptible-RCU read-side
@@ -421,76 +427,40 @@ TRACE_EVENT(rcu_fqs,
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode, "End" for
- * leaving it, "--=" for events moving towards idle, and "++=" for events
- * moving away from idle. "Error on entry: not idle task" and "Error on
- * exit: not idle task" indicate that a non-idle task is erroneously
- * toying with the idle loop.
+ * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
+ * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
+ * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle.
*
* These events also take a pair of numbers, which indicate the nesting
- * depth before and after the event of interest. Note that task-related
- * events use the upper bits of each number, while interrupt-related
- * events use the lower bits.
+ * depth before and after the event of interest, and a third number that is
+ * the ->dynticks counter. Note that task-related and interrupt-related
+ * events use two separate counters, and that the "++=" and "--=" events
+ * for irq/NMI change the counter by two, while other events change it by one.
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
- TP_ARGS(polarity, oldnesting, newnesting),
+ TP_ARGS(polarity, oldnesting, newnesting, dynticks),
TP_STRUCT__entry(
__field(const char *, polarity)
- __field(long long, oldnesting)
- __field(long long, newnesting)
+ __field(long, oldnesting)
+ __field(long, newnesting)
+ __field(int, dynticks)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
+ __entry->dynticks = atomic_read(&dynticks);
),
- TP_printk("%s %llx %llx", __entry->polarity,
- __entry->oldnesting, __entry->newnesting)
-);
-
-/*
- * Tracepoint for RCU preparation for idle, the goal being to get RCU
- * processing done so that the current CPU can shut off its scheduling
- * clock and enter dyntick-idle mode. One way to accomplish this is
- * to drain all RCU callbacks from this CPU, and the other is to have
- * done everything RCU requires for the current grace period. In this
- * latter case, the CPU will be awakened at the end of the current grace
- * period in order to process the remainder of its callbacks.
- *
- * These tracepoints take a string as argument:
- *
- * "No callbacks": Nothing to do, no callbacks on this CPU.
- * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
- * "Begin holdoff": Attempt failed, don't retry until next jiffy.
- * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
- * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
- * "More callbacks": Still more callbacks, try again to clear them out.
- * "Callbacks drained": All callbacks processed, off to dyntick idle!
- * "Timer": Timer fired to cause CPU to continue processing callbacks.
- * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
- * "Cleanup after idle": Idle exited, timer canceled.
- */
-TRACE_EVENT(rcu_prep_idle,
-
- TP_PROTO(const char *reason),
-
- TP_ARGS(reason),
-
- TP_STRUCT__entry(
- __field(const char *, reason)
- ),
-
- TP_fast_assign(
- __entry->reason = reason;
- ),
-
- TP_printk("%s", __entry->reason)
+ TP_printk("%s %lx %lx %#3x", __entry->polarity,
+ __entry->oldnesting, __entry->newnesting,
+ __entry->dynticks & 0xfff)
);
/*
@@ -799,8 +769,7 @@ TRACE_EVENT(rcu_barrier,
grplo, grphi, gp_tasks) do { } \
while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
-#define trace_rcu_prep_idle(reason) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
do { } while (0)
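
The rcu_dyntick change above adds the ->dynticks counter as a fourth argument (an atomic_t passed by value), so the trace line shows the counter alongside the before/after nesting depths. A hedged sketch of a matching call site; rdtp and its field names are assumptions about the RCU core of this era rather than verbatim code.

	/* Hedged sketch of an emitting call site; struct/field names assumed. */
	static void example_rcu_eqs_enter(struct rcu_dynticks *rdtp)
	{
		trace_rcu_dyntick("Start", rdtp->dynticks_nesting, 0,
				  rdtp->dynticks);	/* atomic_t passed by value */
	}
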
diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h
new file mode 100644
index 000000000000..aa19afc73a4e
--- /dev/null
+++ b/include/trace/events/rdma.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Oracle. All rights reserved.
+ */
+
+/*
+ * enum ib_event_type, from include/rdma/ib_verbs.h
+ */
+
+#define IB_EVENT_LIST \
+ ib_event(CQ_ERR) \
+ ib_event(QP_FATAL) \
+ ib_event(QP_REQ_ERR) \
+ ib_event(QP_ACCESS_ERR) \
+ ib_event(COMM_EST) \
+ ib_event(SQ_DRAINED) \
+ ib_event(PATH_MIG) \
+ ib_event(PATH_MIG_ERR) \
+ ib_event(DEVICE_FATAL) \
+ ib_event(PORT_ACTIVE) \
+ ib_event(PORT_ERR) \
+ ib_event(LID_CHANGE) \
+ ib_event(PKEY_CHANGE) \
+ ib_event(SM_CHANGE) \
+ ib_event(SRQ_ERR) \
+ ib_event(SRQ_LIMIT_REACHED) \
+ ib_event(QP_LAST_WQE_REACHED) \
+ ib_event(CLIENT_REREGISTER) \
+ ib_event(GID_CHANGE) \
+ ib_event_end(WQ_FATAL)
+
+#undef ib_event
+#undef ib_event_end
+
+#define ib_event(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
+#define ib_event_end(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
+
+IB_EVENT_LIST
+
+#undef ib_event
+#undef ib_event_end
+
+#define ib_event(x) { IB_EVENT_##x, #x },
+#define ib_event_end(x) { IB_EVENT_##x, #x }
+
+#define rdma_show_ib_event(x) \
+ __print_symbolic(x, IB_EVENT_LIST)
+
+/*
+ * enum ib_wc_status type, from include/rdma/ib_verbs.h
+ */
+#define IB_WC_STATUS_LIST \
+ ib_wc_status(SUCCESS) \
+ ib_wc_status(LOC_LEN_ERR) \
+ ib_wc_status(LOC_QP_OP_ERR) \
+ ib_wc_status(LOC_EEC_OP_ERR) \
+ ib_wc_status(LOC_PROT_ERR) \
+ ib_wc_status(WR_FLUSH_ERR) \
+ ib_wc_status(MW_BIND_ERR) \
+ ib_wc_status(BAD_RESP_ERR) \
+ ib_wc_status(LOC_ACCESS_ERR) \
+ ib_wc_status(REM_INV_REQ_ERR) \
+ ib_wc_status(REM_ACCESS_ERR) \
+ ib_wc_status(REM_OP_ERR) \
+ ib_wc_status(RETRY_EXC_ERR) \
+ ib_wc_status(RNR_RETRY_EXC_ERR) \
+ ib_wc_status(LOC_RDD_VIOL_ERR) \
+ ib_wc_status(REM_INV_RD_REQ_ERR) \
+ ib_wc_status(REM_ABORT_ERR) \
+ ib_wc_status(INV_EECN_ERR) \
+ ib_wc_status(INV_EEC_STATE_ERR) \
+ ib_wc_status(FATAL_ERR) \
+ ib_wc_status(RESP_TIMEOUT_ERR) \
+ ib_wc_status_end(GENERAL_ERR)
+
+#undef ib_wc_status
+#undef ib_wc_status_end
+
+#define ib_wc_status(x) TRACE_DEFINE_ENUM(IB_WC_##x);
+#define ib_wc_status_end(x) TRACE_DEFINE_ENUM(IB_WC_##x);
+
+IB_WC_STATUS_LIST
+
+#undef ib_wc_status
+#undef ib_wc_status_end
+
+#define ib_wc_status(x) { IB_WC_##x, #x },
+#define ib_wc_status_end(x) { IB_WC_##x, #x }
+
+#define rdma_show_wc_status(x) \
+ __print_symbolic(x, IB_WC_STATUS_LIST)
+
+/*
+ * enum rdma_cm_event_type, from include/rdma/rdma_cm.h
+ */
+#define RDMA_CM_EVENT_LIST \
+ rdma_cm_event(ADDR_RESOLVED) \
+ rdma_cm_event(ADDR_ERROR) \
+ rdma_cm_event(ROUTE_RESOLVED) \
+ rdma_cm_event(ROUTE_ERROR) \
+ rdma_cm_event(CONNECT_REQUEST) \
+ rdma_cm_event(CONNECT_RESPONSE) \
+ rdma_cm_event(CONNECT_ERROR) \
+ rdma_cm_event(UNREACHABLE) \
+ rdma_cm_event(REJECTED) \
+ rdma_cm_event(ESTABLISHED) \
+ rdma_cm_event(DISCONNECTED) \
+ rdma_cm_event(DEVICE_REMOVAL) \
+ rdma_cm_event(MULTICAST_JOIN) \
+ rdma_cm_event(MULTICAST_ERROR) \
+ rdma_cm_event(ADDR_CHANGE) \
+ rdma_cm_event_end(TIMEWAIT_EXIT)
+
+#undef rdma_cm_event
+#undef rdma_cm_event_end
+
+#define rdma_cm_event(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+#define rdma_cm_event_end(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+
+RDMA_CM_EVENT_LIST
+
+#undef rdma_cm_event
+#undef rdma_cm_event_end
+
+#define rdma_cm_event(x) { RDMA_CM_EVENT_##x, #x },
+#define rdma_cm_event_end(x) { RDMA_CM_EVENT_##x, #x }
+
+#define rdma_show_cm_event(x) \
+ __print_symbolic(x, RDMA_CM_EVENT_LIST)
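
rdma.h relies on the usual two-pass list trick: each list is expanded once through TRACE_DEFINE_ENUM() so user-space tooling can resolve the enum values, and once into { value, "name" } pairs for __print_symbolic(). The same pattern, reduced to a self-contained sketch around a made-up enum:

	/* Made-up enum, for illustration of the two-pass pattern only. */
	enum example_state { EX_IDLE, EX_RUNNING, EX_DONE };

	#define EXAMPLE_STATE_LIST	\
		ex_state(IDLE)		\
		ex_state(RUNNING)	\
		ex_state_end(DONE)

	/* pass 1: export the enum values to user space */
	#define ex_state(x)	TRACE_DEFINE_ENUM(EX_##x);
	#define ex_state_end(x)	TRACE_DEFINE_ENUM(EX_##x);

	EXAMPLE_STATE_LIST

	#undef ex_state
	#undef ex_state_end

	/* pass 2: build the { value, "name" } table for __print_symbolic() */
	#define ex_state(x)	{ EX_##x, #x },
	#define ex_state_end(x)	{ EX_##x, #x }

	#define example_show_state(x)	\
		__print_symbolic(x, EXAMPLE_STATE_LIST)
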
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
new file mode 100644
index 000000000000..50ed3f8bf534
--- /dev/null
+++ b/include/trace/events/rpcrdma.h
@@ -0,0 +1,890 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Oracle. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpcrdma
+
+#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPCRDMA_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/rdma.h>
+
+/**
+ ** Event classes
+ **/
+
+DECLARE_EVENT_CLASS(xprtrdma_reply_event,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rep),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(const void *, r_xprt)
+ __field(u32, xid)
+ __field(u32, version)
+ __field(u32, proc)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->r_xprt = rep->rr_rxprt;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->version = be32_to_cpu(rep->rr_vers);
+ __entry->proc = be32_to_cpu(rep->rr_proc);
+ ),
+
+ TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
+ __entry->r_xprt, __entry->xid, __entry->rep,
+ __entry->version, __entry->proc
+ )
+);
+
+#define DEFINE_REPLY_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_reply_event, name, \
+ TP_PROTO( \
+ const struct rpcrdma_rep *rep \
+ ), \
+ TP_ARGS(rep))
+
+DECLARE_EVENT_CLASS(xprtrdma_rxprt,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt
+ ),
+
+ TP_ARGS(r_xprt),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p",
+ __get_str(addr), __get_str(port), __entry->r_xprt
+ )
+);
+
+#define DEFINE_RXPRT_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_rxprt, name, \
+ TP_PROTO( \
+ const struct rpcrdma_xprt *r_xprt \
+ ), \
+ TP_ARGS(r_xprt))
+
+DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ unsigned int pos,
+ struct rpcrdma_mr *mr,
+ int nsegs
+ ),
+
+ TP_ARGS(task, pos, mr, nsegs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, mr)
+ __field(unsigned int, pos)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(int, nsegs)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr = mr;
+ __entry->pos = pos;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->nsegs = nsegs;
+ ),
+
+ TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id, __entry->mr,
+ __entry->pos, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ __entry->nents < __entry->nsegs ? "more" : "last"
+ )
+);
+
+#define DEFINE_RDCH_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_rdch_event, name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ unsigned int pos, \
+ struct rpcrdma_mr *mr, \
+ int nsegs \
+ ), \
+ TP_ARGS(task, pos, mr, nsegs))
+
+DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ struct rpcrdma_mr *mr,
+ int nsegs
+ ),
+
+ TP_ARGS(task, mr, nsegs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, mr)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(int, nsegs)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr = mr;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->nsegs = nsegs;
+ ),
+
+ TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id, __entry->mr,
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle,
+ __entry->nents < __entry->nsegs ? "more" : "last"
+ )
+);
+
+#define DEFINE_WRCH_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_wrch_event, name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ struct rpcrdma_mr *mr, \
+ int nsegs \
+ ), \
+ TP_ARGS(task, mr, nsegs))
+
+TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
+TRACE_DEFINE_ENUM(FRWR_IS_VALID);
+TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
+TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
+
+#define xprtrdma_show_frwr_state(x) \
+ __print_symbolic(x, \
+ { FRWR_IS_INVALID, "INVALID" }, \
+ { FRWR_IS_VALID, "VALID" }, \
+ { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
+ { FRWR_FLUSHED_LI, "FLUSHED_LI" })
+
+DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpcrdma_frwr *frwr
+ ),
+
+ TP_ARGS(wc, frwr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(unsigned int, state)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
+ __entry->state = frwr->fr_state;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk(
+ "mr=%p state=%s: %s (%u/0x%x)",
+ __entry->mr, xprtrdma_show_frwr_state(__entry->state),
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_FRWR_DONE_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_frwr_done, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpcrdma_frwr *frwr \
+ ), \
+ TP_ARGS(wc, frwr))
+
+DECLARE_EVENT_CLASS(xprtrdma_mr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr
+ ),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ ),
+
+ TP_printk("mr=%p %u@0x%016llx:0x%08x",
+ __entry->mr, __entry->length,
+ (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+#define DEFINE_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_mr, name, \
+ TP_PROTO( \
+ const struct rpcrdma_mr *mr \
+ ), \
+ TP_ARGS(mr))
+
+DECLARE_EVENT_CLASS(xprtrdma_cb_event,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(const void *, rqst)
+ __field(const void *, rep)
+ __field(const void *, req)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->rqst = rqst;
+ __entry->req = rpcr_to_rdmar(rqst);
+ __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
+ __entry->xid, __entry->rqst, __entry->req, __entry->rep
+ )
+);
+
+#define DEFINE_CB_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_cb_event, name, \
+ TP_PROTO( \
+ const struct rpc_rqst *rqst \
+ ), \
+ TP_ARGS(rqst))
+
+/**
+ ** Connection events
+ **/
+
+TRACE_EVENT(xprtrdma_conn_upcall,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ struct rdma_cm_event *event
+ ),
+
+ TP_ARGS(r_xprt, event),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, event)
+ __field(int, status)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->event = event->event;
+ __entry->status = event->status;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, rdma_show_cm_event(__entry->event),
+ __entry->event, __entry->status
+ )
+);
+
+TRACE_EVENT(xprtrdma_disconnect,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ int status
+ ),
+
+ TP_ARGS(r_xprt, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(int, status)
+ __field(int, connected)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->status = status;
+ __entry->connected = r_xprt->rx_ep.rep_connected;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, __entry->status,
+ __entry->connected == 1 ? "still " : "dis"
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
+DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
+DEFINE_RXPRT_EVENT(xprtrdma_create);
+DEFINE_RXPRT_EVENT(xprtrdma_destroy);
+DEFINE_RXPRT_EVENT(xprtrdma_remove);
+DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
+DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
+DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
+
+TRACE_EVENT(xprtrdma_qp_error,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ const struct ib_event *event
+ ),
+
+ TP_ARGS(r_xprt, event),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, event)
+ __string(name, event->device->name)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->event = event->event;
+ __assign_str(name, event->device->name);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __get_str(name), rdma_show_ib_event(__entry->event),
+ __entry->event
+ )
+);
+
+/**
+ ** Call events
+ **/
+
+TRACE_EVENT(xprtrdma_createmrs,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int count
+ ),
+
+ TP_ARGS(r_xprt, count),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->count = count;
+ ),
+
+ TP_printk("r_xprt=%p: created %u MRs",
+ __entry->r_xprt, __entry->count
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
+
+DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
+DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
+DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
+
+TRACE_DEFINE_ENUM(rpcrdma_noch);
+TRACE_DEFINE_ENUM(rpcrdma_readch);
+TRACE_DEFINE_ENUM(rpcrdma_areadch);
+TRACE_DEFINE_ENUM(rpcrdma_writech);
+TRACE_DEFINE_ENUM(rpcrdma_replych);
+
+#define xprtrdma_show_chunktype(x) \
+ __print_symbolic(x, \
+ { rpcrdma_noch, "inline" }, \
+ { rpcrdma_readch, "read list" }, \
+ { rpcrdma_areadch, "*read list" }, \
+ { rpcrdma_writech, "write list" }, \
+ { rpcrdma_replych, "reply chunk" })
+
+TRACE_EVENT(xprtrdma_marshal,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ unsigned int hdrlen,
+ unsigned int rtype,
+ unsigned int wtype
+ ),
+
+ TP_ARGS(rqst, hdrlen, rtype, wtype),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(unsigned int, hdrlen)
+ __field(unsigned int, headlen)
+ __field(unsigned int, pagelen)
+ __field(unsigned int, taillen)
+ __field(unsigned int, rtype)
+ __field(unsigned int, wtype)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->hdrlen = hdrlen;
+ __entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
+ __entry->pagelen = rqst->rq_snd_buf.page_len;
+ __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
+ __entry->rtype = rtype;
+ __entry->wtype = wtype;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->hdrlen,
+ __entry->headlen, __entry->pagelen, __entry->taillen,
+ xprtrdma_show_chunktype(__entry->rtype),
+ xprtrdma_show_chunktype(__entry->wtype)
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_send,
+ TP_PROTO(
+ const struct rpcrdma_req *req,
+ int status
+ ),
+
+ TP_ARGS(req, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(int, num_sge)
+ __field(bool, signaled)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
+ __entry->signaled = req->rl_sendctx->sc_wr.send_flags &
+ IB_SEND_SIGNALED;
+ __entry->status = status;
+ ),
+
+ TP_printk("req=%p, %d SGEs%s, status=%d",
+ __entry->req, __entry->num_sge,
+ (__entry->signaled ? ", signaled" : ""),
+ __entry->status
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_recv,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep,
+ int status
+ ),
+
+ TP_ARGS(rep, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->status = status;
+ ),
+
+ TP_printk("rep=%p status=%d",
+ __entry->rep, __entry->status
+ )
+);
+
+/**
+ ** Completion events
+ **/
+
+TRACE_EVENT(xprtrdma_wc_send,
+ TP_PROTO(
+ const struct rpcrdma_sendctx *sc,
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(sc, wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(unsigned int, unmap_count)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->req = sc->sc_req;
+ __entry->unmap_count = sc->sc_unmap_count;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
+ __entry->req, __entry->unmap_count,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+TRACE_EVENT(xprtrdma_wc_receive,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep,
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(rep, wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(unsigned int, byte_len)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->byte_len = wc->byte_len;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk("rep=%p, %u bytes: %s (%u/0x%x)",
+ __entry->rep, __entry->byte_len,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
+
+DEFINE_MR_EVENT(xprtrdma_localinv);
+DEFINE_MR_EVENT(xprtrdma_dma_unmap);
+DEFINE_MR_EVENT(xprtrdma_remoteinv);
+DEFINE_MR_EVENT(xprtrdma_recover_mr);
+
+/**
+ ** Reply events
+ **/
+
+TRACE_EVENT(xprtrdma_reply,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_rep *rep,
+ const struct rpcrdma_req *req,
+ unsigned int credits
+ ),
+
+ TP_ARGS(task, rep, req, credits),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, rep)
+ __field(const void *, req)
+ __field(u32, xid)
+ __field(unsigned int, credits)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->rep = rep;
+ __entry->req = req;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->credits = credits;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->credits, __entry->rep, __entry->req
+ )
+);
+
+TRACE_EVENT(xprtrdma_defer_cmp,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rep),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, rep)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
+ __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
+ __entry->rep = rep;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->rep
+ )
+);
+
+DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
+DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
+DEFINE_REPLY_EVENT(xprtrdma_reply_short);
+DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
+
+TRACE_EVENT(xprtrdma_fixup,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int len,
+ int hdrlen
+ ),
+
+ TP_ARGS(rqst, len, hdrlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, base)
+ __field(int, len)
+ __field(int, hdrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->base = rqst->rq_rcv_buf.head[0].iov_base;
+ __entry->len = len;
+ __entry->hdrlen = hdrlen;
+ ),
+
+ TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->base, __entry->len, __entry->hdrlen
+ )
+);
+
+TRACE_EVENT(xprtrdma_fixup_pg,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int pageno,
+ const void *pos,
+ int len,
+ int curlen
+ ),
+
+ TP_ARGS(rqst, pageno, pos, len, curlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, pos)
+ __field(int, pageno)
+ __field(int, len)
+ __field(int, curlen)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->pos = pos;
+ __entry->pageno = pageno;
+ __entry->len = len;
+ __entry->curlen = curlen;
+ ),
+
+ TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->pageno, __entry->pos, __entry->len, __entry->curlen
+ )
+);
+
+TRACE_EVENT(xprtrdma_decode_seg,
+ TP_PROTO(
+ u32 handle,
+ u32 length,
+ u64 offset
+ ),
+
+ TP_ARGS(handle, length, offset),
+
+ TP_STRUCT__entry(
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->handle = handle;
+ __entry->length = length;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("%u@0x%016llx:0x%08x",
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+/**
+ ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
+ **/
+
+TRACE_EVENT(xprtrdma_allocate,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(task, req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, req)
+ __field(const void *, rep)
+ __field(size_t, callsize)
+ __field(size_t, rcvsize)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->req = req;
+ __entry->rep = req ? req->rl_reply : NULL;
+ __entry->callsize = task->tk_rqstp->rq_callsize;
+ __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
+ ),
+
+ TP_printk("task:%u@%u req=%p rep=%p (%zu, %zu)",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->rep,
+ __entry->callsize, __entry->rcvsize
+ )
+);
+
+TRACE_EVENT(xprtrdma_rpc_done,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(task, req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, req)
+ __field(const void *, rep)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->req = req;
+ __entry->rep = req->rl_reply;
+ ),
+
+ TP_printk("task:%u@%u req=%p rep=%p",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->rep
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_noreps);
+
+/**
+ ** Callback events
+ **/
+
+TRACE_EVENT(xprtrdma_cb_setup,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int reqs
+ ),
+
+ TP_ARGS(r_xprt, reqs),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, reqs)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->reqs = reqs;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, __entry->reqs
+ )
+);
+
+DEFINE_CB_EVENT(xprtrdma_cb_call);
+DEFINE_CB_EVENT(xprtrdma_cb_reply);
+
+#endif /* _TRACE_RPCRDMA_H */
+
+#include <trace/define_trace.h>
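
A single DECLARE_EVENT_CLASS() carries the field layout and format string; each DEFINE_*_EVENT() wrapper then stamps out a named tracepoint that reuses it, which keeps the per-event footprint small. A hedged sketch of how the class-derived connection events might be used at a call site (the function and the timed_out flag are illustrative, not sunrpc code):

	/* Hedged sketch of call sites for the DEFINE_RXPRT_EVENT() tracepoints. */
	static void example_connect_path(struct rpcrdma_xprt *r_xprt, bool timed_out)
	{
		trace_xprtrdma_conn_start(r_xprt);	/* logs peer address and r_xprt */
		if (timed_out)
			trace_xprtrdma_conn_tout(r_xprt);
	}
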
diff --git a/include/trace/events/rtc.h b/include/trace/events/rtc.h
new file mode 100644
index 000000000000..621333f1c890
--- /dev/null
+++ b/include/trace/events/rtc.h
@@ -0,0 +1,206 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtc
+
+#if !defined(_TRACE_RTC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTC_H
+
+#include <linux/rtc.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rtc_time_alarm_class,
+
+ TP_PROTO(time64_t secs, int err),
+
+ TP_ARGS(secs, err),
+
+ TP_STRUCT__entry(
+ __field(time64_t, secs)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->secs = secs;
+ __entry->err = err;
+ ),
+
+ TP_printk("UTC (%lld) (%d)",
+ __entry->secs, __entry->err
+ )
+);
+
+DEFINE_EVENT(rtc_time_alarm_class, rtc_set_time,
+
+ TP_PROTO(time64_t secs, int err),
+
+ TP_ARGS(secs, err)
+);
+
+DEFINE_EVENT(rtc_time_alarm_class, rtc_read_time,
+
+ TP_PROTO(time64_t secs, int err),
+
+ TP_ARGS(secs, err)
+);
+
+DEFINE_EVENT(rtc_time_alarm_class, rtc_set_alarm,
+
+ TP_PROTO(time64_t secs, int err),
+
+ TP_ARGS(secs, err)
+);
+
+DEFINE_EVENT(rtc_time_alarm_class, rtc_read_alarm,
+
+ TP_PROTO(time64_t secs, int err),
+
+ TP_ARGS(secs, err)
+);
+
+TRACE_EVENT(rtc_irq_set_freq,
+
+ TP_PROTO(int freq, int err),
+
+ TP_ARGS(freq, err),
+
+ TP_STRUCT__entry(
+ __field(int, freq)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->freq = freq;
+ __entry->err = err;
+ ),
+
+	TP_printk("set RTC periodic IRQ frequency:%d (%d)",
+ __entry->freq, __entry->err
+ )
+);
+
+TRACE_EVENT(rtc_irq_set_state,
+
+ TP_PROTO(int enabled, int err),
+
+ TP_ARGS(enabled, err),
+
+ TP_STRUCT__entry(
+ __field(int, enabled)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->enabled = enabled;
+ __entry->err = err;
+ ),
+
+ TP_printk("%s RTC 2^N Hz periodic IRQs (%d)",
+ __entry->enabled ? "enable" : "disable",
+ __entry->err
+ )
+);
+
+TRACE_EVENT(rtc_alarm_irq_enable,
+
+ TP_PROTO(unsigned int enabled, int err),
+
+ TP_ARGS(enabled, err),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, enabled)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->enabled = enabled;
+ __entry->err = err;
+ ),
+
+ TP_printk("%s RTC alarm IRQ (%d)",
+ __entry->enabled ? "enable" : "disable",
+ __entry->err
+ )
+);
+
+DECLARE_EVENT_CLASS(rtc_offset_class,
+
+ TP_PROTO(long offset, int err),
+
+ TP_ARGS(offset, err),
+
+ TP_STRUCT__entry(
+ __field(long, offset)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->offset = offset;
+ __entry->err = err;
+ ),
+
+ TP_printk("RTC offset: %ld (%d)",
+ __entry->offset, __entry->err
+ )
+);
+
+DEFINE_EVENT(rtc_offset_class, rtc_set_offset,
+
+ TP_PROTO(long offset, int err),
+
+ TP_ARGS(offset, err)
+);
+
+DEFINE_EVENT(rtc_offset_class, rtc_read_offset,
+
+ TP_PROTO(long offset, int err),
+
+ TP_ARGS(offset, err)
+);
+
+DECLARE_EVENT_CLASS(rtc_timer_class,
+
+ TP_PROTO(struct rtc_timer *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field(struct rtc_timer *, timer)
+ __field(ktime_t, expires)
+ __field(ktime_t, period)
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->expires = timer->node.expires;
+ __entry->period = timer->period;
+ ),
+
+ TP_printk("RTC timer:(%p) expires:%lld period:%lld",
+ __entry->timer, __entry->expires, __entry->period
+ )
+);
+
+DEFINE_EVENT(rtc_timer_class, rtc_timer_enqueue,
+
+ TP_PROTO(struct rtc_timer *timer),
+
+ TP_ARGS(timer)
+);
+
+DEFINE_EVENT(rtc_timer_class, rtc_timer_dequeue,
+
+ TP_PROTO(struct rtc_timer *timer),
+
+ TP_ARGS(timer)
+);
+
+DEFINE_EVENT(rtc_timer_class, rtc_timer_fired,
+
+ TP_PROTO(struct rtc_timer *timer),
+
+ TP_ARGS(timer)
+);
+
+#endif /* _TRACE_RTC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
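
The events above all record a (value, errno) pair, so the RTC core can emit them immediately after the driver callback returns. A minimal sketch of an emitting wrapper, assuming the standard rtc_class_ops callback signature and the rtc_tm_to_time64() conversion; the wrapper name and error handling are illustrative.

	/* Hedged sketch of an emitting wrapper in the RTC core. */
	static int example_rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
	{
		int err;

		err = rtc->ops->set_time(rtc->dev.parent, tm);
		trace_rtc_set_time(rtc_tm_to_time64(tm), err);	/* "UTC (%lld) (%d)" */
		return err;
	}
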
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 36cb50c111a6..9e96c2fe2793 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -42,6 +42,22 @@ enum rxrpc_skb_trace {
rxrpc_skb_tx_seen,
};
+enum rxrpc_local_trace {
+ rxrpc_local_got,
+ rxrpc_local_new,
+ rxrpc_local_processing,
+ rxrpc_local_put,
+ rxrpc_local_queued,
+};
+
+enum rxrpc_peer_trace {
+ rxrpc_peer_got,
+ rxrpc_peer_new,
+ rxrpc_peer_processing,
+ rxrpc_peer_put,
+ rxrpc_peer_queued_error,
+};
+
enum rxrpc_conn_trace {
rxrpc_conn_got,
rxrpc_conn_new_client,
@@ -215,6 +231,20 @@ enum rxrpc_congest_change {
EM(rxrpc_skb_tx_rotated, "Tx ROT") \
E_(rxrpc_skb_tx_seen, "Tx SEE")
+#define rxrpc_local_traces \
+ EM(rxrpc_local_got, "GOT") \
+ EM(rxrpc_local_new, "NEW") \
+ EM(rxrpc_local_processing, "PRO") \
+ EM(rxrpc_local_put, "PUT") \
+ E_(rxrpc_local_queued, "QUE")
+
+#define rxrpc_peer_traces \
+ EM(rxrpc_peer_got, "GOT") \
+ EM(rxrpc_peer_new, "NEW") \
+ EM(rxrpc_peer_processing, "PRO") \
+ EM(rxrpc_peer_put, "PUT") \
+ E_(rxrpc_peer_queued_error, "QER")
+
#define rxrpc_conn_traces \
EM(rxrpc_conn_got, "GOT") \
EM(rxrpc_conn_new_client, "NWc") \
@@ -400,6 +430,13 @@ enum rxrpc_congest_change {
EM(RXRPC_ACK_IDLE, "IDL") \
E_(RXRPC_ACK__INVALID, "-?-")
+#define rxrpc_completions \
+ EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \
+ EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \
+ EM(RXRPC_CALL_LOCALLY_ABORTED, "LocalAbort") \
+ EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \
+ E_(RXRPC_CALL_NETWORK_ERROR, "NetError")
+
/*
* Export enum symbols via userspace.
*/
@@ -409,6 +446,7 @@ enum rxrpc_congest_change {
#define E_(a, b) TRACE_DEFINE_ENUM(a);
rxrpc_skb_traces;
+rxrpc_local_traces;
rxrpc_conn_traces;
rxrpc_client_traces;
rxrpc_call_traces;
@@ -420,6 +458,7 @@ rxrpc_rtt_rx_traces;
rxrpc_timer_traces;
rxrpc_propose_ack_traces;
rxrpc_propose_ack_outcomes;
+rxrpc_congest_modes;
rxrpc_congest_changes;
/*
@@ -431,6 +470,60 @@ rxrpc_congest_changes;
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
+TRACE_EVENT(rxrpc_local,
+ TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
+ int usage, const void *where),
+
+ TP_ARGS(local, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, local )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->local = local->debug_id;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ ),
+
+ TP_printk("L=%08x %s u=%d sp=%pSR",
+ __entry->local,
+ __print_symbolic(__entry->op, rxrpc_local_traces),
+ __entry->usage,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_peer,
+ TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
+ int usage, const void *where),
+
+ TP_ARGS(peer, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->peer = peer->debug_id;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ ),
+
+ TP_printk("P=%08x %s u=%d sp=%pSR",
+ __entry->peer,
+ __print_symbolic(__entry->op, rxrpc_peer_traces),
+ __entry->usage,
+ __entry->where)
+ );
+
TRACE_EVENT(rxrpc_conn,
TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
int usage, const void *where),
@@ -438,20 +531,20 @@ TRACE_EVENT(rxrpc_conn,
TP_ARGS(conn, op, usage, where),
TP_STRUCT__entry(
- __field(struct rxrpc_connection *, conn )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
+ __field(unsigned int, conn )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
),
TP_fast_assign(
- __entry->conn = conn;
+ __entry->conn = conn->debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
),
- TP_printk("C=%p %s u=%d sp=%pSR",
+ TP_printk("C=%08x %s u=%d sp=%pSR",
__entry->conn,
__print_symbolic(__entry->op, rxrpc_conn_traces),
__entry->usage,
@@ -465,7 +558,7 @@ TRACE_EVENT(rxrpc_client,
TP_ARGS(conn, channel, op),
TP_STRUCT__entry(
- __field(struct rxrpc_connection *, conn )
+ __field(unsigned int, conn )
__field(u32, cid )
__field(int, channel )
__field(int, usage )
@@ -474,7 +567,7 @@ TRACE_EVENT(rxrpc_client,
),
TP_fast_assign(
- __entry->conn = conn;
+ __entry->conn = conn->debug_id;
__entry->channel = channel;
__entry->usage = atomic_read(&conn->usage);
__entry->op = op;
@@ -482,7 +575,7 @@ TRACE_EVENT(rxrpc_client,
__entry->cs = conn->cache_state;
),
- TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
+ TP_printk("C=%08x h=%2d %s %s i=%08x u=%d",
__entry->conn,
__entry->channel,
__print_symbolic(__entry->op, rxrpc_client_traces),
@@ -498,7 +591,7 @@ TRACE_EVENT(rxrpc_call,
TP_ARGS(call, op, usage, where, aux),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(int, op )
__field(int, usage )
__field(const void *, where )
@@ -506,14 +599,14 @@ TRACE_EVENT(rxrpc_call,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
__entry->aux = aux;
),
- TP_printk("c=%p %s u=%d sp=%pSR a=%p",
+ TP_printk("c=%08x %s u=%d sp=%pSR a=%p",
__entry->call,
__print_symbolic(__entry->op, rxrpc_call_traces),
__entry->usage,
@@ -592,12 +685,13 @@ TRACE_EVENT(rxrpc_rx_done,
);
TRACE_EVENT(rxrpc_abort,
- TP_PROTO(const char *why, u32 cid, u32 call_id, rxrpc_seq_t seq,
- int abort_code, int error),
+ TP_PROTO(unsigned int call_nr, const char *why, u32 cid, u32 call_id,
+ rxrpc_seq_t seq, int abort_code, int error),
- TP_ARGS(why, cid, call_id, seq, abort_code, error),
+ TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),
TP_STRUCT__entry(
+ __field(unsigned int, call_nr )
__array(char, why, 4 )
__field(u32, cid )
__field(u32, call_id )
@@ -608,6 +702,7 @@ TRACE_EVENT(rxrpc_abort,
TP_fast_assign(
memcpy(__entry->why, why, 4);
+ __entry->call_nr = call_nr;
__entry->cid = cid;
__entry->call_id = call_id;
__entry->abort_code = abort_code;
@@ -615,18 +710,45 @@ TRACE_EVENT(rxrpc_abort,
__entry->seq = seq;
),
- TP_printk("%08x:%08x s=%u a=%d e=%d %s",
+ TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s",
+ __entry->call_nr,
__entry->cid, __entry->call_id, __entry->seq,
__entry->abort_code, __entry->error, __entry->why)
);
+TRACE_EVENT(rxrpc_call_complete,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum rxrpc_call_completion, compl )
+ __field(int, error )
+ __field(u32, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->compl = call->completion;
+ __entry->error = call->error;
+ __entry->abort_code = call->abort_code;
+ ),
+
+ TP_printk("c=%08x %s r=%d ac=%d",
+ __entry->call,
+ __print_symbolic(__entry->compl, rxrpc_completions),
+ __entry->error,
+ __entry->abort_code)
+ );
+
TRACE_EVENT(rxrpc_transmit,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
TP_ARGS(call, why),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_transmit_trace, why )
__field(rxrpc_seq_t, tx_hard_ack )
__field(rxrpc_seq_t, tx_top )
@@ -634,14 +756,14 @@ TRACE_EVENT(rxrpc_transmit,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->tx_hard_ack = call->tx_hard_ack;
__entry->tx_top = call->tx_top;
__entry->tx_winsize = call->tx_winsize;
),
- TP_printk("c=%p %s f=%08x n=%u/%u",
+ TP_printk("c=%08x %s f=%08x n=%u/%u",
__entry->call,
__print_symbolic(__entry->why, rxrpc_transmit_traces),
__entry->tx_hard_ack + 1,
@@ -656,7 +778,7 @@ TRACE_EVENT(rxrpc_rx_data,
TP_ARGS(call, seq, serial, flags, anno),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(rxrpc_serial_t, serial )
__field(u8, flags )
@@ -664,14 +786,14 @@ TRACE_EVENT(rxrpc_rx_data,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
__entry->anno = anno;
),
- TP_printk("c=%p DATA %08x q=%08x fl=%02x a=%02x",
+ TP_printk("c=%08x DATA %08x q=%08x fl=%02x a=%02x",
__entry->call,
__entry->serial,
__entry->seq,
@@ -687,7 +809,7 @@ TRACE_EVENT(rxrpc_rx_ack,
TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(rxrpc_serial_t, ack_serial )
__field(rxrpc_seq_t, first )
@@ -697,7 +819,7 @@ TRACE_EVENT(rxrpc_rx_ack,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->serial = serial;
__entry->ack_serial = ack_serial;
__entry->first = first;
@@ -706,7 +828,7 @@ TRACE_EVENT(rxrpc_rx_ack,
__entry->n_acks = n_acks;
),
- TP_printk("c=%p %08x %s r=%08x f=%08x p=%08x n=%u",
+ TP_printk("c=%08x %08x %s r=%08x f=%08x p=%08x n=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
@@ -723,18 +845,18 @@ TRACE_EVENT(rxrpc_rx_abort,
TP_ARGS(call, serial, abort_code),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(u32, abort_code )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->serial = serial;
__entry->abort_code = abort_code;
),
- TP_printk("c=%p ABORT %08x ac=%d",
+ TP_printk("c=%08x ABORT %08x ac=%d",
__entry->call,
__entry->serial,
__entry->abort_code)
@@ -747,20 +869,20 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
TP_ARGS(call, serial, rwind, wake),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(u32, rwind )
__field(bool, wake )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->serial = serial;
__entry->rwind = rwind;
__entry->wake = wake;
),
- TP_printk("c=%p %08x rw=%u%s",
+ TP_printk("c=%08x %08x rw=%u%s",
__entry->call,
__entry->serial,
__entry->rwind,
@@ -774,7 +896,7 @@ TRACE_EVENT(rxrpc_tx_data,
TP_ARGS(call, seq, serial, flags, retrans, lose),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(rxrpc_serial_t, serial )
__field(u8, flags )
@@ -783,7 +905,7 @@ TRACE_EVENT(rxrpc_tx_data,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
@@ -791,7 +913,7 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->lose = lose;
),
- TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
+ TP_printk("c=%08x DATA %08x q=%08x fl=%02x%s%s",
__entry->call,
__entry->serial,
__entry->seq,
@@ -808,7 +930,7 @@ TRACE_EVENT(rxrpc_tx_ack,
TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(rxrpc_seq_t, ack_first )
__field(rxrpc_serial_t, ack_serial )
@@ -817,7 +939,7 @@ TRACE_EVENT(rxrpc_tx_ack,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call ? call->debug_id : 0;
__entry->serial = serial;
__entry->ack_first = ack_first;
__entry->ack_serial = ack_serial;
@@ -825,7 +947,7 @@ TRACE_EVENT(rxrpc_tx_ack,
__entry->n_acks = n_acks;
),
- TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
+ TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
@@ -841,7 +963,7 @@ TRACE_EVENT(rxrpc_receive,
TP_ARGS(call, why, serial, seq),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_receive_trace, why )
__field(rxrpc_serial_t, serial )
__field(rxrpc_seq_t, seq )
@@ -850,7 +972,7 @@ TRACE_EVENT(rxrpc_receive,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->serial = serial;
__entry->seq = seq;
@@ -858,7 +980,7 @@ TRACE_EVENT(rxrpc_receive,
__entry->top = call->rx_top;
),
- TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
+ TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x",
__entry->call,
__print_symbolic(__entry->why, rxrpc_receive_traces),
__entry->serial,
@@ -875,7 +997,7 @@ TRACE_EVENT(rxrpc_recvmsg,
TP_ARGS(call, why, seq, offset, len, ret),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_recvmsg_trace, why )
__field(rxrpc_seq_t, seq )
__field(unsigned int, offset )
@@ -884,7 +1006,7 @@ TRACE_EVENT(rxrpc_recvmsg,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->seq = seq;
__entry->offset = offset;
@@ -892,7 +1014,7 @@ TRACE_EVENT(rxrpc_recvmsg,
__entry->ret = ret;
),
- TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
+ TP_printk("c=%08x %s q=%08x o=%u l=%u ret=%d",
__entry->call,
__print_symbolic(__entry->why, rxrpc_recvmsg_traces),
__entry->seq,
@@ -908,18 +1030,18 @@ TRACE_EVENT(rxrpc_rtt_tx,
TP_ARGS(call, why, send_serial),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_rtt_tx_trace, why )
__field(rxrpc_serial_t, send_serial )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->send_serial = send_serial;
),
- TP_printk("c=%p %s sr=%08x",
+ TP_printk("c=%08x %s sr=%08x",
__entry->call,
__print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
__entry->send_serial)
@@ -933,7 +1055,7 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_rtt_rx_trace, why )
__field(u8, nr )
__field(rxrpc_serial_t, send_serial )
@@ -943,7 +1065,7 @@ TRACE_EVENT(rxrpc_rtt_rx,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->send_serial = send_serial;
__entry->resp_serial = resp_serial;
@@ -952,7 +1074,7 @@ TRACE_EVENT(rxrpc_rtt_rx,
__entry->avg = avg;
),
- TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+ TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
__entry->send_serial,
@@ -969,7 +1091,7 @@ TRACE_EVENT(rxrpc_timer,
TP_ARGS(call, why, now),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_timer_trace, why )
__field(long, now )
__field(long, ack_at )
@@ -983,7 +1105,7 @@ TRACE_EVENT(rxrpc_timer,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->now = now;
__entry->ack_at = call->ack_at;
@@ -995,7 +1117,7 @@ TRACE_EVENT(rxrpc_timer,
__entry->timer = call->timer.expires;
),
- TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
+ TP_printk("c=%08x %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
__entry->ack_at - __entry->now,
@@ -1038,7 +1160,7 @@ TRACE_EVENT(rxrpc_propose_ack,
outcome),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_propose_ack_trace, why )
__field(rxrpc_serial_t, serial )
__field(u8, ack_reason )
@@ -1048,7 +1170,7 @@ TRACE_EVENT(rxrpc_propose_ack,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->serial = serial;
__entry->ack_reason = ack_reason;
@@ -1057,7 +1179,7 @@ TRACE_EVENT(rxrpc_propose_ack,
__entry->outcome = outcome;
),
- TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
+ TP_printk("c=%08x %s %s r=%08x i=%u b=%u%s",
__entry->call,
__print_symbolic(__entry->why, rxrpc_propose_ack_traces),
__print_symbolic(__entry->ack_reason, rxrpc_ack_names),
@@ -1074,20 +1196,20 @@ TRACE_EVENT(rxrpc_retransmit,
TP_ARGS(call, seq, annotation, expiry),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(u8, annotation )
__field(s64, expiry )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->seq = seq;
__entry->annotation = annotation;
__entry->expiry = expiry;
),
- TP_printk("c=%p q=%x a=%02x xp=%lld",
+ TP_printk("c=%08x q=%x a=%02x xp=%lld",
__entry->call,
__entry->seq,
__entry->annotation,
@@ -1101,7 +1223,7 @@ TRACE_EVENT(rxrpc_congest,
TP_ARGS(call, summary, ack_serial, change),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_congest_change, change )
__field(rxrpc_seq_t, hard_ack )
__field(rxrpc_seq_t, top )
@@ -1111,7 +1233,7 @@ TRACE_EVENT(rxrpc_congest,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->change = change;
__entry->hard_ack = call->tx_hard_ack;
__entry->top = call->tx_top;
@@ -1120,7 +1242,7 @@ TRACE_EVENT(rxrpc_congest,
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
@@ -1145,16 +1267,16 @@ TRACE_EVENT(rxrpc_disconnect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(u32, abort_code )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->abort_code = call->abort_code;
),
- TP_printk("c=%p ab=%08x",
+ TP_printk("c=%08x ab=%08x",
__entry->call,
__entry->abort_code)
);
@@ -1165,16 +1287,16 @@ TRACE_EVENT(rxrpc_improper_term,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(u32, abort_code )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->abort_code = call->abort_code;
),
- TP_printk("c=%p ab=%08x",
+ TP_printk("c=%08x ab=%08x",
__entry->call,
__entry->abort_code)
);
@@ -1186,18 +1308,18 @@ TRACE_EVENT(rxrpc_rx_eproto,
TP_ARGS(call, serial, why),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(const char *, why )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->serial = serial;
__entry->why = why;
),
- TP_printk("c=%p EPROTO %08x %s",
+ TP_printk("c=%08x EPROTO %08x %s",
__entry->call,
__entry->serial,
__entry->why)
@@ -1209,26 +1331,49 @@ TRACE_EVENT(rxrpc_connect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(unsigned long, user_call_ID )
__field(u32, cid )
__field(u32, call_id )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->user_call_ID = call->user_call_ID;
__entry->cid = call->cid;
__entry->call_id = call->call_id;
),
- TP_printk("c=%p u=%p %08x:%08x",
+ TP_printk("c=%08x u=%p %08x:%08x",
__entry->call,
(void *)__entry->user_call_ID,
__entry->cid,
__entry->call_id)
);
+TRACE_EVENT(rxrpc_resend,
+ TP_PROTO(struct rxrpc_call *call, int ix),
+
+ TP_ARGS(call, ix),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(int, ix )
+ __array(u8, anno, 64 )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->ix = ix;
+ memcpy(__entry->anno, call->rxtx_annotations, 64);
+ ),
+
+ TP_printk("c=%08x ix=%u a=%64phN",
+ __entry->call,
+ __entry->ix,
+ __entry->anno)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
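
The recurring change in this file swaps raw kernel pointers (c=%p and friends) for a small per-object debug_id printed as %08x: since %p output is hashed, a stable integer id is what keeps events for the same call, connection or peer correlatable across a trace. A hedged sketch of how such an id is typically minted; the counter name and call site are assumptions rather than verbatim rxrpc code.

	/* Hedged sketch: minting a debug_id like the ones traced above. */
	static atomic_t example_rxrpc_debug_id;		/* assumed global counter */

	static void example_init_call(struct rxrpc_call *call)
	{
		call->debug_id = atomic_inc_return(&example_rxrpc_debug_id);
	}
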
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
new file mode 100644
index 000000000000..7475c7be165a
--- /dev/null
+++ b/include/trace/events/sctp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sctp
+
+#if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCTP_H
+
+#include <net/sctp/structs.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sctp_probe_path,
+
+ TP_PROTO(struct sctp_transport *sp,
+ const struct sctp_association *asoc),
+
+ TP_ARGS(sp, asoc),
+
+ TP_STRUCT__entry(
+ __field(__u64, asoc)
+ __field(__u32, primary)
+ __array(__u8, ipaddr, sizeof(union sctp_addr))
+ __field(__u32, state)
+ __field(__u32, cwnd)
+ __field(__u32, ssthresh)
+ __field(__u32, flight_size)
+ __field(__u32, partial_bytes_acked)
+ __field(__u32, pathmtu)
+ ),
+
+ TP_fast_assign(
+ __entry->asoc = (unsigned long)asoc;
+ __entry->primary = (sp == asoc->peer.primary_path);
+ memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr));
+ __entry->state = sp->state;
+ __entry->cwnd = sp->cwnd;
+ __entry->ssthresh = sp->ssthresh;
+ __entry->flight_size = sp->flight_size;
+ __entry->partial_bytes_acked = sp->partial_bytes_acked;
+ __entry->pathmtu = sp->pathmtu;
+ ),
+
+ TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u "
+ "flight_size=%u partial_bytes_acked=%u pathmtu=%u",
+ __entry->asoc, __entry->primary ? "(*)" : "",
+ __entry->ipaddr, __entry->state, __entry->cwnd,
+ __entry->ssthresh, __entry->flight_size,
+ __entry->partial_bytes_acked, __entry->pathmtu)
+);
+
+TRACE_EVENT(sctp_probe,
+
+ TP_PROTO(const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk),
+
+ TP_ARGS(ep, asoc, chunk),
+
+ TP_STRUCT__entry(
+ __field(__u64, asoc)
+ __field(__u32, mark)
+ __field(__u16, bind_port)
+ __field(__u16, peer_port)
+ __field(__u32, pathmtu)
+ __field(__u32, rwnd)
+ __field(__u16, unack_data)
+ ),
+
+ TP_fast_assign(
+ struct sk_buff *skb = chunk->skb;
+
+ __entry->asoc = (unsigned long)asoc;
+ __entry->mark = skb->mark;
+ __entry->bind_port = ep->base.bind_addr.port;
+ __entry->peer_port = asoc->peer.port;
+ __entry->pathmtu = asoc->pathmtu;
+ __entry->rwnd = asoc->peer.rwnd;
+ __entry->unack_data = asoc->unack_data;
+
+ if (trace_sctp_probe_path_enabled()) {
+ struct sctp_transport *sp;
+
+ list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+ transports) {
+ trace_sctp_probe_path(sp, asoc);
+ }
+ }
+ ),
+
+ TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
+ "rwnd=%u unack_data=%d",
+ __entry->asoc, __entry->mark, __entry->bind_port,
+ __entry->peer_port, __entry->pathmtu, __entry->rwnd,
+ __entry->unack_data)
+);
+
+#endif /* _TRACE_SCTP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
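
Note how sctp_probe's TP_fast_assign only walks the transport list when the companion event is enabled: every tracepoint gets a generated trace_<name>_enabled() helper backed by a static key, so the check is effectively free while tracing is off. The same guard, pulled out into a standalone helper for clarity (the helper itself is illustrative):

	/* The guard pattern above, as an illustrative standalone helper. */
	static inline void example_probe_all_paths(const struct sctp_association *asoc)
	{
		struct sctp_transport *sp;

		if (!trace_sctp_probe_path_enabled())	/* static-key test */
			return;

		list_for_each_entry(sp, &asoc->peer.transport_addr_list, transports)
			trace_sctp_probe_path(sp, asoc);
	}
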
diff --git a/include/trace/events/siox.h b/include/trace/events/siox.h
new file mode 100644
index 000000000000..68a43fc2c3a5
--- /dev/null
+++ b/include/trace/events/siox.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM siox
+
+#if !defined(_TRACE_SIOX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SIOX_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(siox_set_data,
+ TP_PROTO(const struct siox_master *smaster,
+ const struct siox_device *sdevice,
+ unsigned int devno, size_t bufoffset),
+ TP_ARGS(smaster, sdevice, devno, bufoffset),
+ TP_STRUCT__entry(
+ __field(int, busno)
+ __field(unsigned int, devno)
+ __field(size_t, inbytes)
+ __dynamic_array(u8, buf, sdevice->inbytes)
+ ),
+ TP_fast_assign(
+ __entry->busno = smaster->busno;
+ __entry->devno = devno;
+ __entry->inbytes = sdevice->inbytes;
+ memcpy(__get_dynamic_array(buf),
+ smaster->buf + bufoffset, sdevice->inbytes);
+ ),
+ TP_printk("siox-%d-%u [%*phD]",
+ __entry->busno,
+ __entry->devno,
+ (int)__entry->inbytes, __get_dynamic_array(buf)
+ )
+);
+
+TRACE_EVENT(siox_get_data,
+ TP_PROTO(const struct siox_master *smaster,
+ const struct siox_device *sdevice,
+ unsigned int devno, u8 status_clean,
+ size_t bufoffset),
+ TP_ARGS(smaster, sdevice, devno, status_clean, bufoffset),
+ TP_STRUCT__entry(
+ __field(int, busno)
+ __field(unsigned int, devno)
+ __field(u8, status_clean)
+ __field(size_t, outbytes)
+ __dynamic_array(u8, buf, sdevice->outbytes)
+ ),
+ TP_fast_assign(
+ __entry->busno = smaster->busno;
+ __entry->devno = devno;
+ __entry->status_clean = status_clean;
+ __entry->outbytes = sdevice->outbytes;
+ memcpy(__get_dynamic_array(buf),
+ smaster->buf + bufoffset, sdevice->outbytes);
+ ),
+ TP_printk("siox-%d-%u (%02hhx) [%*phD]",
+ __entry->busno,
+ __entry->devno,
+ __entry->status_clean,
+ (int)__entry->outbytes, __get_dynamic_array(buf)
+ )
+);
+
+#endif /* if !defined(_TRACE_SIOX_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index ec4dade24466..3176a3931107 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -6,7 +6,58 @@
#define _TRACE_SOCK_H
#include <net/sock.h>
+#include <net/ipv6.h>
#include <linux/tracepoint.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#define family_names \
+ EM(AF_INET) \
+ EMe(AF_INET6)
+
+/* The protocol traced by inet_sock_set_state */
+#define inet_protocol_names \
+ EM(IPPROTO_TCP) \
+ EM(IPPROTO_DCCP) \
+ EMe(IPPROTO_SCTP)
+
+#define tcp_state_names \
+ EM(TCP_ESTABLISHED) \
+ EM(TCP_SYN_SENT) \
+ EM(TCP_SYN_RECV) \
+ EM(TCP_FIN_WAIT1) \
+ EM(TCP_FIN_WAIT2) \
+ EM(TCP_TIME_WAIT) \
+ EM(TCP_CLOSE) \
+ EM(TCP_CLOSE_WAIT) \
+ EM(TCP_LAST_ACK) \
+ EM(TCP_LISTEN) \
+ EM(TCP_CLOSING) \
+ EMe(TCP_NEW_SYN_RECV)
+
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a) TRACE_DEFINE_ENUM(a);
+#define EMe(a) TRACE_DEFINE_ENUM(a);
+
+family_names
+inet_protocol_names
+tcp_state_names
+
+#undef EM
+#undef EMe
+#define EM(a) { a, #a },
+#define EMe(a) { a, #a }
+
+#define show_family_name(val) \
+ __print_symbolic(val, family_names)
+
+#define show_inet_protocol_name(val) \
+ __print_symbolic(val, inet_protocol_names)
+
+#define show_tcp_state_name(val) \
+ __print_symbolic(val, tcp_state_names)
TRACE_EVENT(sock_rcvqueue_full,
@@ -63,6 +114,72 @@ TRACE_EVENT(sock_exceed_buf_limit,
__entry->rmem_alloc)
);
+TRACE_EVENT(inet_sock_set_state,
+
+ TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
+
+ TP_ARGS(sk, oldstate, newstate),
+
+ TP_STRUCT__entry(
+ __field(const void *, skaddr)
+ __field(int, oldstate)
+ __field(int, newstate)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __field(__u8, protocol)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->skaddr = sk;
+ __entry->oldstate = oldstate;
+ __entry->newstate = newstate;
+
+ __entry->family = sk->sk_family;
+ __entry->protocol = sk->sk_protocol;
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ *pin6 = sk->sk_v6_rcv_saddr;
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ *pin6 = sk->sk_v6_daddr;
+ } else
+#endif
+ {
+ pin6 = (struct in6_addr *)__entry->saddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+ pin6 = (struct in6_addr *)__entry->daddr_v6;
+ ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+ }
+ ),
+
+ TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
+ show_family_name(__entry->family),
+ show_inet_protocol_name(__entry->protocol),
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ show_tcp_state_name(__entry->oldstate),
+ show_tcp_state_name(__entry->newstate))
+);
+
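
For AF_INET sockets the saddr_v6/daddr_v6 entries above carry v4-mapped addresses rather than being left empty. A small standalone sketch (no tracing involved) showing what that ::ffff:a.b.c.d representation looks like when printed:

        #include <stdio.h>
        #include <arpa/inet.h>

        int main(void)
        {
                struct in6_addr v6;
                char buf[INET6_ADDRSTRLEN];

                /* a v4-mapped address, as the tracepoint stores for AF_INET */
                inet_pton(AF_INET6, "::ffff:192.0.2.1", &v6);
                printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));
                return 0;
        }
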
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8c153f68509e..922cb8968fb2 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(rpc_task_status,
__entry->status = task->tk_status;
),
- TP_printk("task:%u@%u, status %d",
+ TP_printk("task:%u@%u status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -66,7 +66,7 @@ TRACE_EVENT(rpc_connect_status,
__entry->status = status;
),
- TP_printk("task:%u@%u, status %d",
+ TP_printk("task:%u@%u status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -175,7 +175,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
),
TP_fast_assign(
- __entry->client_id = clnt->cl_clid;
+ __entry->client_id = clnt ? clnt->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->timeout = task->tk_timeout;
__entry->runstate = task->tk_runstate;
@@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
- TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+ TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
@@ -390,6 +390,10 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
__entry->status)
);
+DEFINE_EVENT(rpc_xprt_event, xprt_timer,
+ TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_ARGS(xprt, xid, status));
+
DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst,
TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
TP_ARGS(xprt, xid, status));
@@ -481,31 +485,55 @@ TRACE_EVENT(xs_tcp_data_recv,
{ (1UL << RQ_BUSY), "RQ_BUSY"})
TRACE_EVENT(svc_recv,
- TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_PROTO(struct svc_rqst *rqst, int len),
- TP_ARGS(rqst, status),
+ TP_ARGS(rqst, len),
TP_STRUCT__entry(
__field(u32, xid)
- __field(int, status)
+ __field(int, len)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = status > 0 ? be32_to_cpu(rqst->rq_xid) : 0;
- __entry->status = status;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->len = len;
__entry->flags = rqst->rq_flags;
- memcpy(__get_dynamic_array(addr),
- &rqst->rq_addr, rqst->rq_addrlen);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s",
- (struct sockaddr *)__get_dynamic_array(addr),
- __entry->xid, __entry->status,
+ TP_printk("addr=%s xid=0x%08x len=%d flags=%s",
+ __get_str(addr), __entry->xid, __entry->len,
show_rqstp_flags(__entry->flags))
);
+TRACE_EVENT(svc_process,
+ TP_PROTO(const struct svc_rqst *rqst, const char *name),
+
+ TP_ARGS(rqst, name),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, vers)
+ __field(u32, proc)
+ __string(service, name)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->vers = rqst->rq_vers;
+ __entry->proc = rqst->rq_proc;
+ __assign_str(service, name);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
+ __get_str(addr), __entry->xid,
+ __get_str(service), __entry->vers, __entry->proc)
+);
+
DECLARE_EVENT_CLASS(svc_rqst_event,
TP_PROTO(struct svc_rqst *rqst),
@@ -515,20 +543,18 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
TP_STRUCT__entry(
__field(u32, xid)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->flags = rqst->rq_flags;
- memcpy(__get_dynamic_array(addr),
- &rqst->rq_addr, rqst->rq_addrlen);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%pIScp rq_xid=0x%08x flags=%s",
- (struct sockaddr *)__get_dynamic_array(addr),
- __entry->xid,
- show_rqstp_flags(__entry->flags))
+ TP_printk("addr=%s xid=0x%08x flags=%s",
+ __get_str(addr), __entry->xid,
+ show_rqstp_flags(__entry->flags))
);
DEFINE_EVENT(svc_rqst_event, svc_defer,
@@ -549,27 +575,21 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
__field(u32, xid)
__field(int, status)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->status = status;
__entry->flags = rqst->rq_flags;
- memcpy(__get_dynamic_array(addr),
- &rqst->rq_addr, rqst->rq_addrlen);
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%pIScp rq_xid=0x%08x status=%d flags=%s",
- (struct sockaddr *)__get_dynamic_array(addr),
- __entry->xid,
- __entry->status, show_rqstp_flags(__entry->flags))
+ TP_printk("addr=%s xid=0x%08x status=%d flags=%s",
+ __get_str(addr), __entry->xid,
+ __entry->status, show_rqstp_flags(__entry->flags))
);
-DEFINE_EVENT(svc_rqst_status, svc_process,
- TP_PROTO(struct svc_rqst *rqst, int status),
- TP_ARGS(rqst, status));
-
DEFINE_EVENT(svc_rqst_status, svc_send,
TP_PROTO(struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
@@ -587,7 +607,9 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
{ (1UL << XPT_OLD), "XPT_OLD"}, \
{ (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
{ (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
- { (1UL << XPT_LOCAL), "XPT_LOCAL"})
+ { (1UL << XPT_LOCAL), "XPT_LOCAL"}, \
+ { (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
+ { (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
TRACE_EVENT(svc_xprt_do_enqueue,
TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
@@ -598,26 +620,19 @@ TRACE_EVENT(svc_xprt_do_enqueue,
__field(struct svc_xprt *, xprt)
__field(int, pid)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, xprt != NULL ?
- xprt->xpt_remotelen : 0)
+ __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xprt = xprt;
__entry->pid = rqst? rqst->rq_task->pid : 0;
- if (xprt) {
- memcpy(__get_dynamic_array(addr),
- &xprt->xpt_remote,
- xprt->xpt_remotelen);
- __entry->flags = xprt->xpt_flags;
- } else
- __entry->flags = 0;
- ),
-
- TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
- __get_dynamic_array_len(addr) != 0 ?
- (struct sockaddr *)__get_dynamic_array(addr) : NULL,
- __entry->pid, show_svc_xprt_flags(__entry->flags))
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("xprt=%p addr=%s pid=%d flags=%s",
+ __entry->xprt, __get_str(addr),
+ __entry->pid, show_svc_xprt_flags(__entry->flags))
);
DECLARE_EVENT_CLASS(svc_xprt_event,
@@ -628,35 +643,50 @@ DECLARE_EVENT_CLASS(svc_xprt_event,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, xprt != NULL ?
- xprt->xpt_remotelen : 0)
+ __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xprt = xprt;
- if (xprt) {
- memcpy(__get_dynamic_array(addr),
- &xprt->xpt_remote,
- xprt->xpt_remotelen);
- __entry->flags = xprt->xpt_flags;
- } else
- __entry->flags = 0;
- ),
-
- TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
- __get_dynamic_array_len(addr) != 0 ?
- (struct sockaddr *)__get_dynamic_array(addr) : NULL,
- show_svc_xprt_flags(__entry->flags))
-);
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
-DEFINE_EVENT(svc_xprt_event, svc_xprt_dequeue,
- TP_PROTO(struct svc_xprt *xprt),
- TP_ARGS(xprt));
+ TP_printk("xprt=%p addr=%s flags=%s",
+ __entry->xprt, __get_str(addr),
+ show_svc_xprt_flags(__entry->flags))
+);
DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space,
TP_PROTO(struct svc_xprt *xprt),
TP_ARGS(xprt));
+TRACE_EVENT(svc_xprt_dequeue,
+ TP_PROTO(struct svc_rqst *rqst),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(struct svc_xprt *, xprt)
+ __field(unsigned long, flags)
+ __field(unsigned long, wakeup)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xprt = rqst->rq_xprt;
+ __entry->flags = rqst->rq_xprt->xpt_flags;
+ __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_qtime));
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu",
+ __entry->xprt, __get_str(addr),
+ show_svc_xprt_flags(__entry->flags),
+ __entry->wakeup)
+);
+
TRACE_EVENT(svc_wake_up,
TP_PROTO(int pid),
@@ -682,28 +712,42 @@ TRACE_EVENT(svc_handle_xprt,
__field(struct svc_xprt *, xprt)
__field(int, len)
__field(unsigned long, flags)
- __dynamic_array(unsigned char, addr, xprt != NULL ?
- xprt->xpt_remotelen : 0)
+ __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xprt = xprt;
__entry->len = len;
- if (xprt) {
- memcpy(__get_dynamic_array(addr),
- &xprt->xpt_remote,
- xprt->xpt_remotelen);
- __entry->flags = xprt->xpt_flags;
- } else
- __entry->flags = 0;
- ),
-
- TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
- __get_dynamic_array_len(addr) != 0 ?
- (struct sockaddr *)__get_dynamic_array(addr) : NULL,
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("xprt=%p addr=%s len=%d flags=%s",
+ __entry->xprt, __get_str(addr),
__entry->len, show_svc_xprt_flags(__entry->flags))
);
+TRACE_EVENT(svc_stats_latency,
+ TP_PROTO(const struct svc_rqst *rqst),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(unsigned long, execute)
+ __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_stime));
+ __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x execute-us=%lu",
+ __get_str(addr), __entry->xid, __entry->execute)
+);
DECLARE_EVENT_CLASS(svc_deferred_event,
TP_PROTO(struct svc_deferred_req *dr),
@@ -712,18 +756,16 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_STRUCT__entry(
__field(u32, xid)
- __dynamic_array(unsigned char, addr, dr->addrlen)
+ __string(addr, dr->xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
(dr->xprt_hlen>>2)));
- memcpy(__get_dynamic_array(addr), &dr->addr, dr->addrlen);
+ __assign_str(addr, dr->xprt->xpt_remotebuf);
),
- TP_printk("addr=%pIScp xid=0x%08x",
- (struct sockaddr *)__get_dynamic_array(addr),
- __entry->xid)
+ TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
);
DEFINE_EVENT(svc_deferred_event, svc_drop_deferred,
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index ab34c561f26b..878b2be7ce77 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM tcp
@@ -8,22 +9,7 @@
#include <linux/tcp.h>
#include <linux/tracepoint.h>
#include <net/ipv6.h>
-
-#define tcp_state_name(state) { state, #state }
-#define show_tcp_state_name(val) \
- __print_symbolic(val, \
- tcp_state_name(TCP_ESTABLISHED), \
- tcp_state_name(TCP_SYN_SENT), \
- tcp_state_name(TCP_SYN_RECV), \
- tcp_state_name(TCP_FIN_WAIT1), \
- tcp_state_name(TCP_FIN_WAIT2), \
- tcp_state_name(TCP_TIME_WAIT), \
- tcp_state_name(TCP_CLOSE), \
- tcp_state_name(TCP_CLOSE_WAIT), \
- tcp_state_name(TCP_LAST_ACK), \
- tcp_state_name(TCP_LISTEN), \
- tcp_state_name(TCP_CLOSING), \
- tcp_state_name(TCP_NEW_SYN_RECV))
+#include <net/tcp.h>
#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
do { \
@@ -270,6 +256,64 @@ TRACE_EVENT(tcp_retransmit_synack,
__entry->saddr_v6, __entry->daddr_v6)
);
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(tcp_probe,
+
+ TP_PROTO(struct sock *sk, struct sk_buff *skb),
+
+ TP_ARGS(sk, skb),
+
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u32, mark)
+ __field(__u16, length)
+ __field(__u32, snd_nxt)
+ __field(__u32, snd_una)
+ __field(__u32, snd_cwnd)
+ __field(__u32, ssthresh)
+ __field(__u32, snd_wnd)
+ __field(__u32, srtt)
+ __field(__u32, rcv_wnd)
+ ),
+
+ TP_fast_assign(
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->mark = skb->mark;
+
+ __entry->length = skb->len;
+ __entry->snd_nxt = tp->snd_nxt;
+ __entry->snd_una = tp->snd_una;
+ __entry->snd_cwnd = tp->snd_cwnd;
+ __entry->snd_wnd = tp->snd_wnd;
+ __entry->rcv_wnd = tp->rcv_wnd;
+ __entry->ssthresh = tcp_current_ssthresh(sk);
+ __entry->srtt = tp->srtt_us >> 3;
+ ),
+
+ TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x "
+ "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u "
+ "rcv_wnd=%u",
+ __entry->saddr, __entry->daddr, __entry->mark,
+ __entry->length, __entry->snd_nxt, __entry->snd_una,
+ __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
+ __entry->srtt, __entry->rcv_wnd)
+);
+
#endif /* _TRACE_TCP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 78946640fe03..135e5421f003 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -94,9 +94,9 @@ TRACE_EVENT(thermal_zone_trip,
#ifdef CONFIG_CPU_THERMAL
TRACE_EVENT(thermal_power_cpu_get_power,
TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
- size_t load_len, u32 dynamic_power, u32 static_power),
+ size_t load_len, u32 dynamic_power),
- TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power),
+ TP_ARGS(cpus, freq, load, load_len, dynamic_power),
TP_STRUCT__entry(
__bitmask(cpumask, num_possible_cpus())
@@ -104,7 +104,6 @@ TRACE_EVENT(thermal_power_cpu_get_power,
__dynamic_array(u32, load, load_len)
__field(size_t, load_len )
__field(u32, dynamic_power )
- __field(u32, static_power )
),
TP_fast_assign(
@@ -115,13 +114,12 @@ TRACE_EVENT(thermal_power_cpu_get_power,
load_len * sizeof(*load));
__entry->load_len = load_len;
__entry->dynamic_power = dynamic_power;
- __entry->static_power = static_power;
),
- TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d",
+ TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d",
__get_bitmask(cpumask), __entry->freq,
__print_array(__get_dynamic_array(load), __entry->load_len, 4),
- __entry->dynamic_power, __entry->static_power)
+ __entry->dynamic_power)
);
TRACE_EVENT(thermal_power_cpu_limit,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 16e305e69f34..a57e4ee989d6 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -136,6 +136,24 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+#define decode_clockid(type) \
+ __print_symbolic(type, \
+ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
+ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
+ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
+ { CLOCK_TAI, "CLOCK_TAI" })
+
+#define decode_hrtimer_mode(mode) \
+ __print_symbolic(mode, \
+ { HRTIMER_MODE_ABS, "ABS" }, \
+ { HRTIMER_MODE_REL, "REL" }, \
+ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
+ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }, \
+ { HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
+ { HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
+ { HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
+ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+
/**
* hrtimer_init - called when the hrtimer is initialized
* @hrtimer: pointer to struct hrtimer
@@ -162,10 +180,8 @@ TRACE_EVENT(hrtimer_init,
),
TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
- __entry->clockid == CLOCK_REALTIME ?
- "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
- __entry->mode == HRTIMER_MODE_ABS ?
- "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+ decode_clockid(__entry->clockid),
+ decode_hrtimer_mode(__entry->mode))
);
/**
@@ -174,15 +190,16 @@ TRACE_EVENT(hrtimer_init,
*/
TRACE_EVENT(hrtimer_start,
- TP_PROTO(struct hrtimer *hrtimer),
+ TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
- TP_ARGS(hrtimer),
+ TP_ARGS(hrtimer, mode),
TP_STRUCT__entry(
__field( void *, hrtimer )
__field( void *, function )
__field( s64, expires )
__field( s64, softexpires )
+ __field( enum hrtimer_mode, mode )
),
TP_fast_assign(
@@ -190,12 +207,14 @@ TRACE_EVENT(hrtimer_start,
__entry->function = hrtimer->function;
__entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer);
+ __entry->mode = mode;
),
- TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
- __entry->hrtimer, __entry->function,
+ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
+ "mode=%s", __entry->hrtimer, __entry->function,
(unsigned long long) __entry->expires,
- (unsigned long long) __entry->softexpires)
+ (unsigned long long) __entry->softexpires,
+ decode_hrtimer_mode(__entry->mode))
);
/**
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index d70b53e65f43..a1cb91342231 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -78,26 +78,29 @@ TRACE_EVENT(mm_vmscan_kswapd_wake,
TRACE_EVENT(mm_vmscan_wakeup_kswapd,
- TP_PROTO(int nid, int zid, int order),
+ TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
- TP_ARGS(nid, zid, order),
+ TP_ARGS(nid, zid, order, gfp_flags),
TP_STRUCT__entry(
- __field( int, nid )
- __field( int, zid )
- __field( int, order )
+ __field( int, nid )
+ __field( int, zid )
+ __field( int, order )
+ __field( gfp_t, gfp_flags )
),
TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->order = order;
+ __entry->gfp_flags = gfp_flags;
),
- TP_printk("nid=%d zid=%d order=%d",
+ TP_printk("nid=%d zid=%d order=%d gfp_flags=%s",
__entry->nid,
__entry->zid,
- __entry->order)
+ __entry->order,
+ show_gfp_flags(__entry->gfp_flags))
);
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
@@ -192,12 +195,12 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
TRACE_EVENT(mm_shrink_slab_start,
TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
- long nr_objects_to_shrink, unsigned long pgs_scanned,
- unsigned long lru_pgs, unsigned long cache_items,
- unsigned long long delta, unsigned long total_scan),
+ long nr_objects_to_shrink, unsigned long cache_items,
+ unsigned long long delta, unsigned long total_scan,
+ int priority),
- TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs,
- cache_items, delta, total_scan),
+ TP_ARGS(shr, sc, nr_objects_to_shrink, cache_items, delta, total_scan,
+ priority),
TP_STRUCT__entry(
__field(struct shrinker *, shr)
@@ -205,11 +208,10 @@ TRACE_EVENT(mm_shrink_slab_start,
__field(int, nid)
__field(long, nr_objects_to_shrink)
__field(gfp_t, gfp_flags)
- __field(unsigned long, pgs_scanned)
- __field(unsigned long, lru_pgs)
__field(unsigned long, cache_items)
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
+ __field(int, priority)
),
TP_fast_assign(
@@ -218,24 +220,22 @@ TRACE_EVENT(mm_shrink_slab_start,
__entry->nid = sc->nid;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
__entry->gfp_flags = sc->gfp_mask;
- __entry->pgs_scanned = pgs_scanned;
- __entry->lru_pgs = lru_pgs;
__entry->cache_items = cache_items;
__entry->delta = delta;
__entry->total_scan = total_scan;
+ __entry->priority = priority;
),
- TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld",
+ TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d",
__entry->shrink,
__entry->shr,
__entry->nid,
__entry->nr_objects_to_shrink,
show_gfp_flags(__entry->gfp_flags),
- __entry->pgs_scanned,
- __entry->lru_pgs,
__entry->cache_items,
__entry->delta,
- __entry->total_scan)
+ __entry->total_scan,
+ __entry->priority)
);
TRACE_EVENT(mm_shrink_slab_end,
@@ -346,15 +346,9 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
TP_PROTO(int nid,
unsigned long nr_scanned, unsigned long nr_reclaimed,
- unsigned long nr_dirty, unsigned long nr_writeback,
- unsigned long nr_congested, unsigned long nr_immediate,
- unsigned long nr_activate, unsigned long nr_ref_keep,
- unsigned long nr_unmap_fail,
- int priority, int file),
+ struct reclaim_stat *stat, int priority, int file),
- TP_ARGS(nid, nr_scanned, nr_reclaimed, nr_dirty, nr_writeback,
- nr_congested, nr_immediate, nr_activate, nr_ref_keep,
- nr_unmap_fail, priority, file),
+ TP_ARGS(nid, nr_scanned, nr_reclaimed, stat, priority, file),
TP_STRUCT__entry(
__field(int, nid)
@@ -375,13 +369,13 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
__entry->nid = nid;
__entry->nr_scanned = nr_scanned;
__entry->nr_reclaimed = nr_reclaimed;
- __entry->nr_dirty = nr_dirty;
- __entry->nr_writeback = nr_writeback;
- __entry->nr_congested = nr_congested;
- __entry->nr_immediate = nr_immediate;
- __entry->nr_activate = nr_activate;
- __entry->nr_ref_keep = nr_ref_keep;
- __entry->nr_unmap_fail = nr_unmap_fail;
+ __entry->nr_dirty = stat->nr_dirty;
+ __entry->nr_writeback = stat->nr_writeback;
+ __entry->nr_congested = stat->nr_congested;
+ __entry->nr_immediate = stat->nr_immediate;
+ __entry->nr_activate = stat->nr_activate;
+ __entry->nr_ref_keep = stat->nr_ref_keep;
+ __entry->nr_unmap_fail = stat->nr_unmap_fail;
__entry->priority = priority;
__entry->reclaim_flags = trace_shrink_flags(file);
),
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index b8adf05c534e..7dd8f34c37df 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
TP_printk("%s", "")
);
-TRACE_EVENT(xen_mmu_flush_tlb_single,
+TRACE_EVENT(xen_mmu_flush_tlb_one_user,
TP_PROTO(unsigned long addr),
TP_ARGS(addr),
TP_STRUCT__entry(
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index f8b134f5608f..e7ee32861d51 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -27,6 +27,9 @@
# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
#endif
+/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
+#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
+
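
A minimal userspace sketch of the new flag, assuming only the value 0x100000 defined above; note that kernels without the feature silently ignore unknown mmap flags, so callers still have to check the returned address:

        #include <stdio.h>
        #include <errno.h>
        #include <string.h>
        #include <sys/mman.h>

        #ifndef MAP_FIXED_NOREPLACE
        #define MAP_FIXED_NOREPLACE 0x100000
        #endif

        int main(void)
        {
                void *hint = (void *)0x40000000UL;      /* arbitrary example address */
                void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                               -1, 0);

                if (p == MAP_FAILED) {
                        /* EEXIST: something already lives in that range */
                        printf("mmap: %s\n", strerror(errno));
                        return 1;
                }
                if (p != hint) {
                        /* old kernel ignored the flag and treated addr as a hint */
                        munmap(p, 4096);
                        return 1;
                }
                printf("mapped exactly at %p\n", p);
                munmap(p, 4096);
                return 0;
        }
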
/*
* Flags for mlock
*/
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index fefb3d2c3fac..41b509f410bf 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -29,9 +29,9 @@
#define POLLRDHUP 0x2000
#endif
-#define POLLFREE 0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
-#define POLL_BUSY_LOOP 0x8000
+#define POLL_BUSY_LOOP (__force __poll_t)0x8000
struct pollfd {
int fd;
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index e447283b8f52..544208fd3db1 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -23,10 +23,6 @@ typedef union sigval {
#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
#endif
-#ifndef __ARCH_SI_UID_T
-#define __ARCH_SI_UID_T __kernel_uid32_t
-#endif
-
/*
* The default "si_band" type is "long", as specified by POSIX.
* However, some architectures want to override this to "int"
@@ -44,12 +40,15 @@ typedef union sigval {
#define __ARCH_SI_ATTRIBUTES
#endif
-#ifndef HAVE_ARCH_SIGINFO_T
-
typedef struct siginfo {
int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
int si_errno;
int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
union {
int _pad[SI_PAD_SIZE];
@@ -57,14 +56,13 @@ typedef struct siginfo {
/* kill() */
struct {
__kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
- char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
@@ -72,34 +70,50 @@ typedef struct siginfo {
/* POSIX.1b signals */
struct {
__kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
__kernel_pid_t _pid; /* which child */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
int _status; /* exit code */
__ARCH_SI_CLOCK_T _utime;
__ARCH_SI_CLOCK_T _stime;
} _sigchld;
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
- short _addr_lsb; /* LSB of the reported address */
+#ifdef __ia64__
+ int _imm; /* immediate value for "break" */
+ unsigned int _flags; /* see ia64 si_flags */
+ unsigned long _isr; /* isr */
+#endif
+
+#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
+ sizeof(short) : __alignof__(void *))
union {
+ /*
+				 * used when si_code=BUS_MCEERR_AR or
+				 * when si_code=BUS_MCEERR_AO
+ */
+ short _addr_lsb; /* LSB of the reported address */
/* used when si_code=SEGV_BNDERR */
struct {
+ char _dummy_bnd[__ADDR_BND_PKEY_PAD];
void __user *_lower;
void __user *_upper;
} _addr_bnd;
/* used when si_code=SEGV_PKUERR */
- __u32 _pkey;
+ struct {
+ char _dummy_pkey[__ADDR_BND_PKEY_PAD];
+ __u32 _pkey;
+ } _addr_pkey;
};
} _sigfault;
@@ -118,10 +132,6 @@ typedef struct siginfo {
} _sifields;
} __ARCH_SI_ATTRIBUTES siginfo_t;
-/* If the arch shares siginfo, then it has SIGSYS. */
-#define __ARCH_SIGSYS
-#endif
-
/*
* How these fields are to be accessed.
*/
@@ -143,14 +153,12 @@ typedef struct siginfo {
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
-#define si_pkey _sifields._sigfault._pkey
+#define si_pkey _sifields._sigfault._addr_pkey._pkey
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
-#ifdef __ARCH_SIGSYS
#define si_call_addr _sifields._sigsys._call_addr
#define si_syscall _sifields._sigsys._syscall
#define si_arch _sifields._sigsys._arch
-#endif
/*
* si_code values
@@ -165,6 +173,7 @@ typedef struct siginfo {
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
+#define SI_ASYNCNL -60 /* sent by glibc async name lookup completion */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
@@ -180,7 +189,10 @@ typedef struct siginfo {
#define ILL_PRVREG 6 /* privileged register */
#define ILL_COPROC 7 /* coprocessor error */
#define ILL_BADSTK 8 /* internal stack error */
-#define NSIGILL 8
+#define ILL_BADIADDR 9 /* unimplemented instruction address */
+#define __ILL_BREAK 10 /* illegal break */
+#define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */
+#define NSIGILL 11
/*
* SIGFPE si_codes
@@ -193,7 +205,13 @@ typedef struct siginfo {
#define FPE_FLTRES 6 /* floating point inexact result */
#define FPE_FLTINV 7 /* floating point invalid operation */
#define FPE_FLTSUB 8 /* subscript out of range */
-#define NSIGFPE 8
+#define __FPE_DECOVF 9 /* decimal overflow */
+#define __FPE_DECDIV 10 /* decimal division by zero */
+#define __FPE_DECERR 11 /* packed decimal error */
+#define __FPE_INVASC 12 /* invalid ASCII digit */
+#define __FPE_INVDEC 13 /* invalid decimal digit */
+#define FPE_FLTUNK 14 /* undiagnosed floating-point exception */
+#define NSIGFPE 14
/*
* SIGSEGV si_codes
@@ -201,8 +219,15 @@ typedef struct siginfo {
#define SEGV_MAPERR 1 /* address not mapped to object */
#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
#define SEGV_BNDERR 3 /* failed address bound checks */
-#define SEGV_PKUERR 4 /* failed protection key checks */
-#define NSIGSEGV 4
+#ifdef __ia64__
+# define __SEGV_PSTKOVF 4 /* paragraph stack overflow */
+#else
+# define SEGV_PKUERR 4 /* failed protection key checks */
+#endif
+#define SEGV_ACCADI 5 /* ADI not enabled for mapped object */
+#define SEGV_ADIDERR 6 /* Disrupting MCD error */
+#define SEGV_ADIPERR 7 /* Precise MCD exception */
+#define NSIGSEGV 7
/*
* SIGBUS si_codes
@@ -226,6 +251,11 @@ typedef struct siginfo {
#define NSIGTRAP 4
/*
+ * There is an additional set of SIGTRAP si_codes used by ptrace
+ * that are of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP)
+ */
+
+/*
* SIGCHLD si_codes
*/
#define CLD_EXITED 1 /* child has exited */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 8b87de067bc7..8bcb186c6f67 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -737,169 +737,6 @@ __SYSCALL(__NR_statx, sys_statx)
#define __NR_syscalls 292
/*
- * All syscalls below here should go away really,
- * these are provided for both review and as a porting
- * help for the C library version.
- *
- * Last chance: are any of these important enough to
- * enable by default?
- */
-#ifdef __ARCH_WANT_SYSCALL_NO_AT
-#define __NR_open 1024
-__SYSCALL(__NR_open, sys_open)
-#define __NR_link 1025
-__SYSCALL(__NR_link, sys_link)
-#define __NR_unlink 1026
-__SYSCALL(__NR_unlink, sys_unlink)
-#define __NR_mknod 1027
-__SYSCALL(__NR_mknod, sys_mknod)
-#define __NR_chmod 1028
-__SYSCALL(__NR_chmod, sys_chmod)
-#define __NR_chown 1029
-__SYSCALL(__NR_chown, sys_chown)
-#define __NR_mkdir 1030
-__SYSCALL(__NR_mkdir, sys_mkdir)
-#define __NR_rmdir 1031
-__SYSCALL(__NR_rmdir, sys_rmdir)
-#define __NR_lchown 1032
-__SYSCALL(__NR_lchown, sys_lchown)
-#define __NR_access 1033
-__SYSCALL(__NR_access, sys_access)
-#define __NR_rename 1034
-__SYSCALL(__NR_rename, sys_rename)
-#define __NR_readlink 1035
-__SYSCALL(__NR_readlink, sys_readlink)
-#define __NR_symlink 1036
-__SYSCALL(__NR_symlink, sys_symlink)
-#define __NR_utimes 1037
-__SYSCALL(__NR_utimes, sys_utimes)
-#define __NR3264_stat 1038
-__SC_3264(__NR3264_stat, sys_stat64, sys_newstat)
-#define __NR3264_lstat 1039
-__SC_3264(__NR3264_lstat, sys_lstat64, sys_newlstat)
-
-#undef __NR_syscalls
-#define __NR_syscalls (__NR3264_lstat+1)
-#endif /* __ARCH_WANT_SYSCALL_NO_AT */
-
-#ifdef __ARCH_WANT_SYSCALL_NO_FLAGS
-#define __NR_pipe 1040
-__SYSCALL(__NR_pipe, sys_pipe)
-#define __NR_dup2 1041
-__SYSCALL(__NR_dup2, sys_dup2)
-#define __NR_epoll_create 1042
-__SYSCALL(__NR_epoll_create, sys_epoll_create)
-#define __NR_inotify_init 1043
-__SYSCALL(__NR_inotify_init, sys_inotify_init)
-#define __NR_eventfd 1044
-__SYSCALL(__NR_eventfd, sys_eventfd)
-#define __NR_signalfd 1045
-__SYSCALL(__NR_signalfd, sys_signalfd)
-
-#undef __NR_syscalls
-#define __NR_syscalls (__NR_signalfd+1)
-#endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */
-
-#if (__BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)) && \
- defined(__ARCH_WANT_SYSCALL_OFF_T)
-#define __NR_sendfile 1046
-__SYSCALL(__NR_sendfile, sys_sendfile)
-#define __NR_ftruncate 1047
-__SYSCALL(__NR_ftruncate, sys_ftruncate)
-#define __NR_truncate 1048
-__SYSCALL(__NR_truncate, sys_truncate)
-#define __NR_stat 1049
-__SYSCALL(__NR_stat, sys_newstat)
-#define __NR_lstat 1050
-__SYSCALL(__NR_lstat, sys_newlstat)
-#define __NR_fstat 1051
-__SYSCALL(__NR_fstat, sys_newfstat)
-#define __NR_fcntl 1052
-__SYSCALL(__NR_fcntl, sys_fcntl)
-#define __NR_fadvise64 1053
-#define __ARCH_WANT_SYS_FADVISE64
-__SYSCALL(__NR_fadvise64, sys_fadvise64)
-#define __NR_newfstatat 1054
-#define __ARCH_WANT_SYS_NEWFSTATAT
-__SYSCALL(__NR_newfstatat, sys_newfstatat)
-#define __NR_fstatfs 1055
-__SYSCALL(__NR_fstatfs, sys_fstatfs)
-#define __NR_statfs 1056
-__SYSCALL(__NR_statfs, sys_statfs)
-#define __NR_lseek 1057
-__SYSCALL(__NR_lseek, sys_lseek)
-#define __NR_mmap 1058
-__SYSCALL(__NR_mmap, sys_mmap)
-
-#undef __NR_syscalls
-#define __NR_syscalls (__NR_mmap+1)
-#endif /* 32 bit off_t syscalls */
-
-#ifdef __ARCH_WANT_SYSCALL_DEPRECATED
-#define __NR_alarm 1059
-#define __ARCH_WANT_SYS_ALARM
-__SYSCALL(__NR_alarm, sys_alarm)
-#define __NR_getpgrp 1060
-#define __ARCH_WANT_SYS_GETPGRP
-__SYSCALL(__NR_getpgrp, sys_getpgrp)
-#define __NR_pause 1061
-#define __ARCH_WANT_SYS_PAUSE
-__SYSCALL(__NR_pause, sys_pause)
-#define __NR_time 1062
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_TIME
-__SYSCALL(__NR_time, sys_time)
-#define __NR_utime 1063
-#define __ARCH_WANT_SYS_UTIME
-__SYSCALL(__NR_utime, sys_utime)
-
-#define __NR_creat 1064
-__SYSCALL(__NR_creat, sys_creat)
-#define __NR_getdents 1065
-#define __ARCH_WANT_SYS_GETDENTS
-__SYSCALL(__NR_getdents, sys_getdents)
-#define __NR_futimesat 1066
-__SYSCALL(__NR_futimesat, sys_futimesat)
-#define __NR_select 1067
-#define __ARCH_WANT_SYS_SELECT
-__SYSCALL(__NR_select, sys_select)
-#define __NR_poll 1068
-__SYSCALL(__NR_poll, sys_poll)
-#define __NR_epoll_wait 1069
-__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
-#define __NR_ustat 1070
-__SYSCALL(__NR_ustat, sys_ustat)
-#define __NR_vfork 1071
-__SYSCALL(__NR_vfork, sys_vfork)
-#define __NR_oldwait4 1072
-__SYSCALL(__NR_oldwait4, sys_wait4)
-#define __NR_recv 1073
-__SYSCALL(__NR_recv, sys_recv)
-#define __NR_send 1074
-__SYSCALL(__NR_send, sys_send)
-#define __NR_bdflush 1075
-__SYSCALL(__NR_bdflush, sys_bdflush)
-#define __NR_umount 1076
-__SYSCALL(__NR_umount, sys_oldumount)
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __NR_uselib 1077
-__SYSCALL(__NR_uselib, sys_uselib)
-#define __NR__sysctl 1078
-__SYSCALL(__NR__sysctl, sys_sysctl)
-
-#define __NR_fork 1079
-#ifdef CONFIG_MMU
-__SYSCALL(__NR_fork, sys_fork)
-#else
-__SYSCALL(__NR_fork, sys_ni_syscall)
-#endif /* CONFIG_MMU */
-
-#undef __NR_syscalls
-#define __NR_syscalls (__NR_fork+1)
-
-#endif /* __ARCH_WANT_SYSCALL_DEPRECATED */
-
-/*
* 32 bit systems traditionally used different
* syscalls for off_t and loff_t arguments, while
* 64 bit systems only need the off_t version.
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 919248fb4028..c363b67f2d0a 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -160,6 +160,7 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_OP_ALLOC_CTX 1
#define AMDGPU_CTX_OP_FREE_CTX 2
#define AMDGPU_CTX_OP_QUERY_STATE 3
+#define AMDGPU_CTX_OP_QUERY_STATE2 4
/* GPU reset status */
#define AMDGPU_CTX_NO_RESET 0
@@ -170,6 +171,13 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
+/* indicate a gpu reset occurred after the ctx was created */
+#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
+/* indicate a vram loss occurred after the ctx was created */
+#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
+/* indicate that a job from this context once caused a gpu hang */
+#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
+
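
A hedged sketch of how userspace might react to the flags word returned by an AMDGPU_CTX_OP_QUERY_STATE2 query; the ioctl plumbing is omitted and 'flags' simply stands in for whatever value the kernel filled in:

        #include <stdio.h>
        #include <drm/amdgpu_drm.h>

        static void report_ctx_state(unsigned int flags)
        {
                if (flags & AMDGPU_CTX_QUERY2_FLAGS_RESET)
                        puts("a GPU reset happened after this context was created");
                if (flags & AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST)
                        puts("VRAM contents were lost; buffers must be re-uploaded");
                if (flags & AMDGPU_CTX_QUERY2_FLAGS_GUILTY)
                        puts("a job from this context caused a hang");
        }

        int main(void)
        {
                /* example: pretend the kernel reported a reset */
                report_ctx_state(AMDGPU_CTX_QUERY2_FLAGS_RESET);
                return 0;
        }
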
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
@@ -610,6 +618,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_SOS 0x0c
/* Subquery id: Query PSP ASD firmware version */
#define AMDGPU_INFO_FW_ASD 0x0d
+ /* Subquery id: Query VCN firmware version */
+ #define AMDGPU_INFO_FW_VCN 0x0e
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
@@ -656,6 +666,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_SENSOR_VDDNB 0x6
/* Subquery id: Query graphics voltage */
#define AMDGPU_INFO_SENSOR_VDDGFX 0x7
+ /* Subquery id: Query GPU stable pstate shader clock */
+ #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
+ /* Subquery id: Query GPU stable pstate memory clock */
+ #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
@@ -794,6 +808,7 @@ struct drm_amdgpu_info_firmware {
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM 6
#define AMDGPU_VRAM_TYPE_DDR3 7
+#define AMDGPU_VRAM_TYPE_DDR4 8
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -869,6 +884,10 @@ struct drm_amdgpu_info_device {
__u32 _pad1;
/* always on cu bitmap */
__u32 cu_ao_bitmap[4][4];
+ /** Starting high virtual address for UMDs. */
+ __u64 high_va_offset;
+ /** The maximum high virtual address */
+ __u64 high_va_max;
};
struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 3ad838d3f93f..e04613d30a13 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -178,7 +178,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
-#define DRM_FORMAT_MOD_VENDOR_NV 0x03
+#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03
#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
@@ -188,7 +188,7 @@ extern "C" {
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#define fourcc_mod_code(vendor, val) \
- ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
+ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
/*
* Format Modifier tokens:
@@ -338,29 +338,17 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
-/* NVIDIA Tegra frame buffer modifiers */
-
-/*
- * Some modifiers take parameters, for example the number of vertical GOBs in
- * a block. Reserve the lower 32 bits for parameters
- */
-#define __fourcc_mod_tegra_mode_shift 32
-#define fourcc_mod_tegra_code(val, params) \
- fourcc_mod_code(NV, ((((__u64)val) << __fourcc_mod_tegra_mode_shift) | params))
-#define fourcc_mod_tegra_mod(m) \
- (m & ~((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
-#define fourcc_mod_tegra_param(m) \
- (m & ((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
+/* NVIDIA frame buffer modifiers */
/*
* Tegra Tiled Layout, used by Tegra 2, 3 and 4.
*
* Pixels are arranged in simple tiles of 16 x 16 bytes.
*/
-#define NV_FORMAT_MOD_TEGRA_TILED fourcc_mod_tegra_code(1, 0)
+#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
/*
- * Tegra 16Bx2 Block Linear layout, used by TK1/TX1
+ * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
*
* Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
* vertically by a power of 2 (1 to 32 GOBs) to form a block.
@@ -380,7 +368,21 @@ extern "C" {
* Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
* in full detail.
*/
-#define NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(v) fourcc_mod_tegra_code(2, v)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
+ fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
+
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
+ fourcc_mod_code(NVIDIA, 0x10)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
+ fourcc_mod_code(NVIDIA, 0x11)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
+ fourcc_mod_code(NVIDIA, 0x12)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
+ fourcc_mod_code(NVIDIA, 0x13)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
+ fourcc_mod_code(NVIDIA, 0x14)
+#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
+ fourcc_mod_code(NVIDIA, 0x15)
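
A small sanity check, assuming the installed headers carry this patch: the parameterized macro and the named *_GOB constants are meant to coincide, since v is the log2 of the GOB count:

        #include <assert.h>
        #include <drm/drm_fourcc.h>

        int main(void)
        {
                assert(DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) ==
                       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB);
                assert(DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) ==
                       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB);
                return 0;
        }
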
/*
* Broadcom VC4 "T" format
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 5597a87154e5..50bcf4214ff9 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -38,14 +38,18 @@ extern "C" {
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
-#define DRM_MODE_TYPE_BUILTIN (1<<0)
-#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
-#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
#define DRM_MODE_TYPE_PREFERRED (1<<3)
-#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
+#define DRM_MODE_TYPE_ALL (DRM_MODE_TYPE_PREFERRED | \
+ DRM_MODE_TYPE_USERDEF | \
+ DRM_MODE_TYPE_DRIVER)
+
/* Video mode flags */
/* bit compatible with the xrandr RR_ definitions (bits 0-13)
*
@@ -66,8 +70,8 @@ extern "C" {
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
-#define DRM_MODE_FLAG_BCAST (1<<10)
-#define DRM_MODE_FLAG_PIXMUX (1<<11)
+#define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */
+#define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/*
@@ -99,6 +103,20 @@ extern "C" {
#define DRM_MODE_FLAG_PIC_AR_16_9 \
(DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \
+ DRM_MODE_FLAG_NHSYNC | \
+ DRM_MODE_FLAG_PVSYNC | \
+ DRM_MODE_FLAG_NVSYNC | \
+ DRM_MODE_FLAG_INTERLACE | \
+ DRM_MODE_FLAG_DBLSCAN | \
+ DRM_MODE_FLAG_CSYNC | \
+ DRM_MODE_FLAG_PCSYNC | \
+ DRM_MODE_FLAG_NCSYNC | \
+ DRM_MODE_FLAG_HSKEW | \
+ DRM_MODE_FLAG_DBLCLK | \
+ DRM_MODE_FLAG_CLKDIV2 | \
+ DRM_MODE_FLAG_3D_MASK)
+
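
One plausible (hypothetical) use of the new DRM_MODE_TYPE_ALL mask is rejecting user-supplied modes that still set the deprecated BUILTIN-era bits; DRM_MODE_FLAG_ALL can be used the same way for the mode flags:

        #include <drm/drm_mode.h>

        /* sketch only: the real validation lives in the DRM core */
        static int mode_type_looks_sane(const struct drm_mode_modeinfo *m)
        {
                return (m->type & ~DRM_MODE_TYPE_ALL) == 0;
        }
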
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
@@ -173,6 +191,10 @@ extern "C" {
DRM_MODE_REFLECT_X | \
DRM_MODE_REFLECT_Y)
+/* Content Protection Flags */
+#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0
+#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
+#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
struct drm_mode_modeinfo {
__u32 clock;
@@ -341,7 +363,7 @@ struct drm_mode_get_connector {
__u32 pad;
};
-#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
@@ -576,8 +598,11 @@ struct drm_mode_crtc_lut {
};
struct drm_color_ctm {
- /* Conversion matrix in S31.32 format. */
- __s64 matrix[9];
+ /*
+ * Conversion matrix in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ */
+ __u64 matrix[9];
};
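
A hedged userspace helper for filling the matrix, assuming the usual reading of the comment above (bit 63 carries the sign, the low 32 bits the fraction, and the magnitude is stored as an absolute value rather than two's complement):

        #include <math.h>
        #include <stdint.h>

        static uint64_t ctm_s31_32(double v)
        {
                uint64_t out = (uint64_t)(fabs(v) * 4294967296.0);      /* * 2^32 */

                if (v < 0.0)
                        out |= 1ULL << 63;      /* sign-magnitude, not two's complement */
                return out;
        }

For example, an identity CTM would set matrix[0], matrix[4] and matrix[8] to ctm_s31_32(1.0) and every other entry to ctm_s31_32(0.0).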
struct drm_color_lut {
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index e9b997a0ef27..0d5c49dc478c 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -55,6 +55,12 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
+#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
+#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
+#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
+#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
+#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
+#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f
#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index d01087b2a651..4a54305120e0 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -135,172 +135,6 @@ struct drm_exynos_g2d_exec {
__u64 async;
};
-enum drm_exynos_ops_id {
- EXYNOS_DRM_OPS_SRC,
- EXYNOS_DRM_OPS_DST,
- EXYNOS_DRM_OPS_MAX,
-};
-
-struct drm_exynos_sz {
- __u32 hsize;
- __u32 vsize;
-};
-
-struct drm_exynos_pos {
- __u32 x;
- __u32 y;
- __u32 w;
- __u32 h;
-};
-
-enum drm_exynos_flip {
- EXYNOS_DRM_FLIP_NONE = (0 << 0),
- EXYNOS_DRM_FLIP_VERTICAL = (1 << 0),
- EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1),
- EXYNOS_DRM_FLIP_BOTH = EXYNOS_DRM_FLIP_VERTICAL |
- EXYNOS_DRM_FLIP_HORIZONTAL,
-};
-
-enum drm_exynos_degree {
- EXYNOS_DRM_DEGREE_0,
- EXYNOS_DRM_DEGREE_90,
- EXYNOS_DRM_DEGREE_180,
- EXYNOS_DRM_DEGREE_270,
-};
-
-enum drm_exynos_planer {
- EXYNOS_DRM_PLANAR_Y,
- EXYNOS_DRM_PLANAR_CB,
- EXYNOS_DRM_PLANAR_CR,
- EXYNOS_DRM_PLANAR_MAX,
-};
-
-/**
- * A structure for ipp supported property list.
- *
- * @version: version of this structure.
- * @ipp_id: id of ipp driver.
- * @count: count of ipp driver.
- * @writeback: flag of writeback supporting.
- * @flip: flag of flip supporting.
- * @degree: flag of degree information.
- * @csc: flag of csc supporting.
- * @crop: flag of crop supporting.
- * @scale: flag of scale supporting.
- * @refresh_min: min hz of refresh.
- * @refresh_max: max hz of refresh.
- * @crop_min: crop min resolution.
- * @crop_max: crop max resolution.
- * @scale_min: scale min resolution.
- * @scale_max: scale max resolution.
- */
-struct drm_exynos_ipp_prop_list {
- __u32 version;
- __u32 ipp_id;
- __u32 count;
- __u32 writeback;
- __u32 flip;
- __u32 degree;
- __u32 csc;
- __u32 crop;
- __u32 scale;
- __u32 refresh_min;
- __u32 refresh_max;
- __u32 reserved;
- struct drm_exynos_sz crop_min;
- struct drm_exynos_sz crop_max;
- struct drm_exynos_sz scale_min;
- struct drm_exynos_sz scale_max;
-};
-
-/**
- * A structure for ipp config.
- *
- * @ops_id: property of operation directions.
- * @flip: property of mirror, flip.
- * @degree: property of rotation degree.
- * @fmt: property of image format.
- * @sz: property of image size.
- * @pos: property of image position(src-cropped,dst-scaler).
- */
-struct drm_exynos_ipp_config {
- __u32 ops_id;
- __u32 flip;
- __u32 degree;
- __u32 fmt;
- struct drm_exynos_sz sz;
- struct drm_exynos_pos pos;
-};
-
-enum drm_exynos_ipp_cmd {
- IPP_CMD_NONE,
- IPP_CMD_M2M,
- IPP_CMD_WB,
- IPP_CMD_OUTPUT,
- IPP_CMD_MAX,
-};
-
-/**
- * A structure for ipp property.
- *
- * @config: source, destination config.
- * @cmd: definition of command.
- * @ipp_id: id of ipp driver.
- * @prop_id: id of property.
- * @refresh_rate: refresh rate.
- */
-struct drm_exynos_ipp_property {
- struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
- __u32 cmd;
- __u32 ipp_id;
- __u32 prop_id;
- __u32 refresh_rate;
-};
-
-enum drm_exynos_ipp_buf_type {
- IPP_BUF_ENQUEUE,
- IPP_BUF_DEQUEUE,
-};
-
-/**
- * A structure for ipp buffer operations.
- *
- * @ops_id: operation directions.
- * @buf_type: definition of buffer.
- * @prop_id: id of property.
- * @buf_id: id of buffer.
- * @handle: Y, Cb, Cr each planar handle.
- * @user_data: user data.
- */
-struct drm_exynos_ipp_queue_buf {
- __u32 ops_id;
- __u32 buf_type;
- __u32 prop_id;
- __u32 buf_id;
- __u32 handle[EXYNOS_DRM_PLANAR_MAX];
- __u32 reserved;
- __u64 user_data;
-};
-
-enum drm_exynos_ipp_ctrl {
- IPP_CTRL_PLAY,
- IPP_CTRL_STOP,
- IPP_CTRL_PAUSE,
- IPP_CTRL_RESUME,
- IPP_CTRL_MAX,
-};
-
-/**
- * A structure for ipp start/stop operations.
- *
- * @prop_id: id of property.
- * @ctrl: definition of control.
- */
-struct drm_exynos_ipp_cmd_ctrl {
- __u32 prop_id;
- __u32 ctrl;
-};
-
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP 0x01
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
@@ -312,11 +146,7 @@ struct drm_exynos_ipp_cmd_ctrl {
#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
#define DRM_EXYNOS_G2D_EXEC 0x22
-/* IPP - Image Post Processing */
-#define DRM_EXYNOS_IPP_GET_PROPERTY 0x30
-#define DRM_EXYNOS_IPP_SET_PROPERTY 0x31
-#define DRM_EXYNOS_IPP_QUEUE_BUF 0x32
-#define DRM_EXYNOS_IPP_CMD_CTRL 0x33
+/* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -335,18 +165,8 @@ struct drm_exynos_ipp_cmd_ctrl {
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
-#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list)
-#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property)
-#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf)
-#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl)
-
/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
-#define DRM_EXYNOS_IPP_EVENT 0x80000001
struct drm_exynos_g2d_event {
struct drm_event base;
@@ -357,16 +177,6 @@ struct drm_exynos_g2d_event {
__u32 reserved;
};
-struct drm_exynos_ipp_event {
- struct drm_event base;
- __u64 user_data;
- __u32 tv_sec;
- __u32 tv_usec;
- __u32 prop_id;
- __u32 reserved;
- __u32 buf_id[EXYNOS_DRM_OPS_MAX];
-};
-
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index ac3c6503ca27..7f5634ce8e88 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
I915_MOCS_CACHED,
};
+/*
+ * Different engines serve different roles, and there may be more than one
+ * engine serving each role. enum drm_i915_gem_engine_class provides a
+ * classification of the role of the engine, which may be used when requesting
+ * operations to be performed on a certain subset of engines, or for providing
+ * information about that group.
+ */
+enum drm_i915_gem_engine_class {
+ I915_ENGINE_CLASS_RENDER = 0,
+ I915_ENGINE_CLASS_COPY = 1,
+ I915_ENGINE_CLASS_VIDEO = 2,
+ I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+
+ I915_ENGINE_CLASS_INVALID = -1
+};
+
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
+ *
+ */
+
+enum drm_i915_pmu_engine_sample {
+ I915_SAMPLE_BUSY = 0,
+ I915_SAMPLE_WAIT = 1,
+ I915_SAMPLE_SEMA = 2
+};
+
+#define I915_PMU_SAMPLE_BITS (4)
+#define I915_PMU_SAMPLE_MASK (0xf)
+#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
+#define I915_PMU_CLASS_SHIFT \
+ (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
+
+#define __I915_PMU_ENGINE(class, instance, sample) \
+ ((class) << I915_PMU_CLASS_SHIFT | \
+ (instance) << I915_PMU_SAMPLE_BITS | \
+ (sample))
+
+#define I915_PMU_ENGINE_BUSY(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
+
+#define I915_PMU_ENGINE_WAIT(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
+
+#define I915_PMU_ENGINE_SEMA(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
+
+#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+
+#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
+#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
+#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
+#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+
+#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+
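
The config value handed to perf_event_open() on the i915 PMU is just this packed class/instance/sample triplet. A hedged sketch that only prints a few encodings (the perf_event_open() plumbing itself is omitted):

        #include <stdio.h>
        #include <drm/i915_drm.h>

        int main(void)
        {
                /* class << 12 | instance << 4 | sample, per the macros above */
                printf("render0 busy: %#lx\n",
                       (unsigned long)I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0));
                printf("video0 sema : %#lx\n",
                       (unsigned long)I915_PMU_ENGINE_SEMA(I915_ENGINE_CLASS_VIDEO, 0));
                printf("interrupts  : %#lx\n", (unsigned long)I915_PMU_INTERRUPTS);
                return 0;
        }
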
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -262,6 +318,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_OPEN 0x36
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
+#define DRM_I915_QUERY 0x39
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -319,6 +376,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
+#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -450,6 +508,27 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
+/*
+ * Query whether every context (both per-file default and user created) is
+ * isolated (insofar as HW supports). If this parameter is not true, then
+ * freshly created contexts may inherit values from an existing context,
+ * rather than default HW values. If true, it also ensures (insofar as HW
+ * supports) that all state set by this context will not leak to any other
+ * context.
+ *
+ * As not every engine across every gen supports contexts, the returned
+ * value reports the support of context isolation for individual engines by
+ * returning a bitmask of each engine class set to true if that class supports
+ * isolation.
+ */
+#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
+
+/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
+ * registers. This used to be fixed per platform but from CNL onwards, this
+ * might vary depending on the parts.
+ */
+#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+
typedef struct drm_i915_getparam {
__s32 param;
/*
@@ -1281,7 +1360,9 @@ struct drm_intel_overlay_attrs {
* active on a given plane.
*/
-#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
+ * flags==0 to disable colorkeying.
+ */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
@@ -1527,15 +1608,115 @@ struct drm_i915_perf_oa_config {
__u32 n_flex_regs;
/*
- * These fields are pointers to tuples of u32 values (register
- * address, value). For example the expected length of the buffer
- * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ * These fields are pointers to tuples of u32 values (register address,
+ * value). For example the expected length of the buffer pointed by
+ * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
*/
__u64 mux_regs_ptr;
__u64 boolean_regs_ptr;
__u64 flex_regs_ptr;
};
+struct drm_i915_query_item {
+ __u64 query_id;
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+
+ /*
+ * When set to zero by userspace, this is filled with the size of the
+ * data to be written at the data_ptr pointer. The kernel sets this
+ * value to a negative value to signal an error on a particular query
+ * item.
+ */
+ __s32 length;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * Data will be written at the location pointed by data_ptr when the
+ * value of length matches the length of the data to be written by the
+ * kernel.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_query {
+ __u32 num_items;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * This points to an array of num_items drm_i915_query_item structures.
+ */
+ __u64 items_ptr;
+};
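The length field above implies a two-pass protocol: issue the ioctl once with length set to zero to learn the required size, allocate, then issue it again so the kernel writes the data. A minimal sketch, assuming an open i915 DRM file descriptor (the helper name is illustrative):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static void *i915_query_alloc(int i915_fd, __u64 query_id, __s32 *len)
{
	struct drm_i915_query_item item = { .query_id = query_id };
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (__u64)(uintptr_t)&item,
	};
	void *data;

	/* First pass: item.length == 0, the kernel fills in the size. */
	if (ioctl(i915_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return NULL;

	data = calloc(1, item.length);
	if (!data)
		return NULL;

	/* Second pass: length matches, so the kernel writes the data. */
	item.data_ptr = (__u64)(uintptr_t)data;
	if (ioctl(i915_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0) {
		free(data);
		return NULL;
	}

	*len = item.length;
	return data;
}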
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
+ *
+ * data: contains the 3 pieces of information:
+ *
+ * - the slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the following
+ * formula:
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * - the subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. The availability of subslice Y in slice
+ * X can be queried with the following formula:
+ *
+ * (data[subslice_offset +
+ * X * subslice_stride +
+ * Y / 8] >> (Y % 8)) & 1
+ *
+ * - the EU mask for each subslice in each slice with one bit per EU telling
+ * whether an EU is available. The availability of EU Z in subslice Y in
+ * slice X can be queried with the following formula:
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8] >> (Z % 8)) & 1
+ */
+struct drm_i915_query_topology_info {
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u16 flags;
+
+ __u16 max_slices;
+ __u16 max_subslices;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Offset in data[] at which the subslice masks are stored.
+ */
+ __u16 subslice_offset;
+
+ /*
+ * Stride at which each of the subslice masks for each slice are
+ * stored.
+ */
+ __u16 subslice_stride;
+
+ /*
+ * Offset in data[] at which the EU masks are stored.
+ */
+ __u16 eu_offset;
+
+ /*
+ * Stride at which each of the EU masks for each subslice are stored.
+ */
+ __u16 eu_stride;
+
+ __u8 data[];
+};
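The three formulas documented above translate directly into small accessors over the returned blob; a sketch (the helper names are illustrative):

static int slice_available(const struct drm_i915_query_topology_info *info,
			   int s)
{
	return (info->data[s / 8] >> (s % 8)) & 1;
}

static int subslice_available(const struct drm_i915_query_topology_info *info,
			      int s, int ss)
{
	return (info->data[info->subslice_offset +
			   s * info->subslice_stride +
			   ss / 8] >> (ss % 8)) & 1;
}

static int eu_available(const struct drm_i915_query_topology_info *info,
			int s, int ss, int eu)
{
	return (info->data[info->eu_offset +
			   (s * info->max_subslices + ss) * info->eu_stride +
			   eu / 8] >> (eu % 8)) & 1;
}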
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index bbbaffad772d..c06d0a5bdd80 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -201,10 +201,12 @@ struct drm_msm_gem_submit_bo {
#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
MSM_SUBMIT_FENCE_FD_OUT | \
+ MSM_SUBMIT_SUDO | \
0)
/* Each cmdstream submit consists of a table of buffers involved, and
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index 52263b575bdc..b95a0e11cb07 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -42,6 +42,9 @@ extern "C" {
#define DRM_VC4_GET_TILING 0x09
#define DRM_VC4_LABEL_BO 0x0a
#define DRM_VC4_GEM_MADVISE 0x0b
+#define DRM_VC4_PERFMON_CREATE 0x0c
+#define DRM_VC4_PERFMON_DESTROY 0x0d
+#define DRM_VC4_PERFMON_GET_VALUES 0x0e
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -55,6 +58,9 @@ extern "C" {
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
+#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
+#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
+#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
@@ -173,6 +179,15 @@ struct drm_vc4_submit_cl {
* wait ioctl).
*/
__u64 seqno;
+
+ /* ID of the perfmon to attach to this job. 0 means no perfmon. */
+ __u32 perfmonid;
+
+ /* Unused field to align this struct on 64 bits. Must be set to 0.
+ * If one ever needs to add a u32 field to this struct, this field
+ * can be used.
+ */
+ __u32 pad2;
};
/**
@@ -308,6 +323,7 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
+#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8
struct drm_vc4_get_param {
__u32 param;
@@ -352,6 +368,66 @@ struct drm_vc4_gem_madvise {
__u32 pad;
};
+enum {
+ VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
+ VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
+ VC4_PERFCNT_FEP_CLIPPED_QUADS,
+ VC4_PERFCNT_FEP_VALID_QUADS,
+ VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
+ VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
+ VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
+ VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
+ VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
+ VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
+ VC4_PERFCNT_PSE_PRIMS_REVERSED,
+ VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
+ VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
+ VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
+ VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
+ VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
+ VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
+ VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
+ VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
+ VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
+ VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
+ VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
+ VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
+ VC4_PERFCNT_NUM_EVENTS,
+};
+
+#define DRM_VC4_MAX_PERF_COUNTERS 16
+
+struct drm_vc4_perfmon_create {
+ __u32 id;
+ __u32 ncounters;
+ __u8 events[DRM_VC4_MAX_PERF_COUNTERS];
+};
+
+struct drm_vc4_perfmon_destroy {
+ __u32 id;
+};
+
+/*
+ * Returns the values of the performance counters tracked by this
+ * perfmon (as an array of ncounters u64 values).
+ *
+ * No implicit synchronization is performed, so the user has to
+ * guarantee that any jobs using this perfmon have already been
+ * completed (probably by blocking on the seqno returned by the
+ * last exec that used the perfmon).
+ */
+struct drm_vc4_perfmon_get_values {
+ __u32 id;
+ __u64 values_ptr;
+};
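Taken together these ioctls form a create/attach/read/destroy cycle. A hedged sketch of that flow, assuming an open vc4 DRM file descriptor; the helper name, the chosen counters and the elided job-submission step are illustrative:

#include <stdint.h>
#include <sys/ioctl.h>

static int vc4_read_two_counters(int vc4_fd, __u64 values[2])
{
	struct drm_vc4_perfmon_create create = {
		.ncounters = 2,
		.events = {
			VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
			VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
		},
	};
	struct drm_vc4_perfmon_get_values get = { 0 };
	struct drm_vc4_perfmon_destroy destroy = { 0 };
	int ret;

	/* The kernel allocates the perfmon and reports its id in create.id. */
	if (ioctl(vc4_fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create))
		return -1;

	/* ... submit jobs with drm_vc4_submit_cl.perfmonid = create.id and
	 * wait on their seqno before reading the values ...
	 */

	get.id = create.id;
	get.values_ptr = (__u64)(uintptr_t)values;
	ret = ioctl(vc4_fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);

	destroy.id = create.id;
	ioctl(vc4_fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
	return ret;
}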
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 91a31ffed828..9a781f0611df 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
struct drm_virtgpu_getparam {
__u64 param;
diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h
new file mode 100644
index 000000000000..af0630ba5437
--- /dev/null
+++ b/include/uapi/linux/arm_sdei.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017 Arm Ltd. */
+#ifndef _UAPI_LINUX_ARM_SDEI_H
+#define _UAPI_LINUX_ARM_SDEI_H
+
+#define SDEI_1_0_FN_BASE 0xC4000020
+#define SDEI_1_0_MASK 0xFFFFFFE0
+#define SDEI_1_0_FN(n) (SDEI_1_0_FN_BASE + (n))
+
+#define SDEI_1_0_FN_SDEI_VERSION SDEI_1_0_FN(0x00)
+#define SDEI_1_0_FN_SDEI_EVENT_REGISTER SDEI_1_0_FN(0x01)
+#define SDEI_1_0_FN_SDEI_EVENT_ENABLE SDEI_1_0_FN(0x02)
+#define SDEI_1_0_FN_SDEI_EVENT_DISABLE SDEI_1_0_FN(0x03)
+#define SDEI_1_0_FN_SDEI_EVENT_CONTEXT SDEI_1_0_FN(0x04)
+#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE SDEI_1_0_FN(0x05)
+#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME SDEI_1_0_FN(0x06)
+#define SDEI_1_0_FN_SDEI_EVENT_UNREGISTER SDEI_1_0_FN(0x07)
+#define SDEI_1_0_FN_SDEI_EVENT_STATUS SDEI_1_0_FN(0x08)
+#define SDEI_1_0_FN_SDEI_EVENT_GET_INFO SDEI_1_0_FN(0x09)
+#define SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET SDEI_1_0_FN(0x0A)
+#define SDEI_1_0_FN_SDEI_PE_MASK SDEI_1_0_FN(0x0B)
+#define SDEI_1_0_FN_SDEI_PE_UNMASK SDEI_1_0_FN(0x0C)
+#define SDEI_1_0_FN_SDEI_INTERRUPT_BIND SDEI_1_0_FN(0x0D)
+#define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E)
+#define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11)
+#define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12)
+
+#define SDEI_VERSION_MAJOR_SHIFT 48
+#define SDEI_VERSION_MAJOR_MASK 0x7fff
+#define SDEI_VERSION_MINOR_SHIFT 32
+#define SDEI_VERSION_MINOR_MASK 0xffff
+#define SDEI_VERSION_VENDOR_SHIFT 0
+#define SDEI_VERSION_VENDOR_MASK 0xffffffff
+
+#define SDEI_VERSION_MAJOR(x) (x>>SDEI_VERSION_MAJOR_SHIFT & SDEI_VERSION_MAJOR_MASK)
+#define SDEI_VERSION_MINOR(x) (x>>SDEI_VERSION_MINOR_SHIFT & SDEI_VERSION_MINOR_MASK)
+#define SDEI_VERSION_VENDOR(x) (x>>SDEI_VERSION_VENDOR_SHIFT & SDEI_VERSION_VENDOR_MASK)
+
+/* SDEI return values */
+#define SDEI_SUCCESS 0
+#define SDEI_NOT_SUPPORTED -1
+#define SDEI_INVALID_PARAMETERS -2
+#define SDEI_DENIED -3
+#define SDEI_PENDING -5
+#define SDEI_OUT_OF_RESOURCE -10
+
+/* EVENT_REGISTER flags */
+#define SDEI_EVENT_REGISTER_RM_ANY 0
+#define SDEI_EVENT_REGISTER_RM_PE 1
+
+/* EVENT_STATUS return value bits */
+#define SDEI_EVENT_STATUS_RUNNING 2
+#define SDEI_EVENT_STATUS_ENABLED 1
+#define SDEI_EVENT_STATUS_REGISTERED 0
+
+/* EVENT_COMPLETE status values */
+#define SDEI_EV_HANDLED 0
+#define SDEI_EV_FAILED 1
+
+/* GET_INFO values */
+#define SDEI_EVENT_INFO_EV_TYPE 0
+#define SDEI_EVENT_INFO_EV_SIGNALED 1
+#define SDEI_EVENT_INFO_EV_PRIORITY 2
+#define SDEI_EVENT_INFO_EV_ROUTING_MODE 3
+#define SDEI_EVENT_INFO_EV_ROUTING_AFF 4
+
+/* and their results */
+#define SDEI_EVENT_TYPE_PRIVATE 0
+#define SDEI_EVENT_TYPE_SHARED 1
+#define SDEI_EVENT_PRIORITY_NORMAL 0
+#define SDEI_EVENT_PRIORITY_CRITICAL 1
+
+#endif /* _UAPI_LINUX_ARM_SDEI_H */
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
new file mode 100644
index 000000000000..894d8d2f713d
--- /dev/null
+++ b/include/uapi/linux/batadv_packet.h
@@ -0,0 +1,635 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
+/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_LINUX_BATADV_PACKET_H_
+#define _UAPI_LINUX_BATADV_PACKET_H_
+
+#include <asm/byteorder.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+/**
+ * batadv_tp_is_error() - Check throughput meter return code for error
+ * @n: throughput meter return code
+ *
+ * Return: 0 when no error was detected, != 0 otherwise
+ */
+#define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 1 : 0)
+
+/**
+ * enum batadv_packettype - types for batman-adv encapsulated packets
+ * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
+ * @BATADV_BCAST: broadcast packets carrying broadcast payload
+ * @BATADV_CODED: network coded packets
+ * @BATADV_ELP: echo location packets for B.A.T.M.A.N. V
+ * @BATADV_OGM2: originator messages for B.A.T.M.A.N. V
+ *
+ * @BATADV_UNICAST: unicast packets carrying unicast payload traffic
+ * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
+ * payload packet
+ * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of
+ * the sender
+ * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute
+ * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers
+ */
+enum batadv_packettype {
+ /* 0x00 - 0x3f: local packets or special rules for handling */
+ BATADV_IV_OGM = 0x00,
+ BATADV_BCAST = 0x01,
+ BATADV_CODED = 0x02,
+ BATADV_ELP = 0x03,
+ BATADV_OGM2 = 0x04,
+ /* 0x40 - 0x7f: unicast */
+#define BATADV_UNICAST_MIN 0x40
+ BATADV_UNICAST = 0x40,
+ BATADV_UNICAST_FRAG = 0x41,
+ BATADV_UNICAST_4ADDR = 0x42,
+ BATADV_ICMP = 0x43,
+ BATADV_UNICAST_TVLV = 0x44,
+#define BATADV_UNICAST_MAX 0x7f
+ /* 0x80 - 0xff: reserved */
+};
+
+/**
+ * enum batadv_subtype - packet subtype for unicast4addr
+ * @BATADV_P_DATA: user payload
+ * @BATADV_P_DAT_DHT_GET: DHT request message
+ * @BATADV_P_DAT_DHT_PUT: DHT store message
+ * @BATADV_P_DAT_CACHE_REPLY: ARP reply generated by DAT
+ */
+enum batadv_subtype {
+ BATADV_P_DATA = 0x01,
+ BATADV_P_DAT_DHT_GET = 0x02,
+ BATADV_P_DAT_DHT_PUT = 0x03,
+ BATADV_P_DAT_CACHE_REPLY = 0x04,
+};
+
+/* this file is included by batctl which needs these defines */
+#define BATADV_COMPAT_VERSION 15
+
+/**
+ * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
+ * previously received from someone other than the best neighbor.
+ * @BATADV_PRIMARIES_FIRST_HOP: flag unused.
+ * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a
+ * one hop neighbor on the interface where it was originally received.
+ */
+enum batadv_iv_flags {
+ BATADV_NOT_BEST_NEXT_HOP = 1UL << 0,
+ BATADV_PRIMARIES_FIRST_HOP = 1UL << 1,
+ BATADV_DIRECTLINK = 1UL << 2,
+};
+
+/**
+ * enum batadv_icmp_packettype - ICMP message types
+ * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
+ * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
+ * @BATADV_ECHO_REQUEST: request BATADV_ECHO_REPLY from destination
+ * @BATADV_TTL_EXCEEDED: error after BATADV_ECHO_REQUEST traversed too many hops
+ * @BATADV_PARAMETER_PROBLEM: return code for malformed messages
+ * @BATADV_TP: throughput meter packet
+ */
+enum batadv_icmp_packettype {
+ BATADV_ECHO_REPLY = 0,
+ BATADV_DESTINATION_UNREACHABLE = 3,
+ BATADV_ECHO_REQUEST = 8,
+ BATADV_TTL_EXCEEDED = 11,
+ BATADV_PARAMETER_PROBLEM = 12,
+ BATADV_TP = 15,
+};
+
+/**
+ * enum batadv_mcast_flags - flags for multicast capabilities and settings
+ * @BATADV_MCAST_WANT_ALL_UNSNOOPABLES: we want all packets destined for
+ * 224.0.0.0/24 or ff02::1
+ * @BATADV_MCAST_WANT_ALL_IPV4: we want all IPv4 multicast packets
+ * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
+ */
+enum batadv_mcast_flags {
+ BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0,
+ BATADV_MCAST_WANT_ALL_IPV4 = 1UL << 1,
+ BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2,
+};
+
+/* tt data subtypes */
+#define BATADV_TT_DATA_TYPE_MASK 0x0F
+
+/**
+ * enum batadv_tt_data_flags - flags for tt data tvlv
+ * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM
+ * @BATADV_TT_REQUEST: TT request message
+ * @BATADV_TT_RESPONSE: TT response message
+ * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
+ */
+enum batadv_tt_data_flags {
+ BATADV_TT_OGM_DIFF = 1UL << 0,
+ BATADV_TT_REQUEST = 1UL << 1,
+ BATADV_TT_RESPONSE = 1UL << 2,
+ BATADV_TT_FULL_TABLE = 1UL << 4,
+};
+
+/**
+ * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
+ */
+enum batadv_vlan_flags {
+ BATADV_VLAN_HAS_TAG = 1UL << 15,
+};
+
+/**
+ * enum batadv_bla_claimframe - claim frame types for the bridge loop avoidance
+ * @BATADV_CLAIM_TYPE_CLAIM: claim of a client mac address
+ * @BATADV_CLAIM_TYPE_UNCLAIM: unclaim of a client mac address
+ * @BATADV_CLAIM_TYPE_ANNOUNCE: announcement of backbone with current crc
+ * @BATADV_CLAIM_TYPE_REQUEST: request of full claim table
+ * @BATADV_CLAIM_TYPE_LOOPDETECT: mesh-traversing loop detect packet
+ */
+enum batadv_bla_claimframe {
+ BATADV_CLAIM_TYPE_CLAIM = 0x00,
+ BATADV_CLAIM_TYPE_UNCLAIM = 0x01,
+ BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
+ BATADV_CLAIM_TYPE_REQUEST = 0x03,
+ BATADV_CLAIM_TYPE_LOOPDETECT = 0x04,
+};
+
+/**
+ * enum batadv_tvlv_type - tvlv type definitions
+ * @BATADV_TVLV_GW: gateway tvlv
+ * @BATADV_TVLV_DAT: distributed arp table tvlv
+ * @BATADV_TVLV_NC: network coding tvlv
+ * @BATADV_TVLV_TT: translation table tvlv
+ * @BATADV_TVLV_ROAM: roaming advertisement tvlv
+ * @BATADV_TVLV_MCAST: multicast capability tvlv
+ */
+enum batadv_tvlv_type {
+ BATADV_TVLV_GW = 0x01,
+ BATADV_TVLV_DAT = 0x02,
+ BATADV_TVLV_NC = 0x03,
+ BATADV_TVLV_TT = 0x04,
+ BATADV_TVLV_ROAM = 0x05,
+ BATADV_TVLV_MCAST = 0x06,
+};
+
+#pragma pack(2)
+/* the destination hardware field in the ARP frame is used to
+ * transport the claim type and the group id
+ */
+struct batadv_bla_claim_dst {
+ __u8 magic[3]; /* FF:43:05 */
+ __u8 type; /* bla_claimframe */
+ __be16 group; /* group id */
+};
+
+/**
+ * struct batadv_ogm_packet - ogm (routing protocol) packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @flags: contains routing relevant flags - see enum batadv_iv_flags
+ * @seqno: sequence identification
+ * @orig: address of the source node
+ * @prev_sender: address of the previous sender
+ * @reserved: reserved byte for alignment
+ * @tq: transmission quality
+ * @tvlv_len: length of tvlv data following the ogm header
+ */
+struct batadv_ogm_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 flags;
+ __be32 seqno;
+ __u8 orig[ETH_ALEN];
+ __u8 prev_sender[ETH_ALEN];
+ __u8 reserved;
+ __u8 tq;
+ __be16 tvlv_len;
+};
+
+#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
+
+/**
+ * struct batadv_ogm2_packet - ogm2 (routing protocol) packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @flags: reserved for routing relevant flags - currently always 0
+ * @seqno: sequence number
+ * @orig: originator mac address
+ * @tvlv_len: length of the appended tvlv buffer (in bytes)
+ * @throughput: the currently flooded path throughput
+ */
+struct batadv_ogm2_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 flags;
+ __be32 seqno;
+ __u8 orig[ETH_ALEN];
+ __be16 tvlv_len;
+ __be32 throughput;
+};
+
+#define BATADV_OGM2_HLEN sizeof(struct batadv_ogm2_packet)
+
+/**
+ * struct batadv_elp_packet - elp (neighbor discovery) packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @orig: originator mac address
+ * @seqno: sequence number
+ * @elp_interval: currently used ELP sending interval in ms
+ */
+struct batadv_elp_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 orig[ETH_ALEN];
+ __be32 seqno;
+ __be32 elp_interval;
+};
+
+#define BATADV_ELP_HLEN sizeof(struct batadv_elp_packet)
+
+/**
+ * struct batadv_icmp_header - common members among all the ICMP packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ * @align: not used - useful for alignment purposes only
+ *
+ * This structure is used for ICMP packet parsing only and it is never sent
+ * over the wire. The alignment field at the end is there to ensure that
+ * members are padded the same way as they are in real packets.
+ */
+struct batadv_icmp_header {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 msg_type; /* see ICMP message types above */
+ __u8 dst[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __u8 uid;
+ __u8 align[3];
+};
+
+/**
+ * struct batadv_icmp_packet - ICMP packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ * @reserved: not used - useful for alignment
+ * @seqno: ICMP sequence number
+ */
+struct batadv_icmp_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 msg_type; /* see ICMP message types above */
+ __u8 dst[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __u8 uid;
+ __u8 reserved;
+ __be16 seqno;
+};
+
+/**
+ * struct batadv_icmp_tp_packet - ICMP TP Meter packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ * @subtype: TP packet subtype (see batadv_icmp_tp_subtype)
+ * @session: TP session identifier
+ * @seqno: the TP sequence number
+ * @timestamp: time when the packet has been sent. This value is filled in a
+ * TP_MSG and echoed back in the next TP_ACK so that the sender can compute the
+ * RTT. Since it is read only by the host which wrote it, there is no need to
+ * store it using network order
+ */
+struct batadv_icmp_tp_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 msg_type; /* see ICMP message types above */
+ __u8 dst[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __u8 uid;
+ __u8 subtype;
+ __u8 session[2];
+ __be32 seqno;
+ __be32 timestamp;
+};
+
+/**
+ * enum batadv_icmp_tp_subtype - ICMP TP Meter packet subtypes
+ * @BATADV_TP_MSG: Msg from sender to receiver
+ * @BATADV_TP_ACK: acknowledgment from receiver to sender
+ */
+enum batadv_icmp_tp_subtype {
+ BATADV_TP_MSG = 0,
+ BATADV_TP_ACK,
+};
+
+#define BATADV_RR_LEN 16
+
+/**
+ * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ * @rr_cur: number of entries in the rr array
+ * @seqno: ICMP sequence number
+ * @rr: route record array
+ */
+struct batadv_icmp_packet_rr {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 msg_type; /* see ICMP message types above */
+ __u8 dst[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __u8 uid;
+ __u8 rr_cur;
+ __be16 seqno;
+ __u8 rr[BATADV_RR_LEN][ETH_ALEN];
+};
+
+#define BATADV_ICMP_MAX_PACKET_SIZE sizeof(struct batadv_icmp_packet_rr)
+
+/* All packet headers in front of an ethernet header have to be completely
+ * divisible by 2 but not by 4 to make the payload after the ethernet
+ * header again 4 bytes boundary aligned.
+ *
+ * A packing of 2 is necessary to avoid extra padding at the end of the struct
+ * caused by a structure member which is larger than two bytes. Otherwise
+ * the structure would not fulfill the previously mentioned rule to avoid the
+ * misalignment of the payload after the ethernet header. It may also lead to
+ * leakage of information when the padding is not initialized before sending.
+ */
+
+/**
+ * struct batadv_unicast_packet - unicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @ttvn: translation table version number
+ * @dest: originator destination of the unicast packet
+ */
+struct batadv_unicast_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 ttvn; /* destination translation table version number */
+ __u8 dest[ETH_ALEN];
+ /* "4 bytes boundary + 2 bytes" long to make the payload after the
+ * following ethernet header again 4 bytes boundary aligned
+ */
+};
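The sizing rule described above can be checked at build time. A minimal sketch for the unicast header, assuming a C11 compiler; the assertion is illustrative and not part of the header:

/* sizeof() is 10 under the pack(2) pragma: "4 bytes boundary + 2 bytes",
 * so the payload after the following ethernet header stays 4-byte aligned.
 */
_Static_assert(sizeof(struct batadv_unicast_packet) % 4 == 2,
	       "batadv_unicast_packet must end 2 bytes past a 4 byte boundary");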
+
+/**
+ * struct batadv_unicast_4addr_packet - extended unicast packet
+ * @u: common unicast packet header
+ * @src: address of the source
+ * @subtype: packet subtype
+ * @reserved: reserved byte for alignment
+ */
+struct batadv_unicast_4addr_packet {
+ struct batadv_unicast_packet u;
+ __u8 src[ETH_ALEN];
+ __u8 subtype;
+ __u8 reserved;
+ /* "4 bytes boundary + 2 bytes" long to make the payload after the
+ * following ethernet header again 4 bytes boundary aligned
+ */
+};
+
+/**
+ * struct batadv_frag_packet - fragmented packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @dest: final destination used when routing fragments
+ * @orig: originator of the fragment used when merging the packet
+ * @no: fragment number within this sequence
+ * @priority: priority of frame, from ToS IP precedence or 802.1p
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @total_size: size of the merged packet
+ */
+struct batadv_frag_packet {
+ __u8 packet_type;
+ __u8 version; /* batman version field */
+ __u8 ttl;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 no:4;
+ __u8 priority:3;
+ __u8 reserved:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 reserved:1;
+ __u8 priority:3;
+ __u8 no:4;
+#else
+#error "unknown bitfield endianness"
+#endif
+ __u8 dest[ETH_ALEN];
+ __u8 orig[ETH_ALEN];
+ __be16 seqno;
+ __be16 total_size;
+};
+
+/**
+ * struct batadv_bcast_packet - broadcast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @orig: originator of the broadcast packet
+ */
+struct batadv_bcast_packet {
+ __u8 packet_type;
+ __u8 version; /* batman version field */
+ __u8 ttl;
+ __u8 reserved;
+ __be32 seqno;
+ __u8 orig[ETH_ALEN];
+ /* "4 bytes boundary + 2 bytes" long to make the payload after the
+ * following ethernet header again 4 bytes boundary aligned
+ */
+};
+
+/**
+ * struct batadv_coded_packet - network coded packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @first_source: original source of first included packet
+ * @first_orig_dest: original destination of first included packet
+ * @first_crc: checksum of first included packet
+ * @first_ttvn: tt-version number of first included packet
+ * @second_ttl: ttl of second packet
+ * @second_dest: second receiver of this coded packet
+ * @second_source: original source of second included packet
+ * @second_orig_dest: original destination of second included packet
+ * @second_crc: checksum of second included packet
+ * @second_ttvn: tt version number of second included packet
+ * @coded_len: length of network coded part of the payload
+ */
+struct batadv_coded_packet {
+ __u8 packet_type;
+ __u8 version; /* batman version field */
+ __u8 ttl;
+ __u8 first_ttvn;
+ /* __u8 first_dest[ETH_ALEN]; - saved in mac header destination */
+ __u8 first_source[ETH_ALEN];
+ __u8 first_orig_dest[ETH_ALEN];
+ __be32 first_crc;
+ __u8 second_ttl;
+ __u8 second_ttvn;
+ __u8 second_dest[ETH_ALEN];
+ __u8 second_source[ETH_ALEN];
+ __u8 second_orig_dest[ETH_ALEN];
+ __be32 second_crc;
+ __be16 coded_len;
+};
+
+/**
+ * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved field (for packet alignment)
+ * @src: address of the source
+ * @dst: address of the destination
+ * @tvlv_len: length of tvlv data following the unicast tvlv header
+ * @align: 2 bytes to align the header to a 4 byte boundary
+ */
+struct batadv_unicast_tvlv_packet {
+ __u8 packet_type;
+ __u8 version; /* batman version field */
+ __u8 ttl;
+ __u8 reserved;
+ __u8 dst[ETH_ALEN];
+ __u8 src[ETH_ALEN];
+ __be16 tvlv_len;
+ __u16 align;
+};
+
+/**
+ * struct batadv_tvlv_hdr - base tvlv header struct
+ * @type: tvlv container type (see batadv_tvlv_type)
+ * @version: tvlv container version
+ * @len: tvlv container length
+ */
+struct batadv_tvlv_hdr {
+ __u8 type;
+ __u8 version;
+ __be16 len;
+};
+
+/**
+ * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv
+ * container
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
+ */
+struct batadv_tvlv_gateway_data {
+ __be32 bandwidth_down;
+ __be32 bandwidth_up;
+};
+
+/**
+ * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
+ * @flags: translation table flags (see batadv_tt_data_flags)
+ * @ttvn: translation table version number
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
+ * one batadv_tvlv_tt_vlan_data object per announced vlan
+ */
+struct batadv_tvlv_tt_data {
+ __u8 flags;
+ __u8 ttvn;
+ __be16 num_vlan;
+};
+
+/**
+ * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
+ * the tt tvlv container
+ * @crc: crc32 checksum of the entries belonging to this vlan
+ * @vid: vlan identifier
+ * @reserved: unused, useful for alignment purposes
+ */
+struct batadv_tvlv_tt_vlan_data {
+ __be32 crc;
+ __be16 vid;
+ __u16 reserved;
+};
+
+/**
+ * struct batadv_tvlv_tt_change - translation table diff data
+ * @flags: status indicators concerning the non-mesh client (see
+ * batadv_tt_client_flags)
+ * @reserved: reserved field - useful for alignment purposes only
+ * @addr: mac address of non-mesh client that triggered this tt change
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_tt_change {
+ __u8 flags;
+ __u8 reserved[3];
+ __u8 addr[ETH_ALEN];
+ __be16 vid;
+};
+
+/**
+ * struct batadv_tvlv_roam_adv - roaming advertisement
+ * @client: mac address of roaming client
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_roam_adv {
+ __u8 client[ETH_ALEN];
+ __be16 vid;
+};
+
+/**
+ * struct batadv_tvlv_mcast_data - payload of a multicast tvlv
+ * @flags: multicast flags announced by the orig node
+ * @reserved: reserved field
+ */
+struct batadv_tvlv_mcast_data {
+ __u8 flags;
+ __u8 reserved[3];
+};
+
+#pragma pack()
+
+#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index efd641c8a5d6..324a0e1143e7 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,18 +1,25 @@
-/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors:
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2016-2018 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#ifndef _UAPI_LINUX_BATMAN_ADV_H_
@@ -85,6 +92,53 @@ enum batadv_tt_client_flags {
};
/**
+ * enum batadv_mcast_flags_priv - Private, own multicast flags
+ *
+ * These are internal, multicast related flags. Currently they describe certain
+ * multicast related attributes of the segment this originator bridges into the
+ * mesh.
+ *
+ * Those attributes are used to determine the public multicast flags this
+ * originator is going to announce via TT.
+ *
+ * For netlink, if BATADV_MCAST_FLAGS_BRIDGED is unset then all querier
+ * related flags are undefined.
+ */
+enum batadv_mcast_flags_priv {
+ /**
+ * @BATADV_MCAST_FLAGS_BRIDGED: There is a bridge on top of the mesh
+ * interface.
+ */
+ BATADV_MCAST_FLAGS_BRIDGED = (1 << 0),
+
+ /**
+ * @BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS: Whether an IGMP querier
+ * exists in the mesh
+ */
+ BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS = (1 << 1),
+
+ /**
+ * @BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS: Whether an MLD querier
+ * exists in the mesh
+ */
+ BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS = (1 << 2),
+
+ /**
+ * @BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING: If an IGMP querier
+ * exists, whether it is potentially shadowing multicast listeners
+ * (i.e. querier is behind our own bridge segment)
+ */
+ BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING = (1 << 3),
+
+ /**
+ * @BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING: If an MLD querier
+ * exists, whether it is potentially shadowing multicast listeners
+ * (i.e. querier is behind our own bridge segment)
+ */
+ BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING = (1 << 4),
+};
+
+/**
* enum batadv_nl_attrs - batman-adv netlink attributes
*/
enum batadv_nl_attrs {
@@ -265,6 +319,31 @@ enum batadv_nl_attrs {
*/
BATADV_ATTR_BLA_CRC,
+ /**
+ * @BATADV_ATTR_DAT_CACHE_IP4ADDRESS: Client IPv4 address
+ */
+ BATADV_ATTR_DAT_CACHE_IP4ADDRESS,
+
+ /**
+ * @BATADV_ATTR_DAT_CACHE_HWADDRESS: Client MAC address
+ */
+ BATADV_ATTR_DAT_CACHE_HWADDRESS,
+
+ /**
+ * @BATADV_ATTR_DAT_CACHE_VID: VLAN ID
+ */
+ BATADV_ATTR_DAT_CACHE_VID,
+
+ /**
+ * @BATADV_ATTR_MCAST_FLAGS: Per originator multicast flags
+ */
+ BATADV_ATTR_MCAST_FLAGS,
+
+ /**
+ * @BATADV_ATTR_MCAST_FLAGS_PRIV: Private, own multicast flags
+ */
+ BATADV_ATTR_MCAST_FLAGS_PRIV,
+
/* add attributes above here, update the policy in netlink.c */
/**
@@ -354,6 +433,16 @@ enum batadv_nl_commands {
*/
BATADV_CMD_GET_BLA_BACKBONE,
+ /**
+ * @BATADV_CMD_GET_DAT_CACHE: Query list of DAT cache entries
+ */
+ BATADV_CMD_GET_DAT_CACHE,
+
+ /**
+ * @BATADV_CMD_GET_MCAST_FLAGS: Query list of multicast flags
+ */
+ BATADV_CMD_GET_MCAST_FLAGS,
+
/* add new commands above here */
/**
diff --git a/include/uapi/linux/blktrace_api.h b/include/uapi/linux/blktrace_api.h
index 20d1490d6377..690621b610e5 100644
--- a/include/uapi/linux/blktrace_api.h
+++ b/include/uapi/linux/blktrace_api.h
@@ -101,7 +101,7 @@ enum blktrace_notify {
struct blk_io_trace {
__u32 magic; /* MAGIC << 8 | version */
__u32 sequence; /* event number */
- __u64 time; /* in microseconds */
+ __u64 time; /* in nanoseconds */
__u64 sector; /* disk offset */
__u32 bytes; /* transfer length */
__u32 action; /* what happened */
@@ -131,7 +131,7 @@ enum {
#define BLKTRACE_BDEV_SIZE 32
/*
- * User setup structure passed with BLKTRACESTART
+ * User setup structure passed with BLKTRACESETUP
*/
struct blk_user_trace_setup {
char name[BLKTRACE_BDEV_SIZE]; /* output */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4c223ab30293..c5ec89732a8d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
-#define BPF_DW 0x18 /* double word */
+#define BPF_DW 0x18 /* double word (64-bit) */
#define BPF_XADD 0xc0 /* exclusive add */
/* alu/jmp fields */
@@ -94,6 +94,7 @@ enum bpf_cmd {
BPF_MAP_GET_FD_BY_ID,
BPF_OBJ_GET_INFO_BY_FD,
BPF_PROG_QUERY,
+ BPF_RAW_TRACEPOINT_OPEN,
};
enum bpf_map_type {
@@ -133,6 +134,9 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SOCK_OPS,
BPF_PROG_TYPE_SK_SKB,
BPF_PROG_TYPE_CGROUP_DEVICE,
+ BPF_PROG_TYPE_SK_MSG,
+ BPF_PROG_TYPE_RAW_TRACEPOINT,
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
};
enum bpf_attach_type {
@@ -143,6 +147,13 @@ enum bpf_attach_type {
BPF_SK_SKB_STREAM_PARSER,
BPF_SK_SKB_STREAM_VERDICT,
BPF_CGROUP_DEVICE,
+ BPF_SK_MSG_VERDICT,
+ BPF_CGROUP_INET4_BIND,
+ BPF_CGROUP_INET6_BIND,
+ BPF_CGROUP_INET4_CONNECT,
+ BPF_CGROUP_INET6_CONNECT,
+ BPF_CGROUP_INET4_POST_BIND,
+ BPF_CGROUP_INET6_POST_BIND,
__MAX_BPF_ATTACH_TYPE
};
@@ -197,8 +208,14 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL 1
+
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
@@ -225,6 +242,28 @@ enum bpf_attach_type {
#define BPF_F_RDONLY (1U << 3)
#define BPF_F_WRONLY (1U << 4)
+/* Flag for stack_map, store build_id+offset instead of pointer */
+#define BPF_F_STACK_BUILD_ID (1U << 5)
+
+enum bpf_stack_build_id_status {
+	/* user space needs an empty entry to identify end of a trace */
+ BPF_STACK_BUILD_ID_EMPTY = 0,
+ /* with valid build_id and offset */
+ BPF_STACK_BUILD_ID_VALID = 1,
+ /* couldn't get build_id, fallback to ip */
+ BPF_STACK_BUILD_ID_IP = 2,
+};
+
+#define BPF_BUILD_ID_SIZE 20
+struct bpf_stack_build_id {
+ __s32 status;
+ unsigned char build_id[BPF_BUILD_ID_SIZE];
+ union {
+ __u64 offset;
+ __u64 ip;
+ };
+};
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -239,6 +278,7 @@ union bpf_attr {
* BPF_F_NUMA_NODE is set).
*/
char map_name[BPF_OBJ_NAME_LEN];
+ __u32 map_ifindex; /* ifindex of netdev to create on */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -263,6 +303,11 @@ union bpf_attr {
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_ifindex; /* ifindex of netdev to prep for */
+ /* For some prog types expected attach type must be known at
+ * load time to verify attach type specific parts of prog
+ * (context accesses, allowed helpers, etc).
+ */
+ __u32 expected_attach_type;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -313,6 +358,11 @@ union bpf_attr {
__aligned_u64 prog_ids;
__u32 prog_cnt;
} query;
+
+ struct {
+ __u64 name;
+ __u32 prog_fd;
+ } raw_tracepoint;
} __attribute__((aligned(8)));
/* BPF helper function descriptions:
@@ -635,6 +685,14 @@ union bpf_attr {
* @optlen: length of optval in bytes
* Return: 0 or negative error
*
+ * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
+ * Set callback flags for sock_ops
+ * @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
+ * @flags: flags value
+ * Return: 0 for no error
+ *     -EINVAL if there is no full tcp socket or if
+ *     flags contains bits not supported by the current kernel
+ *
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
@@ -677,6 +735,26 @@ union bpf_attr {
* @buf: buf to fill
* @buf_size: size of the buf
* Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ * @pt_regs: pointer to struct pt_regs
+ * @rc: the return value to set
+ *
+ * int bpf_msg_redirect_map(map, key, flags)
+ * Redirect msg to a sock in map using key as a lookup key for the
+ * sock in map.
+ * @map: pointer to sockmap
+ * @key: key to lookup sock in map
+ * @flags: reserved for future use
+ * Return: SK_PASS
+ *
+ * int bpf_bind(ctx, addr, addr_len)
+ * Bind socket to address. Only binding to IP is supported, no port can be
+ * set in addr.
+ * @ctx: pointer to context of type bpf_sock_addr
+ * @addr: pointer to struct sockaddr to bind socket to
+ * @addr_len: length of sockaddr structure
+ * Return: 0 on success or negative error code
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -736,7 +814,14 @@ union bpf_attr {
FN(xdp_adjust_meta), \
FN(perf_event_read_value), \
FN(perf_prog_read_value), \
- FN(getsockopt),
+ FN(getsockopt), \
+ FN(override_return), \
+ FN(sock_ops_cb_flags_set), \
+ FN(msg_redirect_map), \
+ FN(msg_apply_bytes), \
+ FN(msg_cork_bytes), \
+ FN(msg_pull_data), \
+ FN(bind),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -779,6 +864,7 @@ enum bpf_func_id {
/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
#define BPF_F_DONT_FRAGMENT (1ULL << 2)
+#define BPF_F_SEQ_NUMBER (1ULL << 3)
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags.
@@ -864,6 +950,15 @@ struct bpf_sock {
__u32 protocol;
__u32 mark;
__u32 priority;
+ __u32 src_ip4; /* Allows 1,2,4-byte read.
+ * Stored in network byte order.
+ */
+ __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
+ * Stored in network byte order.
+ */
+ __u32 src_port; /* Allows 4-byte read.
+ * Stored in host byte order
+ */
};
#define XDP_PACKET_HEADROOM 256
@@ -888,6 +983,9 @@ struct xdp_md {
__u32 data;
__u32 data_end;
__u32 data_meta;
+	/* Below accesses go through struct xdp_rxq_info */
+ __u32 ingress_ifindex; /* rxq->dev->ifindex */
+ __u32 rx_queue_index; /* rxq->queue_index */
};
enum sk_action {
@@ -895,6 +993,14 @@ enum sk_action {
SK_PASS,
};
+/* user accessible metadata for SK_MSG packet hook, new fields must
+ * be added to the end of this structure
+ */
+struct sk_msg_md {
+ void *data;
+ void *data_end;
+};
+
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
@@ -910,6 +1016,9 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -920,8 +1029,31 @@ struct bpf_map_info {
__u32 max_entries;
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
+/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
+ * by user and intended to be used by socket (e.g. to bind to, depends on
+ * attach type).
+ */
+struct bpf_sock_addr {
+ __u32 user_family; /* Allows 4-byte read, but no write. */
+ __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
+ * Stored in network byte order.
+ */
+	__u32 user_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
+ * Stored in network byte order.
+ */
+ __u32 user_port; /* Allows 4-byte read and write.
+ * Stored in network byte order
+ */
+ __u32 family; /* Allows 4-byte read, but no write */
+ __u32 type; /* Allows 4-byte read, but no write */
+ __u32 protocol; /* Allows 4-byte read, but no write */
+};
+
/* User bpf_sock_ops struct to access socket values and specify request ops
* and their replies.
 * Some of these fields are in network (big-endian) byte order and may need
@@ -931,8 +1063,9 @@ struct bpf_map_info {
struct bpf_sock_ops {
__u32 op;
union {
- __u32 reply;
- __u32 replylong[4];
+ __u32 args[4]; /* Optionally passed to bpf program */
+ __u32 reply; /* Returned by bpf program */
+ __u32 replylong[4]; /* Optionally returned by bpf prog */
};
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -941,8 +1074,45 @@ struct bpf_sock_ops {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
+ __u32 is_fullsock; /* Some TCP fields are only valid if
+ * there is a full socket. If not, the
+ * fields read as zero.
+ */
+ __u32 snd_cwnd;
+ __u32 srtt_us; /* Averaged RTT << 3 in usecs */
+ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
+ __u32 state;
+ __u32 rtt_min;
+ __u32 snd_ssthresh;
+ __u32 rcv_nxt;
+ __u32 snd_nxt;
+ __u32 snd_una;
+ __u32 mss_cache;
+ __u32 ecn_flags;
+ __u32 rate_delivered;
+ __u32 rate_interval_us;
+ __u32 packets_out;
+ __u32 retrans_out;
+ __u32 total_retrans;
+ __u32 segs_in;
+ __u32 data_segs_in;
+ __u32 segs_out;
+ __u32 data_segs_out;
+ __u32 lost_out;
+ __u32 sacked_out;
+ __u32 sk_txhash;
+ __u64 bytes_received;
+ __u64 bytes_acked;
};
+/* Definitions for bpf_sock_ops_cb_flags */
+#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
+#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
+#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
+#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
+ * supported cb flags
+ */
+
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
*/
@@ -976,6 +1146,43 @@ enum {
* a congestion threshold. RTTs above
* this indicate congestion
*/
+ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
+ * Arg1: value of icsk_retransmits
+ * Arg2: value of icsk_rto
+ * Arg3: whether RTO has expired
+ */
+ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
+ * Arg1: sequence number of 1st byte
+ * Arg2: # segments
+ * Arg3: return value of
+ * tcp_transmit_skb (0 => success)
+ */
+ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
+ * Arg1: old_state
+ * Arg2: new_state
+ */
+};
+
+/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
+ * changes between the TCP and BPF versions. Ideally this should never happen.
+ * If it does, we need to add code to convert them before calling
+ * the BPF sock_ops function.
+ */
+enum {
+ BPF_TCP_ESTABLISHED = 1,
+ BPF_TCP_SYN_SENT,
+ BPF_TCP_SYN_RECV,
+ BPF_TCP_FIN_WAIT1,
+ BPF_TCP_FIN_WAIT2,
+ BPF_TCP_TIME_WAIT,
+ BPF_TCP_CLOSE,
+ BPF_TCP_CLOSE_WAIT,
+ BPF_TCP_LAST_ACK,
+ BPF_TCP_LISTEN,
+ BPF_TCP_CLOSING, /* Now a valid state */
+ BPF_TCP_NEW_SYN_RECV,
+
+ BPF_TCP_MAX_STATES /* Leave at the end! */
};
#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
@@ -995,9 +1202,14 @@ struct bpf_perf_event_value {
#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
struct bpf_cgroup_dev_ctx {
- __u32 access_type; /* (access << 16) | type */
+ /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+ __u32 access_type;
__u32 major;
__u32 minor;
};
+struct bpf_raw_tracepoint_args {
+ __u64 args[0];
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/bpf_common.h b/include/uapi/linux/bpf_common.h
index 18be90725ab0..ee97668bdadb 100644
--- a/include/uapi/linux/bpf_common.h
+++ b/include/uapi/linux/bpf_common.h
@@ -15,9 +15,10 @@
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
-#define BPF_W 0x00
-#define BPF_H 0x08
-#define BPF_B 0x10
+#define BPF_W 0x00 /* 32-bit */
+#define BPF_H 0x08 /* 16-bit */
+#define BPF_B 0x10 /* 8-bit */
+/* eBPF BPF_DW 0x18 64-bit */
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
diff --git a/include/uapi/linux/bpf_perf_event.h b/include/uapi/linux/bpf_perf_event.h
index 8f95303f9d80..eb1b9d21250c 100644
--- a/include/uapi/linux/bpf_perf_event.h
+++ b/include/uapi/linux/bpf_perf_event.h
@@ -13,6 +13,7 @@
struct bpf_perf_event_data {
bpf_user_pt_regs_t regs;
__u64 sample_period;
+ __u64 addr;
};
#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ce615b75e855..c8d99b9ca550 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -33,7 +33,12 @@ struct btrfs_ioctl_vol_args {
char name[BTRFS_PATH_NAME_MAX + 1];
};
-#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_SUBVOL_NAME_MAX 4039
+
+#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
+#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3)
@@ -101,11 +106,7 @@ struct btrfs_ioctl_qgroup_limit_args {
* - BTRFS_IOC_SUBVOL_GETFLAGS
* - BTRFS_IOC_SUBVOL_SETFLAGS
*/
-#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
-#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
-#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
-#define BTRFS_SUBVOL_NAME_MAX 4039
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
__u64 transid;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 6d6e5da51527..aff1356c2bb8 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -456,6 +456,8 @@ struct btrfs_free_space_header {
#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
+#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
+#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
/*
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 96710e76d5ce..9f56fad4785b 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -132,6 +132,7 @@ enum {
IFLA_CAN_TERMINATION_CONST,
IFLA_CAN_BITRATE_CONST,
IFLA_CAN_DATA_BITRATE_CONST,
+ IFLA_CAN_BITRATE_MAX,
__IFLA_CAN_MAX
};
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 28e8a2a86e16..8997d5068c08 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -3,35 +3,6 @@
* cec - HDMI Consumer Electronics Control message functions
*
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * Alternatively you can redistribute this file under the terms of the
- * BSD license as stated below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. The names of its contributors may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CEC_UAPI_FUNCS_H
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index b51fbe1941a7..20fe091b7e96 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -3,35 +3,6 @@
* cec - HDMI Consumer Electronics Control public header
*
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * Alternatively you can redistribute this file under the terms of the
- * BSD license as stated below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. The names of its contributors may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CEC_UAPI_H
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index 92537757590a..5ed721ad5b19 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* const.h: Macros for dealing with constants. */
-#ifndef _LINUX_CONST_H
-#define _LINUX_CONST_H
+#ifndef _UAPI_LINUX_CONST_H
+#define _UAPI_LINUX_CONST_H
/* Some constant macros are used in both assembler and
* C code. Therefore we cannot annotate them always with
@@ -22,7 +22,10 @@
#define _AT(T,X) ((T)(X))
#endif
-#define _BITUL(x) (_AC(1,UL) << (x))
-#define _BITULL(x) (_AC(1,ULL) << (x))
+#define _UL(x) (_AC(x, UL))
+#define _ULL(x) (_AC(x, ULL))
-#endif /* !(_LINUX_CONST_H) */
+#define _BITUL(x) (_UL(1) << (x))
+#define _BITULL(x) (_ULL(1) << (x))
+
+#endif /* _UAPI_LINUX_CONST_H */
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 6665df69e26a..1df65a4c2044 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -70,6 +70,13 @@ enum devlink_command {
DEVLINK_CMD_DPIPE_ENTRIES_GET,
DEVLINK_CMD_DPIPE_HEADERS_GET,
DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+ DEVLINK_CMD_RESOURCE_SET,
+ DEVLINK_CMD_RESOURCE_DUMP,
+
+	/* Hot driver reload; makes configuration changes take place. The
+ * devlink instance is not released during the process.
+ */
+ DEVLINK_CMD_RELOAD,
/* add new commands above here */
__DEVLINK_CMD_MAX,
@@ -202,6 +209,20 @@ enum devlink_attr {
DEVLINK_ATTR_PAD,
DEVLINK_ATTR_ESWITCH_ENCAP_MODE, /* u8 */
+ DEVLINK_ATTR_RESOURCE_LIST, /* nested */
+ DEVLINK_ATTR_RESOURCE, /* nested */
+ DEVLINK_ATTR_RESOURCE_NAME, /* string */
+ DEVLINK_ATTR_RESOURCE_ID, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_NEW, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_VALID, /* u8 */
+ DEVLINK_ATTR_RESOURCE_SIZE_MIN, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_MAX, /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_GRAN, /* u64 */
+ DEVLINK_ATTR_RESOURCE_UNIT, /* u8 */
+ DEVLINK_ATTR_RESOURCE_OCC, /* u64 */
+ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, /* u64 */
+ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */
/* add new attributes above here, update the policy in devlink.c */
@@ -245,4 +266,8 @@ enum devlink_dpipe_header_id {
DEVLINK_DPIPE_HEADER_IPV6,
};
+enum devlink_resource_unit {
+ DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 14c44ec8b622..d1e49514977b 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -270,9 +270,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 37
+#define DM_VERSION_MINOR 39
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2017-09-20)"
+#define DM_VERSION_EXTRA "-ioctl (2018-04-03)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h
index c10f1324b4ca..b4112f0b6dd3 100644
--- a/include/uapi/linux/dvb/dmx.h
+++ b/include/uapi/linux/dvb/dmx.h
@@ -211,6 +211,96 @@ struct dmx_stc {
__u64 stc;
};
+/**
+ * enum dmx_buffer_flags - DMX memory-mapped buffer flags
+ *
+ * @DMX_BUFFER_FLAG_HAD_CRC32_DISCARD:
+ * Indicates that the Kernel discarded one or more frames due to a wrong
+ * CRC32 checksum.
+ * @DMX_BUFFER_FLAG_TEI:
+ * Indicates that the Kernel has detected a Transport Error indicator
+ * (TEI) on a filtered pid.
+ * @DMX_BUFFER_PKT_COUNTER_MISMATCH:
+ * Indicates that the Kernel has detected a packet counter mismatch
+ * on a filtered pid.
+ * @DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED:
+ * Indicates that the Kernel has detected one or more frame discontinuities.
+ * @DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR:
+ * Received at least one packet with a frame discontinuity indicator.
+ */
+
+enum dmx_buffer_flags {
+ DMX_BUFFER_FLAG_HAD_CRC32_DISCARD = 1 << 0,
+ DMX_BUFFER_FLAG_TEI = 1 << 1,
+ DMX_BUFFER_PKT_COUNTER_MISMATCH = 1 << 2,
+ DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED = 1 << 3,
+ DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR = 1 << 4,
+};
+
+/**
+ * struct dmx_buffer - dmx buffer info
+ *
+ * @index: id number of the buffer
+ * @bytesused: number of bytes occupied by data in the buffer (payload);
+ * @offset: for buffers with memory == DMX_MEMORY_MMAP;
+ * offset from the start of the device memory for this plane,
+ * (or a "cookie" that should be passed to mmap() as offset)
+ * @length: size in bytes of the buffer
+ * @flags: bit array of buffer flags as defined by &enum dmx_buffer_flags.
+ * Filled only at &DMX_DQBUF.
+ * @count: monotonic counter for filled buffers. Helps to identify
+ * data stream losses. Filled only at &DMX_DQBUF.
+ *
+ * Contains data exchanged by application and driver using one of the streaming
+ * I/O methods.
+ *
+ * Please notice that, for &DMX_QBUF, only @index should be filled.
+ * On &DMX_DQBUF calls, all fields will be filled by the Kernel.
+ */
+struct dmx_buffer {
+ __u32 index;
+ __u32 bytesused;
+ __u32 offset;
+ __u32 length;
+ __u32 flags;
+ __u32 count;
+};
+
+/**
+ * struct dmx_requestbuffers - request dmx buffer information
+ *
+ * @count: number of requested buffers,
+ * @size: size in bytes of the requested buffer
+ *
+ * Contains data used for requesting a dmx buffer.
+ * All reserved fields must be set to zero.
+ */
+struct dmx_requestbuffers {
+ __u32 count;
+ __u32 size;
+};
+
+/**
+ * struct dmx_exportbuffer - export of dmx buffer as DMABUF file descriptor
+ *
+ * @index: id number of the buffer
+ * @flags: flags for newly created file, currently only O_CLOEXEC is
+ * supported, refer to manual of open syscall for more details
+ * @fd: file descriptor associated with DMABUF (set by driver)
+ *
+ * Contains data used for exporting a dmx buffer as DMABUF file descriptor.
+ * The buffer is identified by a 'cookie' returned by DMX_QUERYBUF
+ * (identical to the cookie used to mmap() the buffer to userspace). All
+ * reserved fields must be set to zero. The field reserved0 is expected to
+ * become a structure 'type' allowing an alternative layout of the structure
+ * content. Therefore this field should not be used for any other extensions.
+ */
+struct dmx_exportbuffer {
+ __u32 index;
+ __u32 flags;
+ __s32 fd;
+};
+
#define DMX_START _IO('o', 41)
#define DMX_STOP _IO('o', 42)
#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
@@ -231,4 +321,10 @@ typedef struct dmx_filter dmx_filter_t;
#endif
-#endif /* _UAPI_DVBDMX_H_ */
+#define DMX_REQBUFS _IOWR('o', 60, struct dmx_requestbuffers)
+#define DMX_QUERYBUF _IOWR('o', 61, struct dmx_buffer)
+#define DMX_EXPBUF _IOWR('o', 62, struct dmx_exportbuffer)
+#define DMX_QBUF _IOWR('o', 63, struct dmx_buffer)
+#define DMX_DQBUF _IOWR('o', 64, struct dmx_buffer)
+
+#endif /* _DVBDMX_H_ */
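
The kernel-doc added above describes the new memory-mapped streaming path: buffers are requested with DMX_REQBUFS, their mmap() cookies obtained with DMX_QUERYBUF, queued with DMX_QBUF and collected with DMX_DQBUF (DMX_EXPBUF additionally exports a buffer as a DMABUF file descriptor). A hedged userspace sketch of that cycle; the demux node path is a placeholder and a driver supporting the mmap API is assumed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/dvb/dmx.h>

	int main(void)
	{
		/* hypothetical demux node; a filter would be configured and
		 * started around this setup in a real application */
		int fd = open("/dev/dvb/adapter0/demux0", O_RDWR);
		if (fd < 0)
			return 1;

		struct dmx_requestbuffers req = { .count = 4, .size = 188 * 1024 };
		if (ioctl(fd, DMX_REQBUFS, &req) < 0 || req.count == 0)
			return 1;
		if (req.count > 4)
			req.count = 4;

		void *map[4];
		__u32 len[4];

		for (__u32 i = 0; i < req.count; i++) {
			struct dmx_buffer buf = { .index = i };

			ioctl(fd, DMX_QUERYBUF, &buf);
			/* buf.offset is the mmap() "cookie" described above */
			map[i] = mmap(NULL, buf.length, PROT_READ, MAP_SHARED,
				      fd, buf.offset);
			len[i] = buf.length;
			ioctl(fd, DMX_QBUF, &buf);	/* only @index needs to be set */
		}

		struct dmx_buffer done = { .index = 0 };
		if (ioctl(fd, DMX_DQBUF, &done) == 0)
			printf("buf %u: %u bytes, flags %#x, count %u\n",
			       done.index, done.bytesused, done.flags, done.count);

		for (__u32 i = 0; i < req.count; i++)
			munmap(map[i], len[i]);
		close(fd);
		return 0;
	}
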
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index b297b65845d6..4f9b4551c534 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -547,7 +547,10 @@ enum fe_interleaving {
#define DTV_STAT_ERROR_BLOCK_COUNT 68
#define DTV_STAT_TOTAL_BLOCK_COUNT 69
-#define DTV_MAX_COMMAND DTV_STAT_TOTAL_BLOCK_COUNT
+/* Physical layer scrambling */
+#define DTV_SCRAMBLING_SEQUENCE_INDEX 70
+
+#define DTV_MAX_COMMAND DTV_SCRAMBLING_SEQUENCE_INDEX
/**
* enum fe_pilot - Type of pilot tone
@@ -756,16 +759,15 @@ enum fecap_scale_params {
/**
* struct dtv_stats - Used for reading a DTV status property
*
- * @scale: Filled with enum fecap_scale_params - the scale
- * in usage for that parameter
- *
- * The ``{unnamed_union}`` may have either one of the values below:
+ * @scale:
+ * Filled with enum fecap_scale_params - the scale in usage
+ * for that parameter
*
- * %svalue
+ * @svalue:
* integer value of the measure, for %FE_SCALE_DECIBEL,
* used for dB measures. The unit is 0.001 dB.
*
- * %uvalue
+ * @uvalue:
* unsigned integer value of the measure, used when @scale is
* either %FE_SCALE_RELATIVE or %FE_SCALE_COUNTER.
*
@@ -828,19 +830,19 @@ struct dtv_fe_stats {
/**
* struct dtv_property - store one of frontend command and its value
*
- * @cmd: Digital TV command.
- * @reserved: Not used.
- * @u: Union with the values for the command.
- * @result: Unused
- *
- * The @u union may have either one of the values below:
+ * @cmd: Digital TV command.
+ * @reserved: Not used.
+ * @u: Union with the values for the command.
+ * @u.data: An unsigned 32-bit integer with the command value.
+ * @u.buffer: Struct to store bigger properties.
+ * Currently unused.
+ * @u.buffer.data: an unsigned 32-bit array.
+ * @u.buffer.len: number of elements of the buffer.
+ * @u.buffer.reserved1: Reserved.
+ * @u.buffer.reserved2: Reserved.
+ * @u.st: a &struct dtv_fe_stats array of statistics.
+ * @result: Currently unused.
*
- * %data
- * an unsigned 32-bits number.
- * %st
- * a &struct dtv_fe_stats array of statistics.
- * %buffer
- * a buffer of up to 32 characters (currently unused).
*/
struct dtv_property {
__u32 cmd;
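
The reworked comment spells out which members of the @u union are actually meaningful; together with the new DTV_SCRAMBLING_SEQUENCE_INDEX command, the property is still driven through the existing FE_SET_PROPERTY ioctl. A minimal sketch, with a placeholder frontend node and an illustrative PLS index (a real tune would batch this with the other tuning properties and finish with DTV_TUNE):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/dvb/frontend.h>

	int main(void)
	{
		int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);	/* placeholder node */
		if (fd < 0)
			return 1;

		/* Set the DVB-S2 physical layer scrambling sequence index (u.data) */
		struct dtv_property prop = {
			.cmd = DTV_SCRAMBLING_SEQUENCE_INDEX,
			.u.data = 262140,	/* example PLS "gold code" index */
		};
		struct dtv_properties props = { .num = 1, .props = &prop };

		int ret = ioctl(fd, FE_SET_PROPERTY, &props);
		close(fd);
		return ret ? 1 : 0;
	}
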
diff --git a/include/uapi/linux/dvb/version.h b/include/uapi/linux/dvb/version.h
index 02e32ea83984..2c5cffe6d2a0 100644
--- a/include/uapi/linux/dvb/version.h
+++ b/include/uapi/linux/dvb/version.h
@@ -25,6 +25,6 @@
#define _DVBVERSION_H_
#define DVB_API_VERSION 5
-#define DVB_API_VERSION_MINOR 10
+#define DVB_API_VERSION_MINOR 11
#endif /*_DVBVERSION_H_*/
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index 4d51f98182bb..df3d7028c807 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -83,11 +83,11 @@ typedef enum {
#define VIDEO_CMD_CONTINUE (3)
/* Flags for VIDEO_CMD_FREEZE */
-#define VIDEO_CMD_FREEZE_TO_BLACK (1 << 0)
+#define VIDEO_CMD_FREEZE_TO_BLACK (1 << 0)
/* Flags for VIDEO_CMD_STOP */
-#define VIDEO_CMD_STOP_TO_BLACK (1 << 0)
-#define VIDEO_CMD_STOP_IMMEDIATELY (1 << 1)
+#define VIDEO_CMD_STOP_TO_BLACK (1 << 0)
+#define VIDEO_CMD_STOP_IMMEDIATELY (1 << 1)
/* Play input formats: */
/* The decoder has no special format requirements */
@@ -124,8 +124,8 @@ struct video_command {
/* FIELD_UNKNOWN can be used if the hardware does not know whether
the Vsync is for an odd, even or progressive (i.e. non-interlaced)
field. */
-#define VIDEO_VSYNC_FIELD_UNKNOWN (0)
-#define VIDEO_VSYNC_FIELD_ODD (1)
+#define VIDEO_VSYNC_FIELD_UNKNOWN (0)
+#define VIDEO_VSYNC_FIELD_ODD (1)
#define VIDEO_VSYNC_FIELD_EVEN (2)
#define VIDEO_VSYNC_FIELD_PROGRESSIVE (3)
@@ -133,8 +133,8 @@ struct video_event {
__s32 type;
#define VIDEO_EVENT_SIZE_CHANGED 1
#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
-#define VIDEO_EVENT_DECODER_STOPPED 3
-#define VIDEO_EVENT_VSYNC 4
+#define VIDEO_EVENT_DECODER_STOPPED 3
+#define VIDEO_EVENT_VSYNC 4
/* unused, make sure to use atomic time for y2038 if it ever gets used */
long timestamp;
union {
@@ -268,9 +268,9 @@ typedef __u16 video_attributes_t;
#define VIDEO_GET_PTS _IOR('o', 57, __u64)
/* Read the number of displayed frames since the decoder was started */
-#define VIDEO_GET_FRAME_COUNT _IOR('o', 58, __u64)
+#define VIDEO_GET_FRAME_COUNT _IOR('o', 58, __u64)
-#define VIDEO_COMMAND _IOWR('o', 59, struct video_command)
-#define VIDEO_TRY_COMMAND _IOWR('o', 60, struct video_command)
+#define VIDEO_COMMAND _IOWR('o', 59, struct video_command)
+#define VIDEO_TRY_COMMAND _IOWR('o', 60, struct video_command)
#endif /* _UAPI_DVBVIDEO_H_ */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index bb6836986200..e2535d6dcec7 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -396,6 +396,7 @@ typedef struct elf64_shdr {
#define NT_PPC_TM_CTAR 0x10d /* TM checkpointed Target Address Register */
#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */
#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */
+#define NT_PPC_PKEY 0x110 /* Memory Protection Keys registers */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
@@ -419,9 +420,6 @@ typedef struct elf64_shdr {
#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
-#define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */
-#define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */
-#define NT_METAG_TLS 0x502 /* Metag TLS pointer */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
/* Note header in a PT_NOTE section */
diff --git a/include/uapi/linux/erspan.h b/include/uapi/linux/erspan.h
new file mode 100644
index 000000000000..841573019ae1
--- /dev/null
+++ b/include/uapi/linux/erspan.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * ERSPAN Tunnel Metadata
+ *
+ * Copyright (c) 2018 VMware
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * Userspace API for metadata mode ERSPAN tunnel
+ */
+#ifndef _UAPI_ERSPAN_H
+#define _UAPI_ERSPAN_H
+
+#include <linux/types.h> /* For __beXX in userspace */
+#include <asm/byteorder.h>
+
+/* ERSPAN version 2 metadata header */
+struct erspan_md2 {
+ __be32 timestamp;
+ __be16 sgt; /* security group tag */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hwid_upper:2,
+ ft:5,
+ p:1;
+ __u8 o:1,
+ gra:2,
+ dir:1,
+ hwid:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 p:1,
+ ft:5,
+ hwid_upper:2;
+ __u8 hwid:4,
+ dir:1,
+ gra:2,
+ o:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+};
+
+struct erspan_metadata {
+ int version;
+ union {
+ __be32 index; /* Version 1 (type II)*/
+ struct erspan_md2 md2; /* Version 2 (type III) */
+ } u;
+};
+
+#endif /* _UAPI_ERSPAN_H */
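
struct erspan_metadata carries either the version 1 index or the version 2 md2 header defined above. A small sketch of filling in a version 2 descriptor on the host side; how the metadata is then handed to the kernel (tunnel metadata, flow offload, etc.) is outside this header:

	#include <arpa/inet.h>
	#include <string.h>
	#include <linux/erspan.h>

	/* Build a version 2 (type III) ERSPAN descriptor; hwid is split between
	 * the 4-bit hwid field and the 2-bit hwid_upper field. */
	static void fill_erspan_v2(struct erspan_metadata *md, unsigned int dir,
				   unsigned int hwid)
	{
		memset(md, 0, sizeof(*md));
		md->version = 2;
		md->u.md2.sgt = htons(100);	/* example security group tag */
		md->u.md2.dir = dir & 0x1;
		md->u.md2.hwid = hwid & 0xf;
		md->u.md2.hwid_upper = (hwid >> 4) & 0x3;
	}

	int main(void)
	{
		struct erspan_metadata md;

		fill_erspan_v2(&md, 1 /* egress */, 0x17 /* example hwid */);
		return md.u.md2.hwid_upper == 0x1 ? 0 : 1;
	}
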
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index ac71559314e7..4ca65b56084f 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -217,10 +217,14 @@ struct ethtool_value {
__u32 data;
};
+#define PFC_STORM_PREVENTION_AUTO 0xffff
+#define PFC_STORM_PREVENTION_DISABLE 0
+
enum tunable_id {
ETHTOOL_ID_UNSPEC,
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
+ ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
/*
* Add your fresh new tunable attribute above and remember to update
* tunable_strings[] in net/core/ethtool.c
@@ -914,12 +918,15 @@ static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
* @flow_type: Type of flow to be affected, e.g. %TCP_V4_FLOW
* @data: Command-dependent value
* @fs: Flow classification rule
+ * @rss_context: RSS context to be affected
* @rule_cnt: Number of rules to be affected
* @rule_locs: Array of used rule locations
*
* For %ETHTOOL_GRXFH and %ETHTOOL_SRXFH, @data is a bitmask indicating
* the fields included in the flow hash, e.g. %RXH_IP_SRC. The following
- * structure fields must not be used.
+ * structure fields must not be used, except that if @flow_type includes
+ * the %FLOW_RSS flag, then @rss_context determines which RSS context to
+ * act on.
*
* For %ETHTOOL_GRXRINGS, @data is set to the number of RX rings/queues
* on return.
@@ -931,7 +938,9 @@ static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
* set in @data then special location values should not be used.
*
* For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an
- * existing rule on entry and @fs contains the rule on return.
+ * existing rule on entry and @fs contains the rule on return; if
+ * @fs.@flow_type includes the %FLOW_RSS flag, then @rss_context is
+ * filled with the RSS context ID associated with the rule.
*
* For %ETHTOOL_GRXCLSRLALL, @rule_cnt specifies the array size of the
* user buffer for @rule_locs on entry. On return, @data is the size
@@ -942,7 +951,11 @@ static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
* For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update.
* @fs.@location either specifies the location to use or is a special
* location value with %RX_CLS_LOC_SPECIAL flag set. On return,
- * @fs.@location is the actual rule location.
+ * @fs.@location is the actual rule location. If @fs.@flow_type
+ * includes the %FLOW_RSS flag, @rss_context is the RSS context ID to
+ * use for flow spreading traffic which matches this rule. The value
+ * from the rxfh indirection table will be added to @fs.@ring_cookie
+ * to choose which ring to deliver to.
*
* For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an
* existing rule on entry.
@@ -963,7 +976,10 @@ struct ethtool_rxnfc {
__u32 flow_type;
__u64 data;
struct ethtool_rx_flow_spec fs;
- __u32 rule_cnt;
+ union {
+ __u32 rule_cnt;
+ __u32 rss_context;
+ };
__u32 rule_locs[0];
};
@@ -990,7 +1006,11 @@ struct ethtool_rxfh_indir {
/**
* struct ethtool_rxfh - command to get/set RX flow hash indir or/and hash key.
* @cmd: Specific command number - %ETHTOOL_GRSSH or %ETHTOOL_SRSSH
- * @rss_context: RSS context identifier.
+ * @rss_context: RSS context identifier. Context 0 is the default for normal
+ * traffic; other contexts can be referenced as the destination for RX flow
+ * classification rules. %ETH_RXFH_CONTEXT_ALLOC is used with command
+ * %ETHTOOL_SRSSH to allocate a new RSS context; on return this field will
+ * contain the ID of the newly allocated context.
* @indir_size: On entry, the array size of the user buffer for the
* indirection table, which may be zero, or (for %ETHTOOL_SRSSH),
* %ETH_RXFH_INDIR_NO_CHANGE. On return from %ETHTOOL_GRSSH,
@@ -1009,7 +1029,8 @@ struct ethtool_rxfh_indir {
* size should be returned. For %ETHTOOL_SRSSH, an @indir_size of
* %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested
* and a @indir_size of zero means the indir table should be reset to default
- * values. An hfunc of zero means that hash function setting is not requested.
+ * values (if @rss_context == 0) or that the RSS context should be deleted.
+ * An hfunc of zero means that hash function setting is not requested.
*/
struct ethtool_rxfh {
__u32 cmd;
@@ -1021,6 +1042,7 @@ struct ethtool_rxfh {
__u32 rsvd32;
__u32 rss_config[0];
};
+#define ETH_RXFH_CONTEXT_ALLOC 0xffffffff
#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
/**
@@ -1635,6 +1657,8 @@ static inline int ethtool_validate_duplex(__u8 duplex)
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
#define FLOW_MAC_EXT 0x40000000
+/* Flag to enable RSS spreading of traffic matching rule (nfc only) */
+#define FLOW_RSS 0x20000000
/* L3-L4 network traffic flow hash options */
#define RXH_L2DA (1 << 1)
@@ -1686,6 +1710,7 @@ enum ethtool_reset_flags {
ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */
ETH_RESET_RAM = 1 << 7, /* RAM shared between
* multiple components */
+ ETH_RESET_AP = 1 << 8, /* Application processor */
ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to
* this interface */
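
The updated ethtool_rxnfc documentation says that when @flow_type carries the new FLOW_RSS flag, @rss_context (now sharing a union with @rule_cnt) names the RSS context over which matching traffic is spread. A hedged sketch of inserting such a rule with ETHTOOL_SRXCLSRLINS; the interface name and context ID are placeholders, the context itself would have been allocated earlier via ETHTOOL_SRSSH with rss_context set to ETH_RXFH_CONTEXT_ALLOC, and headers containing this change are assumed:

	#include <arpa/inet.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Steer TCP/IPv4 traffic to destination port 443 into RSS context ctx_id. */
	static int steer_to_rss_context(const char *ifname, __u32 ctx_id)
	{
		int sk = socket(AF_INET, SOCK_DGRAM, 0);
		if (sk < 0)
			return -1;

		struct ethtool_rxnfc nfc;
		memset(&nfc, 0, sizeof(nfc));
		nfc.cmd = ETHTOOL_SRXCLSRLINS;
		nfc.rss_context = ctx_id;			/* new union member */
		nfc.fs.flow_type = TCP_V4_FLOW | FLOW_RSS;
		nfc.fs.h_u.tcp_ip4_spec.pdst = htons(443);
		nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);
		nfc.fs.location = RX_CLS_LOC_ANY;		/* let the driver pick a slot */

		struct ifreq ifr;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&nfc;

		int ret = ioctl(sk, SIOCETHTOOL, &ifr);
		close(sk);
		return ret;
	}

	int main(void)
	{
		return steer_to_rss_context("eth0", 1) ? 1 : 0;	/* placeholder values */
	}
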
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 63e21be30f15..bf48e71f2634 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -28,20 +28,21 @@
#define EPOLL_CTL_MOD 3
/* Epoll event masks */
-#define EPOLLIN 0x00000001
-#define EPOLLPRI 0x00000002
-#define EPOLLOUT 0x00000004
-#define EPOLLERR 0x00000008
-#define EPOLLHUP 0x00000010
-#define EPOLLRDNORM 0x00000040
-#define EPOLLRDBAND 0x00000080
-#define EPOLLWRNORM 0x00000100
-#define EPOLLWRBAND 0x00000200
-#define EPOLLMSG 0x00000400
-#define EPOLLRDHUP 0x00002000
+#define EPOLLIN (__force __poll_t)0x00000001
+#define EPOLLPRI (__force __poll_t)0x00000002
+#define EPOLLOUT (__force __poll_t)0x00000004
+#define EPOLLERR (__force __poll_t)0x00000008
+#define EPOLLHUP (__force __poll_t)0x00000010
+#define EPOLLNVAL (__force __poll_t)0x00000020
+#define EPOLLRDNORM (__force __poll_t)0x00000040
+#define EPOLLRDBAND (__force __poll_t)0x00000080
+#define EPOLLWRNORM (__force __poll_t)0x00000100
+#define EPOLLWRBAND (__force __poll_t)0x00000200
+#define EPOLLMSG (__force __poll_t)0x00000400
+#define EPOLLRDHUP (__force __poll_t)0x00002000
/* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (1U << 28)
+#define EPOLLEXCLUSIVE (__force __poll_t)(1U << 28)
/*
* Request the handling of system wakeup events so as to prevent system suspends
@@ -53,13 +54,13 @@
*
* Requires CAP_BLOCK_SUSPEND
*/
-#define EPOLLWAKEUP (1U << 29)
+#define EPOLLWAKEUP (__force __poll_t)(1U << 29)
/* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1U << 30)
+#define EPOLLONESHOT (__force __poll_t)(1U << 30)
/* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1U << 31)
+#define EPOLLET (__force __poll_t)(1U << 31)
/*
* On x86-64 make the 64bit structure have the same alignment as the
@@ -74,7 +75,7 @@
#endif
struct epoll_event {
- __u32 events;
+ __poll_t events;
__u64 data;
} EPOLL_PACKED;
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 2b642bf9b5a0..232df14e1287 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -23,7 +23,7 @@ struct fib_rule_hdr {
__u8 tos;
__u8 table;
- __u8 res1; /* reserved */
+ __u8 res1; /* reserved */
__u8 res2; /* reserved */
__u8 action;
@@ -35,6 +35,11 @@ struct fib_rule_uid_range {
__u32 end;
};
+struct fib_rule_port_range {
+ __u16 start;
+ __u16 end;
+};
+
enum {
FRA_UNSPEC,
FRA_DST, /* destination address */
@@ -58,6 +63,10 @@ enum {
FRA_PAD,
FRA_L3MDEV, /* iif or oif is l3mdev goto its table */
FRA_UID_RANGE, /* UID range */
+ FRA_PROTOCOL, /* Originator of the rule */
+ FRA_IP_PROTO, /* ip proto */
+ FRA_SPORT_RANGE, /* sport */
+ FRA_DPORT_RANGE, /* dport */
__FRA_MAX
};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 4199f8acbce5..d2a8313fabd7 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -377,7 +377,11 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO, return -EAGAIN if operation would block */
#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008)
+/* per-IO O_APPEND */
+#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010)
+
/* mask of flags supported by the kernel */
-#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT)
+#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+ RWF_APPEND)
#endif /* _UAPI_LINUX_FS_H */
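
RWF_APPEND gives a single preadv2()/pwritev2() call O_APPEND semantics without changing the open file description. A minimal sketch, assuming a libc new enough to expose pwritev2(); the fallback define mirrors the value added above:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <string.h>
	#include <sys/uio.h>
	#include <unistd.h>

	#ifndef RWF_APPEND
	#define RWF_APPEND 0x00000010	/* mirrors the value added above */
	#endif

	int main(void)
	{
		int fd = open("log.txt", O_WRONLY | O_CREAT, 0644);
		if (fd < 0)
			return 1;

		const char line[] = "appended exactly once\n";
		struct iovec iov = { .iov_base = (void *)line, .iov_len = strlen(line) };

		/* offset -1: use (and update) the file position; RWF_APPEND makes
		 * this particular write append-only even though O_APPEND is not set. */
		ssize_t n = pwritev2(fd, &iov, 1, -1, RWF_APPEND);

		close(fd);
		return n < 0 ? 1 : 0;
	}
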
diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h
index 5156bad77b47..2dc10a034de1 100644
--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -187,10 +187,19 @@ struct gfs2_rgrp {
__be32 rg_flags;
__be32 rg_free;
__be32 rg_dinodes;
- __be32 __pad;
+ union {
+ __be32 __pad;
+ __be32 rg_skip; /* Distance to the next rgrp in fs blocks */
+ };
__be64 rg_igeneration;
-
- __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
+ /* The following 3 fields are duplicated from gfs2_rindex to reduce
+ reliance on the rindex */
+ __be64 rg_data0; /* First data location */
+ __be32 rg_data; /* Number of data blocks in rgrp */
+ __be32 rg_bitbytes; /* Number of bytes in data bitmaps */
+ __be32 rg_crc; /* crc32 of the structure with this field 0 */
+
+ __u8 rg_reserved[60]; /* Several fields from gfs1 now reserved */
};
/*
@@ -394,7 +403,36 @@ struct gfs2_ea_header {
* Log header structure
*/
-#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
+#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
+#define GFS2_LOG_HEAD_FLUSH_NORMAL 0x00000002 /* normal log flush */
+#define GFS2_LOG_HEAD_FLUSH_SYNC 0x00000004 /* Sync log flush */
+#define GFS2_LOG_HEAD_FLUSH_SHUTDOWN 0x00000008 /* Shutdown log flush */
+#define GFS2_LOG_HEAD_FLUSH_FREEZE 0x00000010 /* Freeze flush */
+#define GFS2_LOG_HEAD_RECOVERY 0x00000020 /* Journal recovery */
+#define GFS2_LOG_HEAD_USERSPACE 0x80000000 /* Written by gfs2-utils */
+
+/* Log flush callers */
+#define GFS2_LFC_SHUTDOWN 0x00000100
+#define GFS2_LFC_JDATA_WPAGES 0x00000200
+#define GFS2_LFC_SET_FLAGS 0x00000400
+#define GFS2_LFC_AIL_EMPTY_GL 0x00000800
+#define GFS2_LFC_AIL_FLUSH 0x00001000
+#define GFS2_LFC_RGRP_GO_SYNC 0x00002000
+#define GFS2_LFC_INODE_GO_SYNC 0x00004000
+#define GFS2_LFC_INODE_GO_INVAL 0x00008000
+#define GFS2_LFC_FREEZE_GO_SYNC 0x00010000
+#define GFS2_LFC_KILL_SB 0x00020000
+#define GFS2_LFC_DO_SYNC 0x00040000
+#define GFS2_LFC_INPLACE_RESERVE 0x00080000
+#define GFS2_LFC_WRITE_INODE 0x00100000
+#define GFS2_LFC_MAKE_FS_RO 0x00200000
+#define GFS2_LFC_SYNC_FS 0x00400000
+#define GFS2_LFC_EVICT_INODE 0x00800000
+#define GFS2_LFC_TRANS_END 0x01000000
+#define GFS2_LFC_LOGD_JFLUSH_REQD 0x02000000
+#define GFS2_LFC_LOGD_AIL_FLUSH_REQD 0x04000000
+
+#define LH_V1_SIZE (offsetofend(struct gfs2_log_header, lh_hash))
struct gfs2_log_header {
struct gfs2_meta_header lh_header;
@@ -403,7 +441,21 @@ struct gfs2_log_header {
__be32 lh_flags; /* GFS2_LOG_HEAD_... */
__be32 lh_tail; /* Block number of log tail */
__be32 lh_blkno;
- __be32 lh_hash;
+ __be32 lh_hash; /* crc up to here with this field 0 */
+
+ /* Version 2 additional fields start here */
+ __be32 lh_crc; /* crc32c from lh_nsec to end of block */
+ __be32 lh_nsec; /* Nanoseconds of timestamp */
+ __be64 lh_sec; /* Seconds of timestamp */
+ __be64 lh_addr; /* Block addr of this log header (absolute) */
+ __be64 lh_jinode; /* Journal inode number */
+ __be64 lh_statfs_addr; /* Local statfs inode number */
+ __be64 lh_quota_addr; /* Local quota change inode number */
+
+ /* Statfs local changes (i.e. diff from global statfs) */
+ __be64 lh_local_total;
+ __be64 lh_local_free;
+ __be64 lh_local_dinodes;
};
/*
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index fe648032d6b9..f71a1751cacf 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -72,6 +72,9 @@ struct i2c_msg {
#define I2C_M_RD 0x0001 /* read data, from slave to master */
/* I2C_M_RD is guaranteed to be 0x0001! */
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
+#define I2C_M_DMA_SAFE 0x0200 /* the buffer of this message is DMA safe */
+ /* makes only sense in kernelspace */
+ /* userspace buffers are copied anyway */
#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */
#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 144de4d2f385..3a45b4ad71a3 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -23,7 +23,6 @@
#define _UAPI_LINUX_IF_ETHER_H
#include <linux/types.h>
-#include <linux/libc-compat.h>
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@@ -31,6 +30,7 @@
*/
#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_TLEN 2 /* Octets in ethernet type field */
#define ETH_HLEN 14 /* Total octets in header. */
#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
#define ETH_DATA_LEN 1500 /* Max. octets in payload */
@@ -48,6 +48,7 @@
#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */
+#define ETH_P_ERSPAN2 0x22EB /* ERSPAN version 2 (type III) */
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_P_X25 0x0805 /* CCITT X.25 */
#define ETH_P_ARP 0x0806 /* Address Resolution packet */
@@ -88,6 +89,7 @@
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
+#define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */
#define ETH_P_TIPC 0x88CA /* TIPC */
#define ETH_P_MACSEC 0x88E5 /* 802.1ae MACsec */
#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */
@@ -150,6 +152,11 @@
* This is an Ethernet frame header.
*/
+/* allow libcs like musl to deactivate this; glibc does not implement it. */
+#ifndef __UAPI_DEF_ETHHDR
+#define __UAPI_DEF_ETHHDR 1
+#endif
+
#if __UAPI_DEF_ETHHDR
struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
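
The new __UAPI_DEF_ETHHDR guard replaces the libc-compat.h include: a libc (or an application mixing libc and kernel headers) that already provides struct ethhdr can suppress the kernel's definition by defining the macro to 0 first. A sketch of how that coordination is intended to look from the consumer side; the struct body shown is the consumer's own, not the kernel's:

	#define __UAPI_DEF_ETHHDR 0	/* we (or our libc) provide struct ethhdr */
	#include <linux/if_ether.h>	/* still supplies ETH_ALEN, ETH_P_*, ... */

	/* the application's / libc's own definition, e.g. musl's */
	struct ethhdr {
		unsigned char  h_dest[ETH_ALEN];
		unsigned char  h_source[ETH_ALEN];
		unsigned short h_proto;
	};

	int main(void)
	{
		struct ethhdr hdr = { .h_proto = 0x0800 /* IPv4 */ };
		return hdr.h_proto == ETH_P_IP ? 0 : 1;
	}
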
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 19fc02660e0c..68699f654118 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -161,6 +161,9 @@ enum {
IFLA_EVENT,
IFLA_NEW_NETNSID,
IFLA_IF_NETNSID,
+ IFLA_CARRIER_UP_COUNT,
+ IFLA_CARRIER_DOWN_COUNT,
+ IFLA_NEW_IFINDEX,
__IFLA_MAX
};
@@ -732,6 +735,8 @@ enum {
IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST,
IFLA_VF_STATS_PAD,
+ IFLA_VF_STATS_RX_DROPPED,
+ IFLA_VF_STATS_TX_DROPPED,
__IFLA_VF_STATS_MAX,
};
@@ -936,4 +941,43 @@ enum {
IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */
};
+/* tun section */
+
+enum {
+ IFLA_TUN_UNSPEC,
+ IFLA_TUN_OWNER,
+ IFLA_TUN_GROUP,
+ IFLA_TUN_TYPE,
+ IFLA_TUN_PI,
+ IFLA_TUN_VNET_HDR,
+ IFLA_TUN_PERSIST,
+ IFLA_TUN_MULTI_QUEUE,
+ IFLA_TUN_NUM_QUEUES,
+ IFLA_TUN_NUM_DISABLED_QUEUES,
+ __IFLA_TUN_MAX,
+};
+
+#define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1)
+
+/* rmnet section */
+
+#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0)
+#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+
+enum {
+ IFLA_RMNET_UNSPEC,
+ IFLA_RMNET_MUX_ID,
+ IFLA_RMNET_FLAGS,
+ __IFLA_RMNET_MAX,
+};
+
+#define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1)
+
+struct ifla_rmnet_flags {
+ __u32 flags;
+ __u32 mask;
+};
+
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 719d243471f4..98e4d5d7c45c 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -22,8 +22,13 @@
#define MACSEC_KEYID_LEN 16
-#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
-#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
+/* cipher IDs as per IEEE802.1AEbn-2011 */
+#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
+#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
+
+/* deprecated cipher ID for GCM-AES-128 */
+#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT MACSEC_CIPHER_ID_GCM_AES_128
#define MACSEC_MIN_ICV_LEN 8
#define MACSEC_MAX_ICV_LEN 32
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 030d3e6d6029..ee432cd3018c 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -57,6 +57,8 @@
*/
#define TUNSETVNETBE _IOW('T', 222, int)
#define TUNGETVNETBE _IOR('T', 223, int)
+#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
+#define TUNSETFILTEREBPF _IOR('T', 225, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index e68dadbd6d45..1b3d148c4560 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -137,6 +137,9 @@ enum {
IFLA_GRE_IGNORE_DF,
IFLA_GRE_FWMARK,
IFLA_GRE_ERSPAN_INDEX,
+ IFLA_GRE_ERSPAN_VER,
+ IFLA_GRE_ERSPAN_DIR,
+ IFLA_GRE_ERSPAN_HWID,
__IFLA_GRE_MAX,
};
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 817d807e9481..14565d703291 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -92,6 +92,8 @@ enum {
INET_DIAG_BC_D_COND,
INET_DIAG_BC_DEV_COND, /* u32 ifindex */
INET_DIAG_BC_MARK_COND,
+ INET_DIAG_BC_S_EQ,
+ INET_DIAG_BC_D_EQ,
};
struct inet_diag_hostcond {
diff --git a/include/uapi/linux/inotify.h b/include/uapi/linux/inotify.h
index 5474461683db..4800bf2a531d 100644
--- a/include/uapi/linux/inotify.h
+++ b/include/uapi/linux/inotify.h
@@ -71,5 +71,13 @@ struct inotify_event {
#define IN_CLOEXEC O_CLOEXEC
#define IN_NONBLOCK O_NONBLOCK
+/*
+ * ioctl numbers: inotify uses 'I' prefix for all ioctls,
+ * except historical FIONREAD, which is based on 'T'.
+ *
+ * INOTIFY_IOC_SETNEXTWD: set the desired number for the next created
+ * watch descriptor.
+ */
+#define INOTIFY_IOC_SETNEXTWD _IOW('I', 0, __s32)
#endif /* _UAPI_LINUX_INOTIFY_H */
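
INOTIFY_IOC_SETNEXTWD lets a process (checkpoint/restore being the obvious user) pick where watch-descriptor numbering continues. A hedged sketch; the desired number is assumed to be passed directly as the ioctl argument value, and the local define mirrors the one added above for builds against older kernel headers:

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/inotify.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/ioctl.h>

	#ifndef INOTIFY_IOC_SETNEXTWD
	#define INOTIFY_IOC_SETNEXTWD	_IOW('I', 0, int32_t)	/* mirrors the define above */
	#endif

	int main(void)
	{
		int fd = inotify_init1(IN_CLOEXEC);
		if (fd < 0)
			return 1;

		/* Ask the kernel to hand out watch descriptors starting at 1000. */
		if (ioctl(fd, INOTIFY_IOC_SETNEXTWD, 1000) == 0) {
			int wd = inotify_add_watch(fd, "/tmp", IN_CREATE);
			printf("wd = %d\n", wd);	/* expected to be >= 1000 */
		}

		close(fd);
		return 0;
	}
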
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 061fa62958a2..53fbae27b280 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -594,6 +594,7 @@
#define BTN_DPAD_RIGHT 0x223
#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
+#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 8c5a0bf6ee35..7288a7c573cc 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -21,10 +21,21 @@
/*
* The event structure itself
+ * Note that __USE_TIME_BITS64 is defined by the libc based on the
+ * application's request to use a 64-bit time_t.
*/
struct input_event {
+#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
struct timeval time;
+#define input_event_sec time.tv_sec
+#define input_event_usec time.tv_usec
+#else
+ __kernel_ulong_t __sec;
+ __kernel_ulong_t __usec;
+#define input_event_sec __sec
+#define input_event_usec __usec
+#endif
__u16 type;
__u16 code;
__s32 value;
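
With the layout above, portable readers should stop touching the timeval member directly and use the input_event_sec/input_event_usec accessors, which resolve to whichever representation the libc selected. A minimal event-reader sketch; the device node is a placeholder:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		int fd = open("/dev/input/event0", O_RDONLY);	/* placeholder node */
		if (fd < 0)
			return 1;

		struct input_event ev;
		while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
			/* Works for both the legacy timeval layout and the
			 * __USE_TIME_BITS64 layout introduced above. */
			printf("%ld.%06ld type %u code %u value %d\n",
			       (long)ev.input_event_sec, (long)ev.input_event_usec,
			       ev.type, ev.code, ev.value);
		}

		close(fd);
		return 0;
	}
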
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index b076f7a47407..32d148309b16 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -10,26 +10,6 @@
*
* Copyright 2002 MontaVista Software Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI__LINUX_IPMI_H
diff --git a/include/uapi/linux/ipmi_bmc.h b/include/uapi/linux/ipmi_bmc.h
new file mode 100644
index 000000000000..1670f0944227
--- /dev/null
+++ b/include/uapi/linux/ipmi_bmc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
+
+#ifndef _UAPI_LINUX_IPMI_BMC_H
+#define _UAPI_LINUX_IPMI_BMC_H
+
+#include <linux/ioctl.h>
+
+#define __IPMI_BMC_IOCTL_MAGIC 0xB1
+#define IPMI_BMC_IOCTL_SET_SMS_ATN _IO(__IPMI_BMC_IOCTL_MAGIC, 0x00)
+#define IPMI_BMC_IOCTL_CLEAR_SMS_ATN _IO(__IPMI_BMC_IOCTL_MAGIC, 0x01)
+#define IPMI_BMC_IOCTL_FORCE_ABORT _IO(__IPMI_BMC_IOCTL_MAGIC, 0x02)
+
+#endif /* _UAPI_LINUX_IPMI_BMC_H */
diff --git a/include/uapi/linux/ipmi_msgdefs.h b/include/uapi/linux/ipmi_msgdefs.h
index 17f349459587..c2b23a9fdf3d 100644
--- a/include/uapi/linux/ipmi_msgdefs.h
+++ b/include/uapi/linux/ipmi_msgdefs.h
@@ -10,26 +10,6 @@
*
* Copyright 2002 MontaVista Software Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IPMI_MSGDEFS_H
diff --git a/include/uapi/linux/irda.h b/include/uapi/linux/irda.h
deleted file mode 100644
index 2105c266aa6e..000000000000
--- a/include/uapi/linux/irda.h
+++ /dev/null
@@ -1,252 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*********************************************************************
- *
- * Filename: irda.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Mar 8 14:06:12 1999
- * Modified at: Sat Dec 25 16:06:42 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef KERNEL_IRDA_H
-#define KERNEL_IRDA_H
-
-#include <linux/types.h>
-#include <linux/socket.h>
-
-/* Note that this file is shared with user space. */
-
-/* Hint bit positions for first hint byte */
-#define HINT_PNP 0x01
-#define HINT_PDA 0x02
-#define HINT_COMPUTER 0x04
-#define HINT_PRINTER 0x08
-#define HINT_MODEM 0x10
-#define HINT_FAX 0x20
-#define HINT_LAN 0x40
-#define HINT_EXTENSION 0x80
-
-/* Hint bit positions for second hint byte (first extension byte) */
-#define HINT_TELEPHONY 0x01
-#define HINT_FILE_SERVER 0x02
-#define HINT_COMM 0x04
-#define HINT_MESSAGE 0x08
-#define HINT_HTTP 0x10
-#define HINT_OBEX 0x20
-
-/* IrLMP character code values */
-#define CS_ASCII 0x00
-#define CS_ISO_8859_1 0x01
-#define CS_ISO_8859_2 0x02
-#define CS_ISO_8859_3 0x03
-#define CS_ISO_8859_4 0x04
-#define CS_ISO_8859_5 0x05
-#define CS_ISO_8859_6 0x06
-#define CS_ISO_8859_7 0x07
-#define CS_ISO_8859_8 0x08
-#define CS_ISO_8859_9 0x09
-#define CS_UNICODE 0xff
-
-/* These are the currently known dongles */
-typedef enum {
- IRDA_TEKRAM_DONGLE = 0,
- IRDA_ESI_DONGLE = 1,
- IRDA_ACTISYS_DONGLE = 2,
- IRDA_ACTISYS_PLUS_DONGLE = 3,
- IRDA_GIRBIL_DONGLE = 4,
- IRDA_LITELINK_DONGLE = 5,
- IRDA_AIRPORT_DONGLE = 6,
- IRDA_OLD_BELKIN_DONGLE = 7,
- IRDA_EP7211_IR = 8,
- IRDA_MCP2120_DONGLE = 9,
- IRDA_ACT200L_DONGLE = 10,
- IRDA_MA600_DONGLE = 11,
- IRDA_TOIM3232_DONGLE = 12,
- IRDA_EP7211_DONGLE = 13,
-} IRDA_DONGLE;
-
-/* Protocol types to be used for SOCK_DGRAM */
-enum {
- IRDAPROTO_UNITDATA = 0,
- IRDAPROTO_ULTRA = 1,
- IRDAPROTO_MAX
-};
-
-#define SOL_IRLMP 266 /* Same as SOL_IRDA for now */
-#define SOL_IRTTP 266 /* Same as SOL_IRDA for now */
-
-#define IRLMP_ENUMDEVICES 1 /* Return discovery log */
-#define IRLMP_IAS_SET 2 /* Set an attribute in local IAS */
-#define IRLMP_IAS_QUERY 3 /* Query remote IAS for attribute */
-#define IRLMP_HINTS_SET 4 /* Set hint bits advertised */
-#define IRLMP_QOS_SET 5
-#define IRLMP_QOS_GET 6
-#define IRLMP_MAX_SDU_SIZE 7
-#define IRLMP_IAS_GET 8 /* Get an attribute from local IAS */
-#define IRLMP_IAS_DEL 9 /* Remove attribute from local IAS */
-#define IRLMP_HINT_MASK_SET 10 /* Set discovery filter */
-#define IRLMP_WAITDEVICE 11 /* Wait for a new discovery */
-
-#define IRTTP_MAX_SDU_SIZE IRLMP_MAX_SDU_SIZE /* Compatibility */
-
-#define IAS_MAX_STRING 256 /* See IrLMP 1.1, 4.3.3.2 */
-#define IAS_MAX_OCTET_STRING 1024 /* See IrLMP 1.1, 4.3.3.2 */
-#define IAS_MAX_CLASSNAME 60 /* See IrLMP 1.1, 4.3.1 */
-#define IAS_MAX_ATTRIBNAME 60 /* See IrLMP 1.1, 4.3.3.1 */
-#define IAS_MAX_ATTRIBNUMBER 256 /* See IrLMP 1.1, 4.3.3.1 */
-/* For user space backward compatibility - may be fixed in kernel 2.5.X
- * Note : need 60+1 ('\0'), make it 64 for alignement - Jean II */
-#define IAS_EXPORT_CLASSNAME 64
-#define IAS_EXPORT_ATTRIBNAME 256
-
-/* Attribute type needed for struct irda_ias_set */
-#define IAS_MISSING 0
-#define IAS_INTEGER 1
-#define IAS_OCT_SEQ 2
-#define IAS_STRING 3
-
-#define LSAP_ANY 0xff
-
-struct sockaddr_irda {
- __kernel_sa_family_t sir_family; /* AF_IRDA */
- __u8 sir_lsap_sel; /* LSAP selector */
- __u32 sir_addr; /* Device address */
- char sir_name[25]; /* Usually <service>:IrDA:TinyTP */
-};
-
-struct irda_device_info {
- __u32 saddr; /* Address of local interface */
- __u32 daddr; /* Address of remote device */
- char info[22]; /* Description */
- __u8 charset; /* Charset used for description */
- __u8 hints[2]; /* Hint bits */
-};
-
-struct irda_device_list {
- __u32 len;
- struct irda_device_info dev[1];
-};
-
-struct irda_ias_set {
- char irda_class_name[IAS_EXPORT_CLASSNAME];
- char irda_attrib_name[IAS_EXPORT_ATTRIBNAME];
- unsigned int irda_attrib_type;
- union {
- unsigned int irda_attrib_int;
- struct {
- unsigned short len;
- __u8 octet_seq[IAS_MAX_OCTET_STRING];
- } irda_attrib_octet_seq;
- struct {
- __u8 len;
- __u8 charset;
- __u8 string[IAS_MAX_STRING];
- } irda_attrib_string;
- } attribute;
- __u32 daddr; /* Address of device (for some queries only) */
-};
-
-/* Some private IOCTL's (max 16) */
-#define SIOCSDONGLE (SIOCDEVPRIVATE + 0)
-#define SIOCGDONGLE (SIOCDEVPRIVATE + 1)
-#define SIOCSBANDWIDTH (SIOCDEVPRIVATE + 2)
-#define SIOCSMEDIABUSY (SIOCDEVPRIVATE + 3)
-#define SIOCGMEDIABUSY (SIOCDEVPRIVATE + 4)
-#define SIOCGRECEIVING (SIOCDEVPRIVATE + 5)
-#define SIOCSMODE (SIOCDEVPRIVATE + 6)
-#define SIOCGMODE (SIOCDEVPRIVATE + 7)
-#define SIOCSDTRRTS (SIOCDEVPRIVATE + 8)
-#define SIOCGQOS (SIOCDEVPRIVATE + 9)
-
-/* No reason to include <linux/if.h> just because of this one ;-) */
-#define IRNAMSIZ 16
-
-/* IrDA quality of service information (must not exceed 16 bytes) */
-struct if_irda_qos {
- unsigned long baudrate;
- unsigned short data_size;
- unsigned short window_size;
- unsigned short min_turn_time;
- unsigned short max_turn_time;
- unsigned char add_bofs;
- unsigned char link_disc;
-};
-
-/* For setting RTS and DTR lines of a dongle */
-struct if_irda_line {
- __u8 dtr;
- __u8 rts;
-};
-
-/* IrDA interface configuration (data part must not exceed 16 bytes) */
-struct if_irda_req {
- union {
- char ifrn_name[IRNAMSIZ]; /* if name, e.g. "irda0" */
- } ifr_ifrn;
-
- /* Data part */
- union {
- struct if_irda_line ifru_line;
- struct if_irda_qos ifru_qos;
- unsigned short ifru_flags;
- unsigned int ifru_receiving;
- unsigned int ifru_mode;
- unsigned int ifru_dongle;
- } ifr_ifru;
-};
-
-#define ifr_baudrate ifr_ifru.ifru_qos.baudrate
-#define ifr_receiving ifr_ifru.ifru_receiving
-#define ifr_dongle ifr_ifru.ifru_dongle
-#define ifr_mode ifr_ifru.ifru_mode
-#define ifr_dtr ifr_ifru.ifru_line.dtr
-#define ifr_rts ifr_ifru.ifru_line.rts
-
-
-/* IrDA netlink definitions */
-#define IRDA_NL_NAME "irda"
-#define IRDA_NL_VERSION 1
-
-enum irda_nl_commands {
- IRDA_NL_CMD_UNSPEC,
- IRDA_NL_CMD_SET_MODE,
- IRDA_NL_CMD_GET_MODE,
-
- __IRDA_NL_CMD_AFTER_LAST
-};
-#define IRDA_NL_CMD_MAX (__IRDA_NL_CMD_AFTER_LAST - 1)
-
-enum nl80211_attrs {
- IRDA_NL_ATTR_UNSPEC,
- IRDA_NL_ATTR_IFNAME,
- IRDA_NL_ATTR_MODE,
-
- __IRDA_NL_ATTR_AFTER_LAST
-};
-#define IRDA_NL_ATTR_MAX (__IRDA_NL_ATTR_AFTER_LAST - 1)
-
-/* IrDA modes */
-#define IRDA_MODE_PRIMARY 0x1
-#define IRDA_MODE_SECONDARY 0x2
-#define IRDA_MODE_MONITOR 0x4
-
-#endif /* KERNEL_IRDA_H */
-
-
-
-
diff --git a/include/uapi/linux/ixjuser.h b/include/uapi/linux/ixjuser.h
deleted file mode 100644
index ba245007cfe7..000000000000
--- a/include/uapi/linux/ixjuser.h
+++ /dev/null
@@ -1,721 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-#ifndef __LINUX_IXJUSER_H
-#define __LINUX_IXJUSER_H
-
-/******************************************************************************
- *
- * ixjuser.h
- *
- * Device Driver for Quicknet Technologies, Inc.'s Telephony cards
- * including the Internet PhoneJACK, Internet PhoneJACK Lite,
- * Internet PhoneJACK PCI, Internet LineJACK, Internet PhoneCARD and
- * SmartCABLE
- *
- * (c) Copyright 1999-2001 Quicknet Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Author: Ed Okerson, <eokerson@quicknet.net>
- *
- * Contributors: Greg Herlein, <gherlein@quicknet.net>
- * David W. Erhart, <derhart@quicknet.net>
- * John Sellers, <jsellers@quicknet.net>
- * Mike Preston, <mpreston@quicknet.net>
- *
- * More information about the hardware related to this driver can be found
- * at our website: http://www.quicknet.net
- *
- * Fixes:
- *
- * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
- * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
- * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET
- * TECHNOLOGIES, INC.HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
- * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
- * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
- *
- *****************************************************************************/
-
-#include <linux/telephony.h>
-
-
-/******************************************************************************
-*
-* IOCTL's used for the Quicknet Telephony Cards
-*
-* If you use the IXJCTL_TESTRAM command, the card must be power cycled to
-* reset the SRAM values before further use.
-*
-******************************************************************************/
-
-#define IXJCTL_DSP_RESET _IO ('q', 0xC0)
-
-#define IXJCTL_RING PHONE_RING
-#define IXJCTL_HOOKSTATE PHONE_HOOKSTATE
-#define IXJCTL_MAXRINGS PHONE_MAXRINGS
-#define IXJCTL_RING_CADENCE PHONE_RING_CADENCE
-#define IXJCTL_RING_START PHONE_RING_START
-#define IXJCTL_RING_STOP PHONE_RING_STOP
-
-#define IXJCTL_CARDTYPE _IOR ('q', 0xC1, int)
-#define IXJCTL_SERIAL _IOR ('q', 0xC2, int)
-#define IXJCTL_DSP_TYPE _IOR ('q', 0xC3, int)
-#define IXJCTL_DSP_VERSION _IOR ('q', 0xC4, int)
-#define IXJCTL_VERSION _IOR ('q', 0xDA, char *)
-#define IXJCTL_DSP_IDLE _IO ('q', 0xC5)
-#define IXJCTL_TESTRAM _IO ('q', 0xC6)
-
-/******************************************************************************
-*
-* This group of IOCTLs deal with the record settings of the DSP
-*
-* The IXJCTL_REC_DEPTH command sets the internal buffer depth of the DSP.
-* Setting a lower depth reduces latency, but increases the demand of the
-* application to service the driver without frame loss. The DSP has 480
-* bytes of physical buffer memory for the record channel so the true
-* maximum limit is determined by how many frames will fit in the buffer.
-*
-* 1 uncompressed (480 byte) 16-bit linear frame.
-* 2 uncompressed (240 byte) 8-bit A-law/mu-law frames.
-* 15 TrueSpeech 8.5 frames.
-* 20 TrueSpeech 6.3,5.3,4.8 or 4.1 frames.
-*
-* The default in the driver is currently set to 2 frames.
-*
-* The IXJCTL_REC_VOLUME and IXJCTL_PLAY_VOLUME commands both use a Q8
-* number as a parameter, 0x100 scales the signal by 1.0, 0x200 scales the
-* signal by 2.0, 0x80 scales the signal by 0.5. No protection is given
-* against over-scaling, if the multiplication factor times the input
-* signal exceeds 16 bits, overflow distortion will occur. The default
-* setting is 0x100 (1.0).
-*
-* The IXJCTL_REC_LEVEL returns the average signal level (not r.m.s.) on
-* the most recently recorded frame as a 16 bit value.
-******************************************************************************/
-
-#define IXJCTL_REC_CODEC PHONE_REC_CODEC
-#define IXJCTL_REC_START PHONE_REC_START
-#define IXJCTL_REC_STOP PHONE_REC_STOP
-#define IXJCTL_REC_DEPTH PHONE_REC_DEPTH
-#define IXJCTL_FRAME PHONE_FRAME
-#define IXJCTL_REC_VOLUME PHONE_REC_VOLUME
-#define IXJCTL_REC_LEVEL PHONE_REC_LEVEL
-
-typedef enum {
- f300_640 = 4, f300_500, f1100, f350, f400, f480, f440, f620, f20_50,
- f133_200, f300, f300_420, f330, f300_425, f330_440, f340, f350_400,
- f350_440, f350_450, f360, f380_420, f392, f400_425, f400_440, f400_450,
- f420, f425, f425_450, f425_475, f435, f440_450, f440_480, f445, f450,
- f452, f475, f480_620, f494, f500, f520, f523, f525, f540_660, f587,
- f590, f600, f660, f700, f740, f750, f750_1450, f770, f800, f816, f850,
- f857_1645, f900, f900_1300, f935_1215, f941_1477, f942, f950, f950_1400,
- f975, f1000, f1020, f1050, f1100_1750, f1140, f1200, f1209, f1330, f1336,
- lf1366, f1380, f1400, f1477, f1600, f1633_1638, f1800, f1860
-} IXJ_FILTER_FREQ;
-
-typedef struct {
- unsigned int filter;
- IXJ_FILTER_FREQ freq;
- char enable;
-} IXJ_FILTER;
-
-typedef struct {
- char enable;
- char en_filter;
- unsigned int filter;
- unsigned int on1;
- unsigned int off1;
- unsigned int on2;
- unsigned int off2;
- unsigned int on3;
- unsigned int off3;
-} IXJ_FILTER_CADENCE;
-
-#define IXJCTL_SET_FILTER _IOW ('q', 0xC7, IXJ_FILTER *)
-#define IXJCTL_SET_FILTER_RAW _IOW ('q', 0xDD, IXJ_FILTER_RAW *)
-#define IXJCTL_GET_FILTER_HIST _IOW ('q', 0xC8, int)
-#define IXJCTL_FILTER_CADENCE _IOW ('q', 0xD6, IXJ_FILTER_CADENCE *)
-#define IXJCTL_PLAY_CID _IO ('q', 0xD7)
-/******************************************************************************
-*
-* This IOCTL allows you to reassign values in the tone index table. The
-* tone table has 32 entries (0 - 31), but the driver only allows entries
-* 13 - 27 to be modified, entry 0 is reserved for silence and 1 - 12 are
-* the standard DTMF digits and 28 - 31 are the DTMF tones for A, B, C & D.
-* The positions used internally for Call Progress Tones are as follows:
-* Dial Tone - 25
-* Ring Back - 26
-* Busy Signal - 27
-*
-* The freq values are calculated as:
-* freq = cos(2 * PI * frequency / 8000)
-*
-* The most commonly needed values are already calculated and listed in the
-* enum IXJ_TONE_FREQ. Each tone index can have two frequencies with
-* different gains, if you are only using a single frequency set the unused
-* one to 0.
-*
-* The gain values range from 0 to 15 indicating +6dB to -24dB in 2dB
-* increments.
-*
-******************************************************************************/
-
-typedef enum {
- hz20 = 0x7ffa,
- hz50 = 0x7fe5,
- hz133 = 0x7f4c,
- hz200 = 0x7e6b,
- hz261 = 0x7d50, /* .63 C1 */
- hz277 = 0x7cfa, /* .18 CS1 */
- hz293 = 0x7c9f, /* .66 D1 */
- hz300 = 0x7c75,
- hz311 = 0x7c32, /* .13 DS1 */
- hz329 = 0x7bbf, /* .63 E1 */
- hz330 = 0x7bb8,
- hz340 = 0x7b75,
- hz349 = 0x7b37, /* .23 F1 */
- hz350 = 0x7b30,
- hz360 = 0x7ae9,
- hz369 = 0x7aa8, /* .99 FS1 */
- hz380 = 0x7a56,
- hz392 = 0x79fa, /* .00 G1 */
- hz400 = 0x79bb,
- hz415 = 0x7941, /* .30 GS1 */
- hz420 = 0x7918,
- hz425 = 0x78ee,
- hz435 = 0x7899,
- hz440 = 0x786d, /* .00 A1 */
- hz445 = 0x7842,
- hz450 = 0x7815,
- hz452 = 0x7803,
- hz466 = 0x7784, /* .16 AS1 */
- hz475 = 0x7731,
- hz480 = 0x7701,
- hz493 = 0x7685, /* .88 B1 */
- hz494 = 0x767b,
- hz500 = 0x7640,
- hz520 = 0x7578,
- hz523 = 0x7559, /* .25 C2 */
- hz525 = 0x7544,
- hz540 = 0x74a7,
- hz554 = 0x7411, /* .37 CS2 */
- hz587 = 0x72a1, /* .33 D2 */
- hz590 = 0x727f,
- hz600 = 0x720b,
- hz620 = 0x711e,
- hz622 = 0x7106, /* .25 DS2 */
- hz659 = 0x6f3b, /* .26 E2 */
- hz660 = 0x6f2e,
- hz698 = 0x6d3d, /* .46 F2 */
- hz700 = 0x6d22,
- hz739 = 0x6b09, /* .99 FS2 */
- hz740 = 0x6afa,
- hz750 = 0x6a6c,
- hz770 = 0x694b,
- hz783 = 0x688b, /* .99 G2 */
- hz800 = 0x678d,
- hz816 = 0x6698,
- hz830 = 0x65bf, /* .61 GS2 */
- hz850 = 0x6484,
- hz857 = 0x6414,
- hz880 = 0x629f, /* .00 A2 */
- hz900 = 0x6154,
- hz932 = 0x5f35, /* .33 AS2 */
- hz935 = 0x5f01,
- hz941 = 0x5e9a,
- hz942 = 0x5e88,
- hz950 = 0x5dfd,
- hz975 = 0x5c44,
- hz1000 = 0x5a81,
- hz1020 = 0x5912,
- hz1050 = 0x56e2,
- hz1100 = 0x5320,
- hz1140 = 0x5007,
- hz1200 = 0x4b3b,
- hz1209 = 0x4a80,
- hz1215 = 0x4a02,
- hz1250 = 0x471c,
- hz1300 = 0x42e0,
- hz1330 = 0x4049,
- hz1336 = 0x3fc4,
- hz1366 = 0x3d22,
- hz1380 = 0x3be4,
- hz1400 = 0x3a1b,
- hz1450 = 0x3596,
- hz1477 = 0x331c,
- hz1500 = 0x30fb,
- hz1600 = 0x278d,
- hz1633 = 0x2462,
- hz1638 = 0x23e7,
- hz1645 = 0x233a,
- hz1750 = 0x18f8,
- hz1800 = 0x1405,
- hz1860 = 0xe0b,
- hz2100 = 0xf5f6,
- hz2130 = 0xf2f5,
- hz2450 = 0xd3b3,
- hz2750 = 0xb8e4
-} IXJ_FREQ;
-
-typedef enum {
- C1 = hz261,
- CS1 = hz277,
- D1 = hz293,
- DS1 = hz311,
- E1 = hz329,
- F1 = hz349,
- FS1 = hz369,
- G1 = hz392,
- GS1 = hz415,
- A1 = hz440,
- AS1 = hz466,
- B1 = hz493,
- C2 = hz523,
- CS2 = hz554,
- D2 = hz587,
- DS2 = hz622,
- E2 = hz659,
- F2 = hz698,
- FS2 = hz739,
- G2 = hz783,
- GS2 = hz830,
- A2 = hz880,
- AS2 = hz932,
-} IXJ_NOTE;
-
-typedef struct {
- int tone_index;
- int freq0;
- int gain0;
- int freq1;
- int gain1;
-} IXJ_TONE;
-
-#define IXJCTL_INIT_TONE _IOW ('q', 0xC9, IXJ_TONE *)
-
-/******************************************************************************
-*
-* The IXJCTL_TONE_CADENCE ioctl defines tone sequences used for various
-* Call Progress Tones (CPT). This is accomplished by setting up an array of
-* IXJ_CADENCE_ELEMENT structures that sequentially define the states of
-* the tone sequence. The tone_on_time and tone_off time are in
-* 250 microsecond intervals. A pointer to this array is passed to the
-* driver as the ce element of an IXJ_CADENCE structure. The elements_used
-* must be set to the number of IXJ_CADENCE_ELEMENTS in the array. The
-* termination variable defines what to do at the end of a cadence, the
-* options are to play the cadence once and stop, to repeat the last
-* element of the cadence indefinitely, or to repeat the entire cadence
-* indefinitely. The ce variable is a pointer to the array of IXJ_TONE
-* structures. If the freq0 variable is non-zero, the tone table contents
-* for the tone_index are updated to the frequencies and gains defined. It
-* should be noted that DTMF tones cannot be reassigned, so if DTMF tone
-* table indexes are used in a cadence the frequency and gain variables will
-* be ignored.
-*
-* If the array elements contain frequency parameters the driver will
-* initialize the needed tone table elements and begin playing the tone,
-* there is no preset limit on the number of elements in the cadence. If
-* there is more than one frequency used in the cadence, sequential elements
-* of different frequencies MUST use different tone table indexes. Only one
-* cadence can be played at a time. It is possible to build complex
-* cadences with multiple frequencies using 2 tone table indexes by
-* alternating between them.
-*
-******************************************************************************/
-
-typedef struct {
- int index;
- int tone_on_time;
- int tone_off_time;
- int freq0;
- int gain0;
- int freq1;
- int gain1;
-} IXJ_CADENCE_ELEMENT;
-
-typedef enum {
- PLAY_ONCE,
- REPEAT_LAST_ELEMENT,
- REPEAT_ALL
-} IXJ_CADENCE_TERM;
-
-typedef struct {
- int elements_used;
- IXJ_CADENCE_TERM termination;
- IXJ_CADENCE_ELEMENT __user *ce;
-} IXJ_CADENCE;
-
-#define IXJCTL_TONE_CADENCE _IOW ('q', 0xCA, IXJ_CADENCE *)
-/******************************************************************************
-*
-* This group of IOCTLs deal with the playback settings of the DSP
-*
-******************************************************************************/
-
-#define IXJCTL_PLAY_CODEC PHONE_PLAY_CODEC
-#define IXJCTL_PLAY_START PHONE_PLAY_START
-#define IXJCTL_PLAY_STOP PHONE_PLAY_STOP
-#define IXJCTL_PLAY_DEPTH PHONE_PLAY_DEPTH
-#define IXJCTL_PLAY_VOLUME PHONE_PLAY_VOLUME
-#define IXJCTL_PLAY_LEVEL PHONE_PLAY_LEVEL
-
-/******************************************************************************
-*
-* This group of IOCTLs deal with the Acoustic Echo Cancellation settings
-* of the DSP
-*
-* Issuing the IXJCTL_AEC_START command with a value of AEC_OFF has the
-* same effect as IXJCTL_AEC_STOP. This is to simplify slider bar
-* controls. IXJCTL_AEC_GET_LEVEL returns the current setting of the AEC.
-******************************************************************************/
-#define IXJCTL_AEC_START _IOW ('q', 0xCB, int)
-#define IXJCTL_AEC_STOP _IO ('q', 0xCC)
-#define IXJCTL_AEC_GET_LEVEL _IO ('q', 0xCD)
-
-#define AEC_OFF 0
-#define AEC_LOW 1
-#define AEC_MED 2
-#define AEC_HIGH 3
-#define AEC_AUTO 4
-#define AEC_AGC 5
-/******************************************************************************
-*
-* Call Progress Tones, DTMF, etc.
-* IXJCTL_DTMF_OOB determines if DTMF signaling is sent as Out-Of-Band
-* only. If you pass a 1, DTMF is suppressed from the audio stream.
-* Tone on and off times are in 250 microsecond intervals so
-* ioctl(ixj1, IXJCTL_SET_TONE_ON_TIME, 360);
-* will set the tone on time of board ixj1 to 360 * 250us = 90ms
-* the default values of tone on and off times is 840 or 210ms
-******************************************************************************/
-
-#define IXJCTL_DTMF_READY PHONE_DTMF_READY
-#define IXJCTL_GET_DTMF PHONE_GET_DTMF
-#define IXJCTL_GET_DTMF_ASCII PHONE_GET_DTMF_ASCII
-#define IXJCTL_DTMF_OOB PHONE_DTMF_OOB
-#define IXJCTL_EXCEPTION PHONE_EXCEPTION
-#define IXJCTL_PLAY_TONE PHONE_PLAY_TONE
-#define IXJCTL_SET_TONE_ON_TIME PHONE_SET_TONE_ON_TIME
-#define IXJCTL_SET_TONE_OFF_TIME PHONE_SET_TONE_OFF_TIME
-#define IXJCTL_GET_TONE_ON_TIME PHONE_GET_TONE_ON_TIME
-#define IXJCTL_GET_TONE_OFF_TIME PHONE_GET_TONE_OFF_TIME
-#define IXJCTL_GET_TONE_STATE PHONE_GET_TONE_STATE
-#define IXJCTL_BUSY PHONE_BUSY
-#define IXJCTL_RINGBACK PHONE_RINGBACK
-#define IXJCTL_DIALTONE PHONE_DIALTONE
-#define IXJCTL_CPT_STOP PHONE_CPT_STOP
-
-/******************************************************************************
-* LineJACK specific IOCTLs
-*
-* The least significant 4 bits of the LED argument represent the state of
-* each of the 4 LEDs on the LineJACK.
-******************************************************************************/
-
-#define IXJCTL_SET_LED _IOW ('q', 0xCE, int)
-#define IXJCTL_MIXER _IOW ('q', 0xCF, int)
-
-/******************************************************************************
-*
-* The master volume controls use attenuation with 32 levels from 0 to -62dB
-* with steps of 2dB each. The defines should be OR'ed together and then sent
-* as the parameter to the mixer command to change the mixer settings.
-*
-******************************************************************************/
-#define MIXER_MASTER_L 0x0000
-#define MIXER_MASTER_R 0x0100
-#define ATT00DB 0x00
-#define ATT02DB 0x01
-#define ATT04DB 0x02
-#define ATT06DB 0x03
-#define ATT08DB 0x04
-#define ATT10DB 0x05
-#define ATT12DB 0x06
-#define ATT14DB 0x07
-#define ATT16DB 0x08
-#define ATT18DB 0x09
-#define ATT20DB 0x0A
-#define ATT22DB 0x0B
-#define ATT24DB 0x0C
-#define ATT26DB 0x0D
-#define ATT28DB 0x0E
-#define ATT30DB 0x0F
-#define ATT32DB 0x10
-#define ATT34DB 0x11
-#define ATT36DB 0x12
-#define ATT38DB 0x13
-#define ATT40DB 0x14
-#define ATT42DB 0x15
-#define ATT44DB 0x16
-#define ATT46DB 0x17
-#define ATT48DB 0x18
-#define ATT50DB 0x19
-#define ATT52DB 0x1A
-#define ATT54DB 0x1B
-#define ATT56DB 0x1C
-#define ATT58DB 0x1D
-#define ATT60DB 0x1E
-#define ATT62DB 0x1F
-#define MASTER_MUTE 0x80
-
-/******************************************************************************
-*
-* The input volume controls use gain with 32 levels from +12dB to -50dB
-* with steps of 2dB each. The defines should be OR'ed together and then sent
-* as the parameter to the mixer command to change the mixer settings.
-*
-******************************************************************************/
-#define MIXER_PORT_CD_L 0x0600
-#define MIXER_PORT_CD_R 0x0700
-#define MIXER_PORT_LINE_IN_L 0x0800
-#define MIXER_PORT_LINE_IN_R 0x0900
-#define MIXER_PORT_POTS_REC 0x0C00
-#define MIXER_PORT_MIC 0x0E00
-
-#define GAIN12DB 0x00
-#define GAIN10DB 0x01
-#define GAIN08DB 0x02
-#define GAIN06DB 0x03
-#define GAIN04DB 0x04
-#define GAIN02DB 0x05
-#define GAIN00DB 0x06
-#define GAIN_02DB 0x07
-#define GAIN_04DB 0x08
-#define GAIN_06DB 0x09
-#define GAIN_08DB 0x0A
-#define GAIN_10DB 0x0B
-#define GAIN_12DB 0x0C
-#define GAIN_14DB 0x0D
-#define GAIN_16DB 0x0E
-#define GAIN_18DB 0x0F
-#define GAIN_20DB 0x10
-#define GAIN_22DB 0x11
-#define GAIN_24DB 0x12
-#define GAIN_26DB 0x13
-#define GAIN_28DB 0x14
-#define GAIN_30DB 0x15
-#define GAIN_32DB 0x16
-#define GAIN_34DB 0x17
-#define GAIN_36DB 0x18
-#define GAIN_38DB 0x19
-#define GAIN_40DB 0x1A
-#define GAIN_42DB 0x1B
-#define GAIN_44DB 0x1C
-#define GAIN_46DB 0x1D
-#define GAIN_48DB 0x1E
-#define GAIN_50DB 0x1F
-#define INPUT_MUTE 0x80
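A sketch of the OR'ing convention described above (fd is an open LineJACK device; the chosen levels are arbitrary examples):

    /* Left master channel at -10dB attenuation, microphone at +12dB gain. */
    ioctl(fd, IXJCTL_MIXER, MIXER_MASTER_L | ATT10DB);
    ioctl(fd, IXJCTL_MIXER, MIXER_PORT_MIC | GAIN12DB);
    /* Mute the right master channel. */
    ioctl(fd, IXJCTL_MIXER, MIXER_MASTER_R | MASTER_MUTE);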
-
-/******************************************************************************
-*
-* The POTS volume control uses attenuation with 8 levels from 0dB to -28dB
-* with steps of 4dB each. The defines should be OR'ed together and then sent
-* as the parameter to the mixer command to change the mixer settings.
-*
-******************************************************************************/
-#define MIXER_PORT_POTS_PLAY 0x0F00
-
-#define POTS_ATT_00DB 0x00
-#define POTS_ATT_04DB 0x01
-#define POTS_ATT_08DB 0x02
-#define POTS_ATT_12DB 0x03
-#define POTS_ATT_16DB 0x04
-#define POTS_ATT_20DB 0x05
-#define POTS_ATT_24DB 0x06
-#define POTS_ATT_28DB 0x07
-#define POTS_MUTE 0x80
-
-/******************************************************************************
-*
-* The DAA controls the interface to the PSTN port. The driver loads the
-* US coefficients by default, so if you live in a different country you
-* need to load the set for your country's phone system.
-*
-******************************************************************************/
-#define IXJCTL_DAA_COEFF_SET _IOW ('q', 0xD0, int)
-
-#define DAA_US 1 /*PITA 8kHz */
-#define DAA_UK 2 /*ISAR34 8kHz */
-#define DAA_FRANCE 3 /* */
-#define DAA_GERMANY 4
-#define DAA_AUSTRALIA 5
-#define DAA_JAPAN 6
-
-/******************************************************************************
-*
-* Use IXJCTL_PORT to set or query the port the card is set to. If the
-* argument is set to PORT_QUERY, the return value of the ioctl will
-* indicate which port is currently in use; otherwise it will change the
-* port.
-*
-******************************************************************************/
-#define IXJCTL_PORT _IOW ('q', 0xD1, int)
-
-#define PORT_QUERY 0
-#define PORT_POTS 1
-#define PORT_PSTN 2
-#define PORT_SPEAKER 3
-#define PORT_HANDSET 4
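For example, a sketch (fd again being an open device) that queries the active port and switches to the speakerphone if needed:

    if (ioctl(fd, IXJCTL_PORT, PORT_QUERY) != PORT_SPEAKER)
            ioctl(fd, IXJCTL_PORT, PORT_SPEAKER);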
-
-#define IXJCTL_PSTN_SET_STATE PHONE_PSTN_SET_STATE
-#define IXJCTL_PSTN_GET_STATE PHONE_PSTN_GET_STATE
-
-#define PSTN_ON_HOOK 0
-#define PSTN_RINGING 1
-#define PSTN_OFF_HOOK 2
-#define PSTN_PULSE_DIAL 3
-
-/******************************************************************************
-*
-* The DAA Analog GAIN sets 2 parameters at one time, the receive gain (AGRR),
-* and the transmit gain (AGX). OR together the components and pass them
-* as the parameter to IXJCTL_DAA_AGAIN. The default setting is both at 0dB.
-*
-******************************************************************************/
-#define IXJCTL_DAA_AGAIN _IOW ('q', 0xD2, int)
-
-#define AGRR00DB 0x00 /* Analog gain in receive direction 0dB */
-#define AGRR3_5DB 0x10 /* Analog gain in receive direction 3.5dB */
-#define AGRR06DB 0x30 /* Analog gain in receive direction 6dB */
-
-#define AGX00DB 0x00 /* Analog gain in transmit direction 0dB */
-#define AGX_6DB 0x04 /* Analog gain in transmit direction -6dB */
-#define AGX3_5DB 0x08 /* Analog gain in transmit direction 3.5dB */
-#define AGX_2_5B 0x0C /* Analog gain in transmit direction -2.5dB */
-
-#define IXJCTL_PSTN_LINETEST _IO ('q', 0xD3)
-
-#define IXJCTL_CID _IOR ('q', 0xD4, PHONE_CID *)
-#define IXJCTL_VMWI _IOR ('q', 0xD8, int)
-#define IXJCTL_CIDCW _IOW ('q', 0xD9, PHONE_CID *)
-/******************************************************************************
-*
-* The wink duration is tunable with this ioctl. The default wink duration
-* is 320ms. You do not need to use this ioctl if you do not require a
-* different wink duration.
-*
-******************************************************************************/
-#define IXJCTL_WINK_DURATION PHONE_WINK_DURATION
-
-/******************************************************************************
-*
-* This ioctl will connect the POTS port to the PSTN port on the LineJACK.
-* In order for this to work properly, the port selection should be set to
-* the PSTN port with IXJCTL_PORT prior to calling this ioctl. This will
-* enable conference calls between PSTN callers and network callers.
-* Passing a 1 to this ioctl enables the POTS<->PSTN connection while
-* passing a 0 turns it back off.
-*
-******************************************************************************/
-#define IXJCTL_POTS_PSTN _IOW ('q', 0xD5, int)
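A sketch of the call order implied above (fd is the LineJACK device node):

    ioctl(fd, IXJCTL_PORT, PORT_PSTN);  /* select the PSTN port first */
    ioctl(fd, IXJCTL_POTS_PSTN, 1);     /* 1 connects POTS<->PSTN, 0 disconnects */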
-
-/******************************************************************************
-*
-* IOCTLs added by request.
-*
-* IXJCTL_HZ sets the value your Linux kernel uses for HZ as defined in
-* /usr/include/asm/param.h; this determines the fundamental
-* frequency of the clock ticks on your Linux system. The kernel
-* must be rebuilt if you change this value, and all modules you
-* use (except this one) must be recompiled. The default value
-* is 100, and you only need to use this IOCTL if you use some
-* other value.
-*
-*
-* IXJCTL_RATE sets the number of times per second that the driver polls
-* the DSP. This value cannot be larger than HZ. By
-* increasing both of these values, you may be able to reduce
-* latency because the max hang time that can exist between the
-* driver and the DSP will be reduced.
-*
-******************************************************************************/
-
-#define IXJCTL_HZ _IOW ('q', 0xE0, int)
-#define IXJCTL_RATE _IOW ('q', 0xE1, int)
-#define IXJCTL_FRAMES_READ _IOR ('q', 0xE2, unsigned long)
-#define IXJCTL_FRAMES_WRITTEN _IOR ('q', 0xE3, unsigned long)
-#define IXJCTL_READ_WAIT _IOR ('q', 0xE4, unsigned long)
-#define IXJCTL_WRITE_WAIT _IOR ('q', 0xE5, unsigned long)
-#define IXJCTL_DRYBUFFER_READ _IOR ('q', 0xE6, unsigned long)
-#define IXJCTL_DRYBUFFER_CLEAR _IO ('q', 0xE7)
-#define IXJCTL_DTMF_PRESCALE _IOW ('q', 0xE8, int)
-
-/******************************************************************************
-*
-* This ioctl allows the user application to control what events the driver
-* will send signals for, and what signals it will send for which event.
-* By default, if signaling is enabled, all events will send SIGIO when
-* they occur. To disable signals for an event set the signal to 0.
-*
-******************************************************************************/
-typedef enum {
- SIG_DTMF_READY,
- SIG_HOOKSTATE,
- SIG_FLASH,
- SIG_PSTN_RING,
- SIG_CALLER_ID,
- SIG_PSTN_WINK,
- SIG_F0, SIG_F1, SIG_F2, SIG_F3,
- SIG_FC0, SIG_FC1, SIG_FC2, SIG_FC3,
- SIG_READ_READY = 33,
- SIG_WRITE_READY = 34
-} IXJ_SIGEVENT;
-
-typedef struct {
- unsigned int event;
- int signal;
-} IXJ_SIGDEF;
-
-#define IXJCTL_SIGCTL _IOW ('q', 0xE9, IXJ_SIGDEF *)
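A sketch of reassigning event signals (assumes <signal.h> for SIGUSR1 and an open device fd):

    IXJ_SIGDEF sd;

    sd.event  = SIG_DTMF_READY;
    sd.signal = SIGUSR1;                /* deliver SIGUSR1 on DTMF digits */
    ioctl(fd, IXJCTL_SIGCTL, &sd);

    sd.event  = SIG_HOOKSTATE;
    sd.signal = 0;                      /* 0 disables signals for this event */
    ioctl(fd, IXJCTL_SIGCTL, &sd);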
-
-/******************************************************************************
-*
-* These ioctls allow the user application to change the gain in the
-* Smart Cable of the Internet Phone Card. Sending -1 as a value will cause
-* return value to be the current setting. Valid values to set are 0x00 - 0x1F
-*
-* 11111 = +12 dB
-* 10111 = 0 dB
-* 00000 = -34.5 dB
-*
-* IXJCTL_SC_RXG sets the Receive gain
-* IXJCTL_SC_TXG sets the Transmit gain
-*
-******************************************************************************/
-#define IXJCTL_SC_RXG _IOW ('q', 0xEA, int)
-#define IXJCTL_SC_TXG _IOW ('q', 0xEB, int)
-
-/******************************************************************************
-*
-* The intercom IOCTLs short the output from one card to the input of the
-* other and vice versa (actually done in the DSP read function). It is only
-* necessary to execute the IOCTL on one card, but it is necessary to have
-* both devices open to be able to detect hook switch changes. The record
-* codec and rate of each card must match the playback codec and rate of
-* the other card for this to work properly.
-*
-******************************************************************************/
-
-#define IXJCTL_INTERCOM_START _IOW ('q', 0xFD, int)
-#define IXJCTL_INTERCOM_STOP _IOW ('q', 0xFE, int)
-
-/******************************************************************************
- *
- * new structure for accessing raw filter information
- *
- ******************************************************************************/
-
-typedef struct {
- unsigned int filter;
- char enable;
- unsigned int coeff[19];
-} IXJ_FILTER_RAW;
-
-#endif
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 6e80501368ae..b4f5073dbac2 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -58,7 +58,8 @@ struct kfd_ioctl_create_queue_args {
__u64 eop_buffer_address; /* to KFD */
__u64 eop_buffer_size; /* to KFD */
__u64 ctx_save_restore_address; /* to KFD */
- __u64 ctx_save_restore_size; /* to KFD */
+ __u32 ctx_save_restore_size; /* to KFD */
+ __u32 ctl_stack_size; /* to KFD */
};
struct kfd_ioctl_destroy_queue_args {
@@ -106,8 +107,6 @@ struct kfd_ioctl_get_clock_counters_args {
__u32 pad;
};
-#define NUM_OF_SUPPORTED_GPUS 7
-
struct kfd_process_device_apertures {
__u64 lds_base; /* from KFD */
__u64 lds_limit; /* from KFD */
@@ -119,6 +118,12 @@ struct kfd_process_device_apertures {
__u32 pad;
};
+/*
+ * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
+ * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
+ * unlimited number of GPUs.
+ */
+#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
struct kfd_process_device_apertures
process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
@@ -128,6 +133,19 @@ struct kfd_ioctl_get_process_apertures_args {
__u32 pad;
};
+struct kfd_ioctl_get_process_apertures_new_args {
+ /* User allocated. Pointer to struct kfd_process_device_apertures
+ * filled in by Kernel
+ */
+ __u64 kfd_process_device_apertures_ptr;
+ /* to KFD - indicates amount of memory present in
+ * kfd_process_device_apertures_ptr
+ * from KFD - Number of entries filled by KFD.
+ */
+ __u32 num_of_nodes;
+ __u32 pad;
+};
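A minimal user-space sketch of the new call (a hedged example: kfd_fd is assumed to be an open /dev/kfd descriptor, the array size of 16 is arbitrary, <stdio.h>, <stdint.h> and <sys/ioctl.h> are assumed, and AMDKFD_IOC_GET_PROCESS_APERTURES_NEW is defined further down in this header):

    struct kfd_process_device_apertures apertures[16];
    struct kfd_ioctl_get_process_apertures_new_args args = {
            .kfd_process_device_apertures_ptr = (__u64)(uintptr_t)apertures,
            .num_of_nodes = 16,     /* capacity of the array, in entries */
    };

    if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args) == 0)
            printf("KFD filled %u aperture entries\n", args.num_of_nodes);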
+
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128
@@ -261,6 +279,93 @@ struct kfd_ioctl_get_tile_config_args {
*/
};
+struct kfd_ioctl_set_trap_handler_args {
+ __u64 tba_addr; /* to KFD */
+ __u64 tma_addr; /* to KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 pad;
+};
+
+struct kfd_ioctl_acquire_vm_args {
+ __u32 drm_fd; /* to KFD */
+ __u32 gpu_id; /* to KFD */
+};
+
+/* Allocation flags: memory types */
+#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
+#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
+#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
+/* Allocation flags: attributes/access options */
+#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
+#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
+#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
+#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
+#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
+#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
+
+/* Allocate memory for later SVM (shared virtual memory) mapping.
+ *
+ * @va_addr: virtual address of the memory to be allocated
+ * all later mappings on all GPUs will use this address
+ * @size: size in bytes
+ * @handle: buffer handle returned to user mode, used to refer to
+ * this allocation for mapping, unmapping and freeing
+ * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
+ * for userptrs this is overloaded to specify the CPU address
+ * @gpu_id: device identifier
+ * @flags: memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
+ */
+struct kfd_ioctl_alloc_memory_of_gpu_args {
+ __u64 va_addr; /* to KFD */
+ __u64 size; /* to KFD */
+ __u64 handle; /* from KFD */
+ __u64 mmap_offset; /* to KFD (userptr), from KFD (mmap offset) */
+ __u32 gpu_id; /* to KFD */
+ __u32 flags;
+};
+
+/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
+ *
+ * @handle: memory handle returned by alloc
+ */
+struct kfd_ioctl_free_memory_of_gpu_args {
+ __u64 handle; /* to KFD */
+};
+
+/* Map memory to one or more GPUs
+ *
+ * @handle: memory handle returned by alloc
+ * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
+ * @n_devices: number of devices in the array
+ * @n_success: number of devices mapped successfully
+ *
+ * @n_success tells the caller how many devices from
+ * the start of the array have mapped the buffer successfully. It can
+ * be passed into a subsequent retry call to skip those devices. For
+ * the first call the caller should initialize it to 0.
+ *
+ * If the ioctl completes with return code 0 (success), n_success ==
+ * n_devices.
+ */
+struct kfd_ioctl_map_memory_to_gpu_args {
+ __u64 handle; /* to KFD */
+ __u64 device_ids_array_ptr; /* to KFD */
+ __u32 n_devices; /* to KFD */
+ __u32 n_success; /* to/from KFD */
+};
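A sketch of the n_success protocol (a hedged example: kfd_fd, handle and the gpu_ids array are assumed to come from earlier open/alloc calls, <stdint.h> and <sys/ioctl.h> are assumed, and AMDKFD_IOC_MAP_MEMORY_TO_GPU is defined further down in this header):

    static int map_to_all_gpus(int kfd_fd, __u64 handle, __u32 *gpu_ids, __u32 n)
    {
            struct kfd_ioctl_map_memory_to_gpu_args args = {
                    .handle = handle,
                    .device_ids_array_ptr = (__u64)(uintptr_t)gpu_ids,
                    .n_devices = n,
                    .n_success = 0,         /* must start at 0 on the first call */
            };
            int ret = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args);

            if (ret)
                    /* args.n_success devices are already mapped; retrying with
                     * the same args skips them */
                    ret = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args);

            return ret;     /* 0 means args.n_success == n_devices */
    }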
+
+/* Unmap memory from one or more GPUs
+ *
+ * same arguments as for mapping
+ */
+struct kfd_ioctl_unmap_memory_from_gpu_args {
+ __u64 handle; /* to KFD */
+ __u64 device_ids_array_ptr; /* to KFD */
+ __u32 n_devices; /* to KFD */
+ __u32 n_success; /* to/from KFD */
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -321,7 +426,29 @@ struct kfd_ioctl_get_tile_config_args {
#define AMDKFD_IOC_GET_TILE_CONFIG \
AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
+#define AMDKFD_IOC_SET_TRAP_HANDLER \
+ AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
+
+#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
+ AMDKFD_IOWR(0x14, \
+ struct kfd_ioctl_get_process_apertures_new_args)
+
+#define AMDKFD_IOC_ACQUIRE_VM \
+ AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
+
+#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
+ AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
+
+#define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
+ AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
+
+#define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
+ AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
+
+#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
+ AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x13
+#define AMDKFD_COMMAND_END 0x1A
#endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 8fb90a0819c3..1065006c9bf5 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -396,6 +396,10 @@ struct kvm_run {
char padding[256];
};
+ /* 2048 is the size of the char array used to bound/pad the size
+ * of the union that holds sync regs.
+ */
+ #define SYNC_REGS_SIZE_BYTES 2048
/*
* shared registers between kvm and userspace.
* kvm_valid_regs specifies the register classes set by the host
@@ -407,7 +411,7 @@ struct kvm_run {
__u64 kvm_dirty_regs;
union {
struct kvm_sync_regs regs;
- char padding[2048];
+ char padding[SYNC_REGS_SIZE_BYTES];
} s;
};
@@ -761,6 +765,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
+#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
/*
* Extension capability list.
@@ -924,7 +929,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_GS 140
#define KVM_CAP_S390_AIS 141
#define KVM_CAP_SPAPR_TCE_VFIO 142
-#define KVM_CAP_X86_GUEST_MWAIT 143
+#define KVM_CAP_X86_DISABLE_EXITS 143
#define KVM_CAP_ARM_USER_IRQ 144
#define KVM_CAP_S390_CMMA_MIGRATION 145
#define KVM_CAP_PPC_FWNMI 146
@@ -934,6 +939,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_AIS_MIGRATION 150
#define KVM_CAP_PPC_GET_CPU_CHAR 151
#define KVM_CAP_S390_BPB 152
+#define KVM_CAP_GET_MSR_FEATURES 153
+#define KVM_CAP_HYPERV_EVENTFD 154
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1362,6 +1369,100 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_S390_CMMA_MIGRATION */
#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
+/* Memory Encryption Commands */
+#define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
+
+struct kvm_enc_region {
+ __u64 addr;
+ __u64 size;
+};
+
+#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
+#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
+
+/* Available with KVM_CAP_HYPERV_EVENTFD */
+#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
+
+
+/* Secure Encrypted Virtualization command */
+enum sev_cmd_id {
+ /* Guest initialization commands */
+ KVM_SEV_INIT = 0,
+ KVM_SEV_ES_INIT,
+ /* Guest launch commands */
+ KVM_SEV_LAUNCH_START,
+ KVM_SEV_LAUNCH_UPDATE_DATA,
+ KVM_SEV_LAUNCH_UPDATE_VMSA,
+ KVM_SEV_LAUNCH_SECRET,
+ KVM_SEV_LAUNCH_MEASURE,
+ KVM_SEV_LAUNCH_FINISH,
+ /* Guest migration commands (outgoing) */
+ KVM_SEV_SEND_START,
+ KVM_SEV_SEND_UPDATE_DATA,
+ KVM_SEV_SEND_UPDATE_VMSA,
+ KVM_SEV_SEND_FINISH,
+ /* Guest migration commands (incoming) */
+ KVM_SEV_RECEIVE_START,
+ KVM_SEV_RECEIVE_UPDATE_DATA,
+ KVM_SEV_RECEIVE_UPDATE_VMSA,
+ KVM_SEV_RECEIVE_FINISH,
+ /* Guest status and debug commands */
+ KVM_SEV_GUEST_STATUS,
+ KVM_SEV_DBG_DECRYPT,
+ KVM_SEV_DBG_ENCRYPT,
+ /* Guest certificates commands */
+ KVM_SEV_CERT_EXPORT,
+
+ KVM_SEV_NR_MAX,
+};
+
+struct kvm_sev_cmd {
+ __u32 id;
+ __u64 data;
+ __u32 error;
+ __u32 sev_fd;
+};
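A sketch of the calling convention for the SEV commands above, which are multiplexed through KVM_MEMORY_ENCRYPT_OP on the VM file descriptor (a hedged example: vm_fd and sev_fd, an open /dev/sev, are assumed, as are <stdio.h>, <stdint.h> and <sys/ioctl.h>):

    static int sev_issue_cmd(int vm_fd, int sev_fd, __u32 id, void *data)
    {
            struct kvm_sev_cmd cmd = {
                    .id     = id,           /* e.g. KVM_SEV_LAUNCH_START */
                    .data   = (__u64)(uintptr_t)data,
                    .sev_fd = sev_fd,
            };
            int ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

            if (ret)
                    /* cmd.error carries the command-specific status code */
                    fprintf(stderr, "SEV cmd %u failed, error %u\n", id, cmd.error);
            return ret;
    }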
+
+struct kvm_sev_launch_start {
+ __u32 handle;
+ __u32 policy;
+ __u64 dh_uaddr;
+ __u32 dh_len;
+ __u64 session_uaddr;
+ __u32 session_len;
+};
+
+struct kvm_sev_launch_update_data {
+ __u64 uaddr;
+ __u32 len;
+};
+
+
+struct kvm_sev_launch_secret {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+};
+
+struct kvm_sev_launch_measure {
+ __u64 uaddr;
+ __u32 len;
+};
+
+struct kvm_sev_guest_status {
+ __u32 handle;
+ __u32 policy;
+ __u32 state;
+};
+
+struct kvm_sev_dbg {
+ __u64 src_uaddr;
+ __u64 dst_uaddr;
+ __u32 len;
+};
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
@@ -1423,4 +1524,14 @@ struct kvm_assigned_msix_entry {
#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
#define KVM_ARM_DEV_PMU (1 << 2)
+struct kvm_hyperv_eventfd {
+ __u32 conn_id;
+ __s32 fd;
+ __u32 flags;
+ __u32 padding[3];
+};
+
+#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
+#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
+
#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index d84ce5c1c9aa..7d570c7bd117 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -65,7 +65,7 @@ struct sockaddr_l2tpip6 {
* TUNNEL_MODIFY - CONN_ID, udpcsum
* TUNNEL_GETSTATS - CONN_ID, (stats)
* TUNNEL_GET - CONN_ID, (...)
- * SESSION_CREATE - SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec
+ * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec
* SESSION_DELETE - SESSION_ID
* SESSION_MODIFY - SESSION_ID, data_seq
* SESSION_GET - SESSION_ID, (...)
@@ -94,10 +94,10 @@ enum {
L2TP_ATTR_NONE, /* no data */
L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
- L2TP_ATTR_OFFSET, /* u16 */
+ L2TP_ATTR_OFFSET, /* u16 (not used) */
L2TP_ATTR_DATA_SEQ, /* u16 */
L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
- L2TP_ATTR_L2SPEC_LEN, /* u8, enum l2tp_l2spec_type */
+ L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */
L2TP_ATTR_PROTO_VERSION, /* u8 */
L2TP_ATTR_IFNAME, /* string */
L2TP_ATTR_CONN_ID, /* u32 */
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index fc29efaa918c..8254c937c9f4 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -264,10 +264,4 @@
#endif /* __GLIBC__ */
-/* Definitions for if_ether.h */
-/* allow libcs like musl to deactivate this, glibc does not implement this. */
-#ifndef __UAPI_DEF_ETHHDR
-#define __UAPI_DEF_ETHHDR 1
-#endif
-
#endif /* _UAPI_LIBC_COMPAT_H */
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 42d1a434af29..f9a1be7fc696 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -75,14 +75,23 @@ struct nvm_ioctl_create_simple {
__u32 lun_end;
};
+struct nvm_ioctl_create_extended {
+ __u16 lun_begin;
+ __u16 lun_end;
+ __u16 op;
+ __u16 rsv;
+};
+
enum {
NVM_CONFIG_TYPE_SIMPLE = 0,
+ NVM_CONFIG_TYPE_EXTENDED = 1,
};
struct nvm_ioctl_create_conf {
__u32 type;
union {
struct nvm_ioctl_create_simple s;
+ struct nvm_ioctl_create_extended e;
};
};
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index c3aef4316fbf..f189931042a7 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -47,6 +47,7 @@
#define LIRC_MODE_RAW 0x00000001
#define LIRC_MODE_PULSE 0x00000002
#define LIRC_MODE_MODE2 0x00000004
+#define LIRC_MODE_SCANCODE 0x00000008
#define LIRC_MODE_LIRCCODE 0x00000010
@@ -64,6 +65,7 @@
#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW)
#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE)
#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2)
+#define LIRC_CAN_REC_SCANCODE LIRC_MODE2REC(LIRC_MODE_SCANCODE)
#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE)
#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK)
@@ -131,4 +133,85 @@
#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32)
+/*
+ * struct lirc_scancode - decoded scancode with protocol for use with
+ * LIRC_MODE_SCANCODE
+ *
+ * @timestamp: Timestamp in nanoseconds using CLOCK_MONOTONIC when IR
+ * was decoded.
+ * @flags: should be 0 for transmit. When receiving scancodes,
+ * LIRC_SCANCODE_FLAG_TOGGLE or LIRC_SCANCODE_FLAG_REPEAT can be set
+ * depending on the protocol
+ * @rc_proto: see enum rc_proto
+ * @keycode: the translated keycode. Set to 0 for transmit.
+ * @scancode: the scancode received or to be sent
+ */
+struct lirc_scancode {
+ __u64 timestamp;
+ __u16 flags;
+ __u16 rc_proto;
+ __u32 keycode;
+ __u64 scancode;
+};
+
+/* Set if the toggle bit of rc-5 or rc-6 is enabled */
+#define LIRC_SCANCODE_FLAG_TOGGLE 1
+/* Set if this is a nec or sanyo repeat */
+#define LIRC_SCANCODE_FLAG_REPEAT 2
+
+/**
+ * enum rc_proto - the Remote Controller protocol
+ *
+ * @RC_PROTO_UNKNOWN: Protocol not known
+ * @RC_PROTO_OTHER: Protocol known but proprietary
+ * @RC_PROTO_RC5: Philips RC5 protocol
+ * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol
+ * @RC_PROTO_RC5_SZ: StreamZap variant of RC5
+ * @RC_PROTO_JVC: JVC protocol
+ * @RC_PROTO_SONY12: Sony 12 bit protocol
+ * @RC_PROTO_SONY15: Sony 15 bit protocol
+ * @RC_PROTO_SONY20: Sony 20 bit protocol
+ * @RC_PROTO_NEC: NEC protocol
+ * @RC_PROTO_NECX: Extended NEC protocol
+ * @RC_PROTO_NEC32: NEC 32 bit protocol
+ * @RC_PROTO_SANYO: Sanyo protocol
+ * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard
+ * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse
+ * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol
+ * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol
+ * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol
+ * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol
+ * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol
+ * @RC_PROTO_SHARP: Sharp protocol
+ * @RC_PROTO_XMP: XMP protocol
+ * @RC_PROTO_CEC: CEC protocol
+ * @RC_PROTO_IMON: iMon Pad protocol
+ */
+enum rc_proto {
+ RC_PROTO_UNKNOWN = 0,
+ RC_PROTO_OTHER = 1,
+ RC_PROTO_RC5 = 2,
+ RC_PROTO_RC5X_20 = 3,
+ RC_PROTO_RC5_SZ = 4,
+ RC_PROTO_JVC = 5,
+ RC_PROTO_SONY12 = 6,
+ RC_PROTO_SONY15 = 7,
+ RC_PROTO_SONY20 = 8,
+ RC_PROTO_NEC = 9,
+ RC_PROTO_NECX = 10,
+ RC_PROTO_NEC32 = 11,
+ RC_PROTO_SANYO = 12,
+ RC_PROTO_MCIR2_KBD = 13,
+ RC_PROTO_MCIR2_MSE = 14,
+ RC_PROTO_RC6_0 = 15,
+ RC_PROTO_RC6_6A_20 = 16,
+ RC_PROTO_RC6_6A_24 = 17,
+ RC_PROTO_RC6_6A_32 = 18,
+ RC_PROTO_RC6_MCE = 19,
+ RC_PROTO_SHARP = 20,
+ RC_PROTO_XMP = 21,
+ RC_PROTO_CEC = 22,
+ RC_PROTO_IMON = 23,
+};
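A sketch of receiving in the new mode (a hedged example: lirc_fd is an open /dev/lircN whose features advertise LIRC_CAN_REC_SCANCODE, and <stdio.h>, <unistd.h> and <sys/ioctl.h> are assumed):

    __u32 mode = LIRC_MODE_SCANCODE;
    struct lirc_scancode sc;

    if (ioctl(lirc_fd, LIRC_SET_REC_MODE, &mode) == 0 &&
        read(lirc_fd, &sc, sizeof(sc)) == sizeof(sc))
            printf("proto %u scancode 0x%llx keycode %u%s\n",
                   sc.rc_proto, (unsigned long long)sc.scancode, sc.keycode,
                   (sc.flags & LIRC_SCANCODE_FLAG_REPEAT) ? " (repeat)" : "");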
+
#endif
diff --git a/include/uapi/linux/lp.h b/include/uapi/linux/lp.h
index dafcfe4e4834..8589a27037d7 100644
--- a/include/uapi/linux/lp.h
+++ b/include/uapi/linux/lp.h
@@ -8,6 +8,8 @@
#ifndef _UAPI_LINUX_LP_H
#define _UAPI_LINUX_LP_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
/*
* Per POSIX guidelines, this module reserves the LP and lp prefixes
@@ -88,7 +90,15 @@
#define LPGETSTATS 0x060d /* get statistics (struct lp_stats) */
#endif
#define LPGETFLAGS 0x060e /* get status flags */
-#define LPSETTIMEOUT 0x060f /* set parport timeout */
+#define LPSETTIMEOUT_OLD 0x060f /* set parport timeout */
+#define LPSETTIMEOUT_NEW \
+ _IOW(0x6, 0xf, __s64[2]) /* set parport timeout */
+#if __BITS_PER_LONG == 64
+#define LPSETTIMEOUT LPSETTIMEOUT_OLD
+#else
+#define LPSETTIMEOUT (sizeof(time_t) > sizeof(__kernel_long_t) ? \
+ LPSETTIMEOUT_NEW : LPSETTIMEOUT_OLD)
+#endif
/* timeout for printk'ing a timeout, in jiffies (100ths of a second).
This is also used for re-checking error conditions if LP_ABORT is
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index b9b9446095e9..c7e9a5cba24e 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MEDIA_H
@@ -42,108 +38,66 @@ struct media_device_info {
__u32 reserved[31];
};
-#define MEDIA_ENT_ID_FLAG_NEXT (1 << 31)
-
-/*
- * Initial value to be used when a new entity is created
- * Drivers should change it to something useful
- */
-#define MEDIA_ENT_F_UNKNOWN 0x00000000
-
/*
* Base number ranges for entity functions
*
- * NOTE: those ranges and entity function number are phased just to
- * make it easier to maintain this file. Userspace should not rely on
- * the ranges to identify a group of function types, as newer
- * functions can be added with any name within the full u32 range.
- */
-#define MEDIA_ENT_F_BASE 0x00000000
-#define MEDIA_ENT_F_OLD_BASE 0x00010000
-#define MEDIA_ENT_F_OLD_SUBDEV_BASE 0x00020000
-
-/*
- * DVB entities
+ * NOTE: Userspace should not rely on these ranges to identify a group
+ * of function types, as newer functions can be added with any name within
+ * the full u32 range.
+ *
+ * Some older functions use the MEDIA_ENT_F_OLD_*_BASE range. Do not
+ * change this; it is kept for backwards compatibility. When adding new
+ * functions always use MEDIA_ENT_F_BASE.
*/
-#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 0x00001)
-#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 0x00002)
-#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 0x00003)
-#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 0x00004)
+#define MEDIA_ENT_F_BASE 0x00000000
+#define MEDIA_ENT_F_OLD_BASE 0x00010000
+#define MEDIA_ENT_F_OLD_SUBDEV_BASE 0x00020000
/*
- * I/O entities
+ * Initial value to be used when a new entity is created.
+ * Drivers should change it to something useful.
*/
-#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 0x01001)
-#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 0x01002)
-#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 0x01003)
+#define MEDIA_ENT_F_UNKNOWN MEDIA_ENT_F_BASE
/*
- * Analog TV IF-PLL decoders
- *
- * It is a responsibility of the master/bridge drivers to create links
- * for MEDIA_ENT_F_IF_VID_DECODER and MEDIA_ENT_F_IF_AUD_DECODER.
+ * Subdevs are initialized with MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN in order
+ * to preserve backward compatibility. Drivers must change to the proper
+ * subdev type before registering the entity.
*/
-#define MEDIA_ENT_F_IF_VID_DECODER (MEDIA_ENT_F_BASE + 0x02001)
-#define MEDIA_ENT_F_IF_AUD_DECODER (MEDIA_ENT_F_BASE + 0x02002)
+#define MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN MEDIA_ENT_F_OLD_SUBDEV_BASE
/*
- * Audio Entity Functions
+ * DVB entity functions
*/
-#define MEDIA_ENT_F_AUDIO_CAPTURE (MEDIA_ENT_F_BASE + 0x03001)
-#define MEDIA_ENT_F_AUDIO_PLAYBACK (MEDIA_ENT_F_BASE + 0x03002)
-#define MEDIA_ENT_F_AUDIO_MIXER (MEDIA_ENT_F_BASE + 0x03003)
+#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 0x00001)
+#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 0x00002)
+#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 0x00003)
+#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 0x00004)
/*
- * Processing entities
+ * I/O entity functions
*/
-#define MEDIA_ENT_F_PROC_VIDEO_COMPOSER (MEDIA_ENT_F_BASE + 0x4001)
-#define MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER (MEDIA_ENT_F_BASE + 0x4002)
-#define MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV (MEDIA_ENT_F_BASE + 0x4003)
-#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004)
-#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005)
-#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
+#define MEDIA_ENT_F_IO_V4L (MEDIA_ENT_F_OLD_BASE + 1)
+#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 0x01001)
+#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 0x01002)
+#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 0x01003)
/*
- * Switch and bridge entitites
+ * Sensor functions
*/
-#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001)
-#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002)
+#define MEDIA_ENT_F_CAM_SENSOR (MEDIA_ENT_F_OLD_SUBDEV_BASE + 1)
+#define MEDIA_ENT_F_FLASH (MEDIA_ENT_F_OLD_SUBDEV_BASE + 2)
+#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
/*
- * Connectors
+ * Video decoder functions
*/
-/* It is a responsibility of the entity drivers to add connectors and links */
-#ifdef __KERNEL__
- /*
- * For now, it should not be used in userspace, as some
- * definitions may change
- */
-
-#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 0x30001)
-#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 0x30002)
-#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 0x30003)
-
-#endif
+#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
+#define MEDIA_ENT_F_DTV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
/*
- * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and
- * MEDIA_ENT_F_OLD_SUBDEV_BASE are kept to keep backward compatibility
- * with the legacy v1 API.The number range is out of range by purpose:
- * several previously reserved numbers got excluded from this range.
+ * Digital TV, analog TV, radio and/or software defined radio tuner functions.
*
- * Subdevs are initialized with MEDIA_ENT_T_V4L2_SUBDEV_UNKNOWN,
- * in order to preserve backward compatibility.
- * Drivers must change to the proper subdev type before
- * registering the entity.
- */
-
-#define MEDIA_ENT_F_IO_V4L (MEDIA_ENT_F_OLD_BASE + 1)
-
-#define MEDIA_ENT_F_CAM_SENSOR (MEDIA_ENT_F_OLD_SUBDEV_BASE + 1)
-#define MEDIA_ENT_F_FLASH (MEDIA_ENT_F_OLD_SUBDEV_BASE + 2)
-#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
-#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
-/*
* It is a responsibility of the master/bridge drivers to add connectors
* and links for MEDIA_ENT_F_TUNER. Please notice that some old tuners
* may require the usage of separate I2C chips to decode analog TV signals,
@@ -151,49 +105,46 @@ struct media_device_info {
* On such cases, the IF-PLL staging is mapped via one or two entities:
* MEDIA_ENT_F_IF_VID_DECODER and/or MEDIA_ENT_F_IF_AUD_DECODER.
*/
-#define MEDIA_ENT_F_TUNER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 5)
+#define MEDIA_ENT_F_TUNER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 5)
-#define MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN MEDIA_ENT_F_OLD_SUBDEV_BASE
+/*
+ * Analog TV IF-PLL decoder functions
+ *
+ * It is a responsibility of the master/bridge drivers to create links
+ * for MEDIA_ENT_F_IF_VID_DECODER and MEDIA_ENT_F_IF_AUD_DECODER.
+ */
+#define MEDIA_ENT_F_IF_VID_DECODER (MEDIA_ENT_F_BASE + 0x02001)
+#define MEDIA_ENT_F_IF_AUD_DECODER (MEDIA_ENT_F_BASE + 0x02002)
-#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API)
+/*
+ * Audio entity functions
+ */
+#define MEDIA_ENT_F_AUDIO_CAPTURE (MEDIA_ENT_F_BASE + 0x03001)
+#define MEDIA_ENT_F_AUDIO_PLAYBACK (MEDIA_ENT_F_BASE + 0x03002)
+#define MEDIA_ENT_F_AUDIO_MIXER (MEDIA_ENT_F_BASE + 0x03003)
/*
- * Legacy symbols used to avoid userspace compilation breakages
- *
- * Those symbols map the entity function into types and should be
- * used only on legacy programs for legacy hardware. Don't rely
- * on those for MEDIA_IOC_G_TOPOLOGY.
+ * Processing entity functions
*/
-#define MEDIA_ENT_TYPE_SHIFT 16
-#define MEDIA_ENT_TYPE_MASK 0x00ff0000
-#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff
-
-/* End of the old subdev reserved numberspace */
-#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_T_DEVNODE | \
- MEDIA_ENT_SUBTYPE_MASK)
-
-#define MEDIA_ENT_T_DEVNODE MEDIA_ENT_F_OLD_BASE
-#define MEDIA_ENT_T_DEVNODE_V4L MEDIA_ENT_F_IO_V4L
-#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_T_DEVNODE + 2)
-#define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_T_DEVNODE + 3)
-#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_T_DEVNODE + 4)
-
-#define MEDIA_ENT_T_UNKNOWN MEDIA_ENT_F_UNKNOWN
-#define MEDIA_ENT_T_V4L2_VIDEO MEDIA_ENT_F_IO_V4L
-#define MEDIA_ENT_T_V4L2_SUBDEV MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN
-#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR MEDIA_ENT_F_CAM_SENSOR
-#define MEDIA_ENT_T_V4L2_SUBDEV_FLASH MEDIA_ENT_F_FLASH
-#define MEDIA_ENT_T_V4L2_SUBDEV_LENS MEDIA_ENT_F_LENS
-#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER
-#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER
+#define MEDIA_ENT_F_PROC_VIDEO_COMPOSER (MEDIA_ENT_F_BASE + 0x4001)
+#define MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER (MEDIA_ENT_F_BASE + 0x4002)
+#define MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV (MEDIA_ENT_F_BASE + 0x4003)
+#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004)
+#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005)
+#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
-/* Obsolete symbol for media_version, no longer used in the kernel */
-#define MEDIA_API_VERSION KERNEL_VERSION(0, 1, 0)
-#endif
+/*
+ * Switch and bridge entity functions
+ */
+#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001)
+#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002)
/* Entity flags */
-#define MEDIA_ENT_FL_DEFAULT (1 << 0)
-#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
+#define MEDIA_ENT_FL_DEFAULT (1 << 0)
+#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
+
+/* OR with the entity id value to find the next entity */
+#define MEDIA_ENT_ID_FLAG_NEXT (1 << 31)
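A sketch of the enumeration loop this flag enables (fd is an open /dev/mediaN node, <stdio.h> and <sys/ioctl.h> are assumed, and MEDIA_IOC_ENUM_ENTITIES is defined further down; the loop ends when the driver reports there is no next entity):

    struct media_entity_desc desc = { .id = MEDIA_ENT_ID_FLAG_NEXT };

    while (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &desc) == 0) {
            printf("entity %u: %s\n", desc.id, desc.name);
            desc.id |= MEDIA_ENT_ID_FLAG_NEXT;  /* ask for the entity after this one */
    }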
struct media_entity_desc {
__u32 id;
@@ -214,7 +165,7 @@ struct media_entity_desc {
__u32 minor;
} dev;
-#if 1
+#if !defined(__KERNEL__)
/*
* TODO: this shouldn't have been added without
* actual drivers that use this. When the first real driver
@@ -225,24 +176,17 @@ struct media_entity_desc {
* contain the subdevice information. In addition, struct dev
* can only refer to a single device, and not to multiple (e.g.
* pcm and mixer devices).
- *
- * So for now mark this as a to do.
*/
struct {
__u32 card;
__u32 device;
__u32 subdevice;
} alsa;
-#endif
-#if 1
/*
* DEPRECATED: previous node specifications. Kept just to
- * avoid breaking compilation, but media_entity_desc.dev
- * should be used instead. In particular, alsa and dvb
- * fields below are wrong: for all devnodes, there should
- * be just major/minor inside the struct, as this is enough
- * to represent any devnode, no matter what type.
+ * avoid breaking compilation. Use media_entity_desc.dev
+ * instead.
*/
struct {
__u32 major;
@@ -261,9 +205,9 @@ struct media_entity_desc {
};
};
-#define MEDIA_PAD_FL_SINK (1 << 0)
-#define MEDIA_PAD_FL_SOURCE (1 << 1)
-#define MEDIA_PAD_FL_MUST_CONNECT (1 << 2)
+#define MEDIA_PAD_FL_SINK (1 << 0)
+#define MEDIA_PAD_FL_SOURCE (1 << 1)
+#define MEDIA_PAD_FL_MUST_CONNECT (1 << 2)
struct media_pad_desc {
__u32 entity; /* entity ID */
@@ -272,13 +216,13 @@ struct media_pad_desc {
__u32 reserved[2];
};
-#define MEDIA_LNK_FL_ENABLED (1 << 0)
-#define MEDIA_LNK_FL_IMMUTABLE (1 << 1)
-#define MEDIA_LNK_FL_DYNAMIC (1 << 2)
+#define MEDIA_LNK_FL_ENABLED (1 << 0)
+#define MEDIA_LNK_FL_IMMUTABLE (1 << 1)
+#define MEDIA_LNK_FL_DYNAMIC (1 << 2)
-#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28)
-# define MEDIA_LNK_FL_DATA_LINK (0 << 28)
-# define MEDIA_LNK_FL_INTERFACE_LINK (1 << 28)
+#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28)
+# define MEDIA_LNK_FL_DATA_LINK (0 << 28)
+# define MEDIA_LNK_FL_INTERFACE_LINK (1 << 28)
struct media_link_desc {
struct media_pad_desc source;
@@ -298,57 +242,47 @@ struct media_links_enum {
/* Interface type ranges */
-#define MEDIA_INTF_T_DVB_BASE 0x00000100
-#define MEDIA_INTF_T_V4L_BASE 0x00000200
-#define MEDIA_INTF_T_ALSA_BASE 0x00000300
+#define MEDIA_INTF_T_DVB_BASE 0x00000100
+#define MEDIA_INTF_T_V4L_BASE 0x00000200
/* Interface types */
-#define MEDIA_INTF_T_DVB_FE (MEDIA_INTF_T_DVB_BASE)
-#define MEDIA_INTF_T_DVB_DEMUX (MEDIA_INTF_T_DVB_BASE + 1)
-#define MEDIA_INTF_T_DVB_DVR (MEDIA_INTF_T_DVB_BASE + 2)
-#define MEDIA_INTF_T_DVB_CA (MEDIA_INTF_T_DVB_BASE + 3)
-#define MEDIA_INTF_T_DVB_NET (MEDIA_INTF_T_DVB_BASE + 4)
-
-#define MEDIA_INTF_T_V4L_VIDEO (MEDIA_INTF_T_V4L_BASE)
-#define MEDIA_INTF_T_V4L_VBI (MEDIA_INTF_T_V4L_BASE + 1)
-#define MEDIA_INTF_T_V4L_RADIO (MEDIA_INTF_T_V4L_BASE + 2)
-#define MEDIA_INTF_T_V4L_SUBDEV (MEDIA_INTF_T_V4L_BASE + 3)
-#define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4)
-#define MEDIA_INTF_T_V4L_TOUCH (MEDIA_INTF_T_V4L_BASE + 5)
-
-#define MEDIA_INTF_T_ALSA_PCM_CAPTURE (MEDIA_INTF_T_ALSA_BASE)
-#define MEDIA_INTF_T_ALSA_PCM_PLAYBACK (MEDIA_INTF_T_ALSA_BASE + 1)
-#define MEDIA_INTF_T_ALSA_CONTROL (MEDIA_INTF_T_ALSA_BASE + 2)
-#define MEDIA_INTF_T_ALSA_COMPRESS (MEDIA_INTF_T_ALSA_BASE + 3)
-#define MEDIA_INTF_T_ALSA_RAWMIDI (MEDIA_INTF_T_ALSA_BASE + 4)
-#define MEDIA_INTF_T_ALSA_HWDEP (MEDIA_INTF_T_ALSA_BASE + 5)
-#define MEDIA_INTF_T_ALSA_SEQUENCER (MEDIA_INTF_T_ALSA_BASE + 6)
-#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
+#define MEDIA_INTF_T_DVB_FE (MEDIA_INTF_T_DVB_BASE)
+#define MEDIA_INTF_T_DVB_DEMUX (MEDIA_INTF_T_DVB_BASE + 1)
+#define MEDIA_INTF_T_DVB_DVR (MEDIA_INTF_T_DVB_BASE + 2)
+#define MEDIA_INTF_T_DVB_CA (MEDIA_INTF_T_DVB_BASE + 3)
+#define MEDIA_INTF_T_DVB_NET (MEDIA_INTF_T_DVB_BASE + 4)
+
+#define MEDIA_INTF_T_V4L_VIDEO (MEDIA_INTF_T_V4L_BASE)
+#define MEDIA_INTF_T_V4L_VBI (MEDIA_INTF_T_V4L_BASE + 1)
+#define MEDIA_INTF_T_V4L_RADIO (MEDIA_INTF_T_V4L_BASE + 2)
+#define MEDIA_INTF_T_V4L_SUBDEV (MEDIA_INTF_T_V4L_BASE + 3)
+#define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4)
+#define MEDIA_INTF_T_V4L_TOUCH (MEDIA_INTF_T_V4L_BASE + 5)
+
+#if defined(__KERNEL__)
/*
- * MC next gen API definitions
+ * Connector functions
*
- * NOTE: The declarations below are close to the MC RFC for the Media
- * Controller, the next generation. Yet, there are a few adjustments
- * to do, as we want to be able to have a functional API before
- * the MC properties change. Those will be properly marked below.
- * Please also notice that I removed "num_pads", "num_links",
- * from the proposal, as a proper userspace application will likely
- * use lists for pads/links, just as we intend to do in Kernelspace.
- * The API definition should be freed from fields that are bound to
- * some specific data structure.
+ * For now these should not be used in userspace, as some definitions may
+ * change.
*
- * FIXME: Currently, I opted to name the new types as "media_v2", as this
- * won't cause any conflict with the Kernelspace namespace, nor with
- * the previous kAPI media_*_desc namespace. This can be changed
- * later, before the adding this API upstream.
+ * It is the responsibility of the entity drivers to add connectors and links.
*/
+#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 0x30001)
+#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 0x30002)
+#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 0x30003)
+#endif
+
+/*
+ * MC next gen API definitions
+ */
struct media_v2_entity {
__u32 id;
- char name[64]; /* FIXME: move to a property? (RFC says so) */
+ char name[64];
__u32 function; /* Main function of the entity */
__u32 reserved[6];
} __attribute__ ((packed));
@@ -408,10 +342,62 @@ struct media_v2_topology {
/* ioctls */
-#define MEDIA_IOC_DEVICE_INFO _IOWR('|', 0x00, struct media_device_info)
-#define MEDIA_IOC_ENUM_ENTITIES _IOWR('|', 0x01, struct media_entity_desc)
-#define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum)
-#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
-#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
+#define MEDIA_IOC_DEVICE_INFO _IOWR('|', 0x00, struct media_device_info)
+#define MEDIA_IOC_ENUM_ENTITIES _IOWR('|', 0x01, struct media_entity_desc)
+#define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum)
+#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
+#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
+
+#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API)
+
+/*
+ * Legacy symbols used to avoid userspace compilation breakages.
+ * Do not use any of this in new applications!
+ *
+ * Those symbols map the entity function into types and should be
+ * used only on legacy programs for legacy hardware. Don't rely
+ * on those for MEDIA_IOC_G_TOPOLOGY.
+ */
+#define MEDIA_ENT_TYPE_SHIFT 16
+#define MEDIA_ENT_TYPE_MASK 0x00ff0000
+#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff
+
+#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_F_OLD_BASE | \
+ MEDIA_ENT_SUBTYPE_MASK)
+
+#define MEDIA_ENT_T_DEVNODE MEDIA_ENT_F_OLD_BASE
+#define MEDIA_ENT_T_DEVNODE_V4L MEDIA_ENT_F_IO_V4L
+#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_F_OLD_BASE + 2)
+#define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_F_OLD_BASE + 3)
+#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_F_OLD_BASE + 4)
+
+#define MEDIA_ENT_T_UNKNOWN MEDIA_ENT_F_UNKNOWN
+#define MEDIA_ENT_T_V4L2_VIDEO MEDIA_ENT_F_IO_V4L
+#define MEDIA_ENT_T_V4L2_SUBDEV MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN
+#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR MEDIA_ENT_F_CAM_SENSOR
+#define MEDIA_ENT_T_V4L2_SUBDEV_FLASH MEDIA_ENT_F_FLASH
+#define MEDIA_ENT_T_V4L2_SUBDEV_LENS MEDIA_ENT_F_LENS
+#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER
+#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER
+
+/*
+ * There is still no ALSA support in the media controller. These
+ * defines should not have been added and we leave them here only
+ * in case some application tries to use these defines.
+ */
+#define MEDIA_INTF_T_ALSA_BASE 0x00000300
+#define MEDIA_INTF_T_ALSA_PCM_CAPTURE (MEDIA_INTF_T_ALSA_BASE)
+#define MEDIA_INTF_T_ALSA_PCM_PLAYBACK (MEDIA_INTF_T_ALSA_BASE + 1)
+#define MEDIA_INTF_T_ALSA_CONTROL (MEDIA_INTF_T_ALSA_BASE + 2)
+#define MEDIA_INTF_T_ALSA_COMPRESS (MEDIA_INTF_T_ALSA_BASE + 3)
+#define MEDIA_INTF_T_ALSA_RAWMIDI (MEDIA_INTF_T_ALSA_BASE + 4)
+#define MEDIA_INTF_T_ALSA_HWDEP (MEDIA_INTF_T_ALSA_BASE + 5)
+#define MEDIA_INTF_T_ALSA_SEQUENCER (MEDIA_INTF_T_ALSA_BASE + 6)
+#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
+
+/* Obsolete symbol for media_version, no longer used in the kernel */
+#define MEDIA_API_VERSION KERNEL_VERSION(0, 1, 0)
+
+#endif
#endif /* __LINUX_MEDIA_H */
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 4e01ad7ffe98..5891d7614c8c 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -31,7 +31,7 @@
* enum membarrier_cmd - membarrier system call command
* @MEMBARRIER_CMD_QUERY: Query the set of supported commands. It returns
* a bitmask of valid commands.
- * @MEMBARRIER_CMD_SHARED: Execute a memory barrier on all running threads.
+ * @MEMBARRIER_CMD_GLOBAL: Execute a memory barrier on all running threads.
* Upon return from system call, the caller thread
* is ensured that all running threads have passed
* through a state where all memory accesses to
@@ -40,6 +40,28 @@
* (non-running threads are de facto in such a
* state). This covers threads from all processes
* running on the system. This command returns 0.
+ * @MEMBARRIER_CMD_GLOBAL_EXPEDITED:
+ * Execute a memory barrier on all running threads
+ * of all processes which previously registered
+ * with MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.
+ * Upon return from system call, the caller thread
+ * is ensured that all running threads have passed
+ * through a state where all memory accesses to
+ * user-space addresses match program order between
+ * entry to and return from the system call
+ * (non-running threads are de facto in such a
+ * state). This only covers threads from processes
+ * which registered with
+ * MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.
+ * This command returns 0. Given that
+ * registration is about the intent to receive
+ * the barriers, it is valid to invoke
+ * MEMBARRIER_CMD_GLOBAL_EXPEDITED from a
+ * non-registered process.
+ * @MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
+ * Register the process intent to receive
+ * MEMBARRIER_CMD_GLOBAL_EXPEDITED memory
+ * barriers. Always returns 0.
* @MEMBARRIER_CMD_PRIVATE_EXPEDITED:
* Execute a memory barrier on each running
* thread belonging to the same process as the current
@@ -51,7 +73,7 @@
* to and return from the system call
* (non-running threads are de facto in such a
* state). This only covers threads from the
- * same processes as the caller thread. This
+ * same process as the caller thread. This
* command returns 0 on success. The
* "expedited" commands complete faster than
* the non-expedited ones, they never block,
@@ -64,18 +86,54 @@
* Register the process intent to use
* MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
* returns 0.
+ * @MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
+ * In addition to providing the memory ordering
+ * guarantees described for
+ * MEMBARRIER_CMD_PRIVATE_EXPEDITED, ensure
+ * that, upon return from the system call, all
+ * running sibling threads of the caller have
+ * executed a core serializing
+ * instruction (architectures are required to
+ * guarantee that non-running threads issue
+ * core serializing instructions before they
+ * resume user-space execution). This only
+ * covers threads from the same process as the
+ * caller thread. This command returns 0 on
+ * success. The "expedited" commands complete
+ * faster than the non-expedited ones, they
+ * never block, but have the downside of
+ * causing extra overhead. If this command is
+ * not implemented by an architecture, -EINVAL
+ * is returned. A process needs to register its
+ * intent to use the private expedited sync
+ * core command prior to using it, otherwise
+ * this command returns -EPERM.
+ * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
+ * Register the process intent to use
+ * MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE.
+ * If this command is not implemented by an
+ * architecture, -EINVAL is returned.
+ * Returns 0 on success.
+ * @MEMBARRIER_CMD_SHARED:
+ * Alias to MEMBARRIER_CMD_GLOBAL. Provided for
+ * header backward compatibility.
*
* Command to be passed to the membarrier system call. The commands need to
* be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
* the value 0.
*/
enum membarrier_cmd {
- MEMBARRIER_CMD_QUERY = 0,
- MEMBARRIER_CMD_SHARED = (1 << 0),
- /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
- /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
- MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
- MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_GLOBAL = (1 << 0),
+ MEMBARRIER_CMD_GLOBAL_EXPEDITED = (1 << 1),
+ MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2),
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5),
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6),
+
+ /* Alias for header backward compatibility. */
+ MEMBARRIER_CMD_SHARED = MEMBARRIER_CMD_GLOBAL,
};
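A sketch of the register-then-use pattern for the private expedited command (a hedged example invoking membarrier(2) directly through syscall(2)):

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int sys_membarrier(int cmd, int flags)
    {
            return syscall(__NR_membarrier, cmd, flags);
    }

    int main(void)
    {
            /* Register once per process before using the private expedited command. */
            if (sys_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
                    return 1;

            /* Later, from any thread: expedited barrier across this process only. */
            return sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) ? 1 : 0;
    }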
#endif /* _UAPI_LINUX_MEMBARRIER_H */
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index a45d0754102e..fde753735aba 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -10,7 +10,9 @@
* The MS-DOS filesystem constants/structures
*/
+#ifndef SECTOR_SIZE
#define SECTOR_SIZE 512 /* sector size (bytes) */
+#endif
#define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */
#define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */
#define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */
diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h
index 5d5ab81dc9be..e4a0d9a9a9e8 100644
--- a/include/uapi/linux/msg.h
+++ b/include/uapi/linux/msg.h
@@ -7,6 +7,7 @@
/* ipcs ctl commands */
#define MSG_STAT 11
#define MSG_INFO 12
+#define MSG_STAT_ANY 13
/* msgrcv options */
#define MSG_NOERROR 010000 /* no error if message is too big */
diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h
new file mode 100644
index 000000000000..4c292ecbb748
--- /dev/null
+++ b/include/uapi/linux/ncsi.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright Samuel Mendoza-Jonas, IBM Corporation 2018.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __UAPI_NCSI_NETLINK_H__
+#define __UAPI_NCSI_NETLINK_H__
+
+/**
+ * enum ncsi_nl_commands - supported NCSI commands
+ *
+ * @NCSI_CMD_UNSPEC: unspecified command to catch errors
+ * @NCSI_CMD_PKG_INFO: list package and channel attributes. Requires
+ * NCSI_ATTR_IFINDEX. If NCSI_ATTR_PACKAGE_ID is specified, returns the
+ * specific package and its channels - otherwise a dump request returns
+ * all packages and their associated channels.
+ * @NCSI_CMD_SET_INTERFACE: set preferred package and channel combination.
+ * Requires NCSI_ATTR_IFINDEX and the preferred NCSI_ATTR_PACKAGE_ID and
+ * optionally the preferred NCSI_ATTR_CHANNEL_ID.
+ * @NCSI_CMD_CLEAR_INTERFACE: clear any preferred package/channel combination.
+ * Requires NCSI_ATTR_IFINDEX.
+ * @NCSI_CMD_MAX: highest command number
+ */
+enum ncsi_nl_commands {
+ NCSI_CMD_UNSPEC,
+ NCSI_CMD_PKG_INFO,
+ NCSI_CMD_SET_INTERFACE,
+ NCSI_CMD_CLEAR_INTERFACE,
+
+ __NCSI_CMD_AFTER_LAST,
+ NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1
+};
+
+/**
+ * enum ncsi_nl_attrs - General NCSI netlink attributes
+ *
+ * @NCSI_ATTR_UNSPEC: unspecified attributes to catch errors
+ * @NCSI_ATTR_IFINDEX: ifindex of network device using NCSI
+ * @NCSI_ATTR_PACKAGE_LIST: nested array of NCSI_PKG_ATTR attributes
+ * @NCSI_ATTR_PACKAGE_ID: package ID
+ * @NCSI_ATTR_CHANNEL_ID: channel ID
+ * @NCSI_ATTR_MAX: highest attribute number
+ */
+enum ncsi_nl_attrs {
+ NCSI_ATTR_UNSPEC,
+ NCSI_ATTR_IFINDEX,
+ NCSI_ATTR_PACKAGE_LIST,
+ NCSI_ATTR_PACKAGE_ID,
+ NCSI_ATTR_CHANNEL_ID,
+
+ __NCSI_ATTR_AFTER_LAST,
+ NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum ncsi_nl_pkg_attrs - NCSI netlink package-specific attributes
+ *
+ * @NCSI_PKG_ATTR_UNSPEC: unspecified attributes to catch errors
+ * @NCSI_PKG_ATTR: nested array of package attributes
+ * @NCSI_PKG_ATTR_ID: package ID
+ * @NCSI_PKG_ATTR_FORCED: flag signifying a package has been set as preferred
+ * @NCSI_PKG_ATTR_CHANNEL_LIST: nested array of NCSI_CHANNEL_ATTR attributes
+ * @NCSI_PKG_ATTR_MAX: highest attribute number
+ */
+enum ncsi_nl_pkg_attrs {
+ NCSI_PKG_ATTR_UNSPEC,
+ NCSI_PKG_ATTR,
+ NCSI_PKG_ATTR_ID,
+ NCSI_PKG_ATTR_FORCED,
+ NCSI_PKG_ATTR_CHANNEL_LIST,
+
+ __NCSI_PKG_ATTR_AFTER_LAST,
+ NCSI_PKG_ATTR_MAX = __NCSI_PKG_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum ncsi_nl_channel_attrs - NCSI netlink channel-specific attributes
+ *
+ * @NCSI_CHANNEL_ATTR_UNSPEC: unspecified attributes to catch errors
+ * @NCSI_CHANNEL_ATTR: nested array of channel attributes
+ * @NCSI_CHANNEL_ATTR_ID: channel ID
+ * @NCSI_CHANNEL_ATTR_VERSION_MAJOR: channel major version number
+ * @NCSI_CHANNEL_ATTR_VERSION_MINOR: channel minor version number
+ * @NCSI_CHANNEL_ATTR_VERSION_STR: channel version string
+ * @NCSI_CHANNEL_ATTR_LINK_STATE: channel link state flags
+ * @NCSI_CHANNEL_ATTR_ACTIVE: channels with this flag are in
+ * NCSI_CHANNEL_ACTIVE state
+ * @NCSI_CHANNEL_ATTR_FORCED: flag signifying a channel has been set as
+ * preferred
+ * @NCSI_CHANNEL_ATTR_VLAN_LIST: nested array of NCSI_CHANNEL_ATTR_VLAN_IDs
+ * @NCSI_CHANNEL_ATTR_VLAN_ID: VLAN ID being filtered on this channel
+ * @NCSI_CHANNEL_ATTR_MAX: highest attribute number
+ */
+enum ncsi_nl_channel_attrs {
+ NCSI_CHANNEL_ATTR_UNSPEC,
+ NCSI_CHANNEL_ATTR,
+ NCSI_CHANNEL_ATTR_ID,
+ NCSI_CHANNEL_ATTR_VERSION_MAJOR,
+ NCSI_CHANNEL_ATTR_VERSION_MINOR,
+ NCSI_CHANNEL_ATTR_VERSION_STR,
+ NCSI_CHANNEL_ATTR_LINK_STATE,
+ NCSI_CHANNEL_ATTR_ACTIVE,
+ NCSI_CHANNEL_ATTR_FORCED,
+ NCSI_CHANNEL_ATTR_VLAN_LIST,
+ NCSI_CHANNEL_ATTR_VLAN_ID,
+
+ __NCSI_CHANNEL_ATTR_AFTER_LAST,
+ NCSI_CHANNEL_ATTR_MAX = __NCSI_CHANNEL_ATTR_AFTER_LAST - 1
+};
+
+#endif /* __UAPI_NCSI_NETLINK_H__ */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 3f03567631cb..7e27070b9440 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -15,54 +15,6 @@
#include <linux/types.h>
-struct nd_cmd_smart {
- __u32 status;
- __u8 data[128];
-} __packed;
-
-#define ND_SMART_HEALTH_VALID (1 << 0)
-#define ND_SMART_SPARES_VALID (1 << 1)
-#define ND_SMART_USED_VALID (1 << 2)
-#define ND_SMART_TEMP_VALID (1 << 3)
-#define ND_SMART_CTEMP_VALID (1 << 4)
-#define ND_SMART_ALARM_VALID (1 << 9)
-#define ND_SMART_SHUTDOWN_VALID (1 << 10)
-#define ND_SMART_VENDOR_VALID (1 << 11)
-#define ND_SMART_SPARE_TRIP (1 << 0)
-#define ND_SMART_TEMP_TRIP (1 << 1)
-#define ND_SMART_CTEMP_TRIP (1 << 2)
-#define ND_SMART_NON_CRITICAL_HEALTH (1 << 0)
-#define ND_SMART_CRITICAL_HEALTH (1 << 1)
-#define ND_SMART_FATAL_HEALTH (1 << 2)
-
-struct nd_smart_payload {
- __u32 flags;
- __u8 reserved0[4];
- __u8 health;
- __u8 spares;
- __u8 life_used;
- __u8 alarm_flags;
- __u16 temperature;
- __u16 ctrl_temperature;
- __u8 reserved1[15];
- __u8 shutdown_state;
- __u32 vendor_size;
- __u8 vendor_data[92];
-} __packed;
-
-struct nd_cmd_smart_threshold {
- __u32 status;
- __u8 data[8];
-} __packed;
-
-struct nd_smart_threshold_payload {
- __u8 alarm_control;
- __u8 reserved0;
- __u16 temperature;
- __u8 spares;
- __u8 reserved[3];
-} __packed;
-
struct nd_cmd_dimm_flags {
__u32 status;
__u32 flags;
@@ -211,12 +163,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
#define ND_IOCTL 'N'
-#define ND_IOCTL_SMART _IOWR(ND_IOCTL, ND_CMD_SMART,\
- struct nd_cmd_smart)
-
-#define ND_IOCTL_SMART_THRESHOLD _IOWR(ND_IOCTL, ND_CMD_SMART_THRESHOLD,\
- struct nd_cmd_smart_threshold)
-
#define ND_IOCTL_DIMM_FLAGS _IOWR(ND_IOCTL, ND_CMD_DIMM_FLAGS,\
struct nd_cmd_dimm_flags)
@@ -263,7 +209,7 @@ enum nd_driver_flags {
};
enum {
- ND_MIN_NAMESPACE_SIZE = 0x00400000,
+ ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
};
enum ars_masks {
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 57ccfb32e87f..c712eb6879f1 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -101,12 +101,16 @@ enum ip_conntrack_status {
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
+ /* Conntrack has been offloaded to flow table. */
+ IPS_OFFLOAD_BIT = 14,
+ IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT),
+
/* Be careful here, modifying these bits can make things messy,
* so don't let users modify them directly.
*/
IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
- IPS_SEQ_ADJUST | IPS_TEMPLATE),
+ IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
__IPS_MAX_BIT = 14,
};
@@ -125,6 +129,7 @@ enum ip_conntrack_events {
IPCT_NATSEQADJ = IPCT_SEQADJ,
IPCT_SECMARK, /* new security mark has been set */
IPCT_LABEL, /* new connlabel has been set */
+ IPCT_SYNPROXY, /* synproxy has been set */
#ifdef __KERNEL__
__IPCT_MAX
#endif
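
A tiny illustration of the new status bit, assuming the conntrack status word has already been obtained (for example from ctnetlink's CTA_STATUS attribute, not shown): it can be tested like any other IPS_* flag, but since it now sits in IPS_UNCHANGEABLE_MASK userspace should treat it as read-only.

#include <stdio.h>
#include <linux/netfilter/nf_conntrack_common.h>

static void report_offload(unsigned int status)
{
	/* IPS_OFFLOAD is part of IPS_UNCHANGEABLE_MASK: userspace can
	 * read it but must not try to set or clear it. */
	if (status & IPS_OFFLOAD)
		printf("conntrack entry is offloaded to a flow table\n");
	else
		printf("conntrack entry handled by the normal path\n");
}

int main(void)
{
	report_offload(IPS_CONFIRMED | IPS_OFFLOAD);
	return 0;
}
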
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a3ee277b17a1..6a3d653d5b27 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -92,6 +92,9 @@ enum nft_verdicts {
* @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes)
* @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes)
* @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes)
+ * @NFT_MSG_NEWFLOWTABLE: add new flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -116,6 +119,9 @@ enum nf_tables_msg_types {
NFT_MSG_GETOBJ,
NFT_MSG_DELOBJ,
NFT_MSG_GETOBJ_RESET,
+ NFT_MSG_NEWFLOWTABLE,
+ NFT_MSG_GETFLOWTABLE,
+ NFT_MSG_DELFLOWTABLE,
NFT_MSG_MAX,
};
@@ -168,6 +174,8 @@ enum nft_table_attributes {
NFTA_TABLE_NAME,
NFTA_TABLE_FLAGS,
NFTA_TABLE_USE,
+ NFTA_TABLE_HANDLE,
+ NFTA_TABLE_PAD,
__NFTA_TABLE_MAX
};
#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
@@ -311,6 +319,7 @@ enum nft_set_desc_attributes {
* @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
* @NFTA_SET_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
+ * @NFTA_SET_HANDLE: set handle (NLA_U64)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -329,6 +338,7 @@ enum nft_set_attributes {
NFTA_SET_USERDATA,
NFTA_SET_PAD,
NFTA_SET_OBJ_TYPE,
+ NFTA_SET_HANDLE,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -777,6 +787,7 @@ enum nft_exthdr_attributes {
* @NFT_META_OIFGROUP: packet output interface group
* @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
* @NFT_META_PRANDOM: a 32bit pseudo-random number
+ * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp)
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -804,6 +815,7 @@ enum nft_meta_keys {
NFT_META_OIFGROUP,
NFT_META_CGROUP,
NFT_META_PRANDOM,
+ NFT_META_SECPATH,
};
/**
@@ -897,8 +909,8 @@ enum nft_rt_attributes {
* @NFT_CT_EXPIRATION: relative conntrack expiration time in ms
* @NFT_CT_HELPER: connection tracking helper assigned to conntrack
* @NFT_CT_L3PROTOCOL: conntrack layer 3 protocol
- * @NFT_CT_SRC: conntrack layer 3 protocol source (IPv4/IPv6 address)
- * @NFT_CT_DST: conntrack layer 3 protocol destination (IPv4/IPv6 address)
+ * @NFT_CT_SRC: conntrack layer 3 protocol source (IPv4/IPv6 address, deprecated)
+ * @NFT_CT_DST: conntrack layer 3 protocol destination (IPv4/IPv6 address, deprecated)
* @NFT_CT_PROTOCOL: conntrack layer 4 protocol
* @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source
* @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination
@@ -908,6 +920,10 @@ enum nft_rt_attributes {
* @NFT_CT_AVGPKT: conntrack average bytes per packet
* @NFT_CT_ZONE: conntrack zone
* @NFT_CT_EVENTMASK: ctnetlink events to be generated for this conntrack
+ * @NFT_CT_SRC_IP: conntrack layer 3 protocol source (IPv4 address)
+ * @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address)
+ * @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address)
+ * @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address)
*/
enum nft_ct_keys {
NFT_CT_STATE,
@@ -929,6 +945,10 @@ enum nft_ct_keys {
NFT_CT_AVGPKT,
NFT_CT_ZONE,
NFT_CT_EVENTMASK,
+ NFT_CT_SRC_IP,
+ NFT_CT_DST_IP,
+ NFT_CT_SRC_IP6,
+ NFT_CT_DST_IP6,
};
/**
@@ -949,6 +969,17 @@ enum nft_ct_attributes {
};
#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
+/**
+ * enum nft_flow_attributes - ct offload expression attributes
+ * @NFTA_FLOW_TABLE_NAME: flow table name (NLA_STRING)
+ */
+enum nft_offload_attributes {
+ NFTA_FLOW_UNSPEC,
+ NFTA_FLOW_TABLE_NAME,
+ __NFTA_FLOW_MAX,
+};
+#define NFTA_FLOW_MAX (__NFTA_FLOW_MAX - 1)
+
enum nft_limit_type {
NFT_LIMIT_PKTS,
NFT_LIMIT_PKT_BYTES
@@ -1295,6 +1326,7 @@ enum nft_ct_helper_attributes {
* @NFTA_OBJ_TYPE: stateful object type (NLA_U32)
* @NFTA_OBJ_DATA: stateful object data (NLA_NESTED)
* @NFTA_OBJ_USE: number of references to this expression (NLA_U32)
+ * @NFTA_OBJ_HANDLE: object handle (NLA_U64)
*/
enum nft_object_attributes {
NFTA_OBJ_UNSPEC,
@@ -1303,11 +1335,63 @@ enum nft_object_attributes {
NFTA_OBJ_TYPE,
NFTA_OBJ_DATA,
NFTA_OBJ_USE,
+ NFTA_OBJ_HANDLE,
+ NFTA_OBJ_PAD,
__NFTA_OBJ_MAX
};
#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1)
/**
+ * enum nft_flowtable_attributes - nf_tables flow table netlink attributes
+ *
+ * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
+ * @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING)
+ * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_U32)
+ * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
+ * @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
+ */
+enum nft_flowtable_attributes {
+ NFTA_FLOWTABLE_UNSPEC,
+ NFTA_FLOWTABLE_TABLE,
+ NFTA_FLOWTABLE_NAME,
+ NFTA_FLOWTABLE_HOOK,
+ NFTA_FLOWTABLE_USE,
+ NFTA_FLOWTABLE_HANDLE,
+ NFTA_FLOWTABLE_PAD,
+ __NFTA_FLOWTABLE_MAX
+};
+#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
+
+/**
+ * enum nft_flowtable_hook_attributes - nf_tables flow table hook netlink attributes
+ *
+ * @NFTA_FLOWTABLE_HOOK_NUM: netfilter hook number (NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK_DEVS: input devices this flow table is bound to (NLA_NESTED)
+ */
+enum nft_flowtable_hook_attributes {
+ NFTA_FLOWTABLE_HOOK_UNSPEC,
+ NFTA_FLOWTABLE_HOOK_NUM,
+ NFTA_FLOWTABLE_HOOK_PRIORITY,
+ NFTA_FLOWTABLE_HOOK_DEVS,
+ __NFTA_FLOWTABLE_HOOK_MAX
+};
+#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
+
+/**
+ * enum nft_device_attributes - nf_tables device netlink attributes
+ *
+ * @NFTA_DEVICE_NAME: name of this device (NLA_STRING)
+ */
+enum nft_devices_attributes {
+ NFTA_DEVICE_UNSPEC,
+ NFTA_DEVICE_NAME,
+ __NFTA_DEVICE_MAX
+};
+#define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1)
+
+
+/**
* enum nft_trace_attributes - nf_tables trace netlink attributes
*
* @NFTA_TRACE_TABLE: name of the table (NLA_STRING)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 7397e022ce6e..77987111cab0 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -54,6 +54,7 @@ enum ctattr_type {
CTA_MARK_MASK,
CTA_LABELS,
CTA_LABELS_MASK,
+ CTA_SYNPROXY,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@@ -190,6 +191,15 @@ enum ctattr_natseq {
};
#define CTA_NAT_SEQ_MAX (__CTA_NAT_SEQ_MAX - 1)
+enum ctattr_synproxy {
+ CTA_SYNPROXY_UNSPEC,
+ CTA_SYNPROXY_ISN,
+ CTA_SYNPROXY_ITS,
+ CTA_SYNPROXY_TSOFF,
+ __CTA_SYNPROXY_MAX,
+};
+#define CTA_SYNPROXY_MAX (__CTA_SYNPROXY_MAX - 1)
+
enum ctattr_expect {
CTA_EXPECT_UNSPEC,
CTA_EXPECT_MASTER,
diff --git a/include/uapi/linux/netfilter/xt_connlimit.h b/include/uapi/linux/netfilter/xt_connlimit.h
index 07e5e9d47882..d4d1943dcd11 100644
--- a/include/uapi/linux/netfilter/xt_connlimit.h
+++ b/include/uapi/linux/netfilter/xt_connlimit.h
@@ -27,7 +27,7 @@ struct xt_connlimit_info {
__u32 flags;
/* Used internally by the kernel */
- struct xt_connlimit_data *data __attribute__((aligned(8)));
+ struct nf_conncount_data *data __attribute__((aligned(8)));
};
#endif /* _XT_CONNLIMIT_H */
diff --git a/include/uapi/linux/netfilter/xt_connmark.h b/include/uapi/linux/netfilter/xt_connmark.h
index 408a9654f05c..1aa5c955ee1e 100644
--- a/include/uapi/linux/netfilter/xt_connmark.h
+++ b/include/uapi/linux/netfilter/xt_connmark.h
@@ -19,11 +19,21 @@ enum {
XT_CONNMARK_RESTORE
};
+enum {
+ D_SHIFT_LEFT = 0,
+ D_SHIFT_RIGHT,
+};
+
struct xt_connmark_tginfo1 {
__u32 ctmark, ctmask, nfmask;
__u8 mode;
};
+struct xt_connmark_tginfo2 {
+ __u32 ctmark, ctmask, nfmask;
+ __u8 shift_dir, shift_bits, mode;
+};
+
struct xt_connmark_mtinfo1 {
__u32 mark, mask;
__u8 invert;
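
The exact save/restore semantics of the new shift fields are implemented by the xt_connmark target itself, not by this header; as a purely hypothetical illustration, a shift direction/width pair could be applied to a 32-bit mark as below (apply_shift is an invented helper, not kernel code).

#include <stdio.h>
#include <linux/netfilter/xt_connmark.h>

static unsigned int apply_shift(unsigned int mark,
				unsigned char dir, unsigned char bits)
{
	/* shift_bits is at most 31 for a 32-bit mark */
	if (dir == D_SHIFT_RIGHT)
		return mark >> bits;
	return mark << bits;	/* D_SHIFT_LEFT */
}

int main(void)
{
	struct xt_connmark_tginfo2 info = {
		.ctmark = 0xff, .ctmask = 0xff, .nfmask = ~0U,
		.shift_dir = D_SHIFT_LEFT, .shift_bits = 8,
		.mode = XT_CONNMARK_RESTORE,
	};

	printf("0x%x\n", apply_shift(info.ctmark, info.shift_dir,
				     info.shift_bits));
	return 0;
}
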
diff --git a/include/uapi/linux/netfilter_arp.h b/include/uapi/linux/netfilter_arp.h
index 81b6a4cbcb72..791dfc5ae907 100644
--- a/include/uapi/linux/netfilter_arp.h
+++ b/include/uapi/linux/netfilter_arp.h
@@ -15,6 +15,9 @@
#define NF_ARP_IN 0
#define NF_ARP_OUT 1
#define NF_ARP_FORWARD 2
+
+#ifndef __KERNEL__
#define NF_ARP_NUMHOOKS 3
+#endif
#endif /* __LINUX_ARP_NETFILTER_H */
diff --git a/include/uapi/linux/netfilter_bridge/ebt_ip.h b/include/uapi/linux/netfilter_bridge/ebt_ip.h
index 8e462fb1983f..46d6261370b0 100644
--- a/include/uapi/linux/netfilter_bridge/ebt_ip.h
+++ b/include/uapi/linux/netfilter_bridge/ebt_ip.h
@@ -24,8 +24,10 @@
#define EBT_IP_PROTO 0x08
#define EBT_IP_SPORT 0x10
#define EBT_IP_DPORT 0x20
+#define EBT_IP_ICMP 0x40
+#define EBT_IP_IGMP 0x80
#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO |\
- EBT_IP_SPORT | EBT_IP_DPORT )
+ EBT_IP_SPORT | EBT_IP_DPORT | EBT_IP_ICMP | EBT_IP_IGMP)
#define EBT_IP_MATCH "ip"
/* the same values are used for the invflags */
@@ -38,8 +40,15 @@ struct ebt_ip_info {
__u8 protocol;
__u8 bitmask;
__u8 invflags;
- __u16 sport[2];
- __u16 dport[2];
+ union {
+ __u16 sport[2];
+ __u8 icmp_type[2];
+ __u8 igmp_type[2];
+ };
+ union {
+ __u16 dport[2];
+ __u8 icmp_code[2];
+ };
};
#endif
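
A minimal sketch of how the new union appears intended to be used: when EBT_IP_ICMP is set in bitmask, the sport/dport range slots are reinterpreted as ICMP type and code ranges. The inclusive-range semantics below mirror the port ranges they replace and are an assumption; wiring the structure into an actual ebtables rule is not shown.

#include <stdio.h>
#include <netinet/in.h>		/* IPPROTO_ICMP */
#include <linux/netfilter_bridge/ebt_ip.h>

int main(void)
{
	struct ebt_ip_info info = { 0 };

	info.protocol = IPPROTO_ICMP;
	info.bitmask = EBT_IP_PROTO | EBT_IP_ICMP;

	/* match echo request only: type 8, any code */
	info.icmp_type[0] = 8;
	info.icmp_type[1] = 8;
	info.icmp_code[0] = 0;
	info.icmp_code[1] = 255;

	printf("match ICMP type %u..%u, code %u..%u (bitmask 0x%02x)\n",
	       info.icmp_type[0], info.icmp_type[1],
	       info.icmp_code[0], info.icmp_code[1], info.bitmask);
	return 0;
}
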
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index 9ff57c0a0199..0c7dc8315013 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -20,6 +20,7 @@
#define EBT_TABLE_MAXNAMELEN 32
#define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN
#define EBT_FUNCTION_MAXNAMELEN EBT_TABLE_MAXNAMELEN
+#define EBT_EXTENSION_MAXNAMELEN 31
/* verdicts >0 are "branches" */
#define EBT_ACCEPT -1
@@ -120,7 +121,10 @@ struct ebt_entries {
struct ebt_entry_match {
union {
- char name[EBT_FUNCTION_MAXNAMELEN];
+ struct {
+ char name[EBT_EXTENSION_MAXNAMELEN];
+ uint8_t revision;
+ };
struct xt_match *match;
} u;
/* size of data */
@@ -130,7 +134,10 @@ struct ebt_entry_match {
struct ebt_entry_watcher {
union {
- char name[EBT_FUNCTION_MAXNAMELEN];
+ struct {
+ char name[EBT_EXTENSION_MAXNAMELEN];
+ uint8_t revision;
+ };
struct xt_target *watcher;
} u;
/* size of data */
@@ -140,7 +147,10 @@ struct ebt_entry_watcher {
struct ebt_entry_target {
union {
- char name[EBT_FUNCTION_MAXNAMELEN];
+ struct {
+ char name[EBT_EXTENSION_MAXNAMELEN];
+ uint8_t revision;
+ };
struct xt_target *target;
} u;
/* size of data */
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
index 9089c38f6abe..61f1c7dfd033 100644
--- a/include/uapi/linux/netfilter_decnet.h
+++ b/include/uapi/linux/netfilter_decnet.h
@@ -24,6 +24,9 @@
#define NFC_DN_IF_IN 0x0004
/* Output device. */
#define NFC_DN_IF_OUT 0x0008
+
+/* kernel define is in netfilter_defs.h */
+#define NF_DN_NUMHOOKS 7
#endif /* ! __KERNEL__ */
/* DECnet Hooks */
@@ -41,7 +44,6 @@
#define NF_DN_HELLO 5
/* Input Routing Packets */
#define NF_DN_ROUTE 6
-#define NF_DN_NUMHOOKS 7
enum nf_dn_hook_priorities {
NF_DN_PRI_FIRST = INT_MIN,
diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h
index e6b1a84f5dd3..c3b060775e13 100644
--- a/include/uapi/linux/netfilter_ipv4.h
+++ b/include/uapi/linux/netfilter_ipv4.h
@@ -57,6 +57,7 @@
enum nf_ip_hook_priorities {
NF_IP_PRI_FIRST = INT_MIN,
+ NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP_PRI_CONNTRACK_DEFRAG = -400,
NF_IP_PRI_RAW = -300,
NF_IP_PRI_SELINUX_FIRST = -225,
diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h
index 2f9724611cc2..dc624fd24d25 100644
--- a/include/uapi/linux/netfilter_ipv6.h
+++ b/include/uapi/linux/netfilter_ipv6.h
@@ -62,6 +62,7 @@
enum nf_ip6_hook_priorities {
NF_IP6_PRI_FIRST = INT_MIN,
+ NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
NF_IP6_PRI_RAW = -300,
NF_IP6_PRI_SELINUX_FIRST = -225,
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
new file mode 100644
index 000000000000..f3cc0ef514a7
--- /dev/null
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _IP6T_SRH_H
+#define _IP6T_SRH_H
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+
+/* Values for "mt_flags" field in struct ip6t_srh */
+#define IP6T_SRH_NEXTHDR 0x0001
+#define IP6T_SRH_LEN_EQ 0x0002
+#define IP6T_SRH_LEN_GT 0x0004
+#define IP6T_SRH_LEN_LT 0x0008
+#define IP6T_SRH_SEGS_EQ 0x0010
+#define IP6T_SRH_SEGS_GT 0x0020
+#define IP6T_SRH_SEGS_LT 0x0040
+#define IP6T_SRH_LAST_EQ 0x0080
+#define IP6T_SRH_LAST_GT 0x0100
+#define IP6T_SRH_LAST_LT 0x0200
+#define IP6T_SRH_TAG 0x0400
+#define IP6T_SRH_MASK 0x07FF
+
+/* Values for "mt_invflags" field in struct ip6t_srh */
+#define IP6T_SRH_INV_NEXTHDR 0x0001
+#define IP6T_SRH_INV_LEN_EQ 0x0002
+#define IP6T_SRH_INV_LEN_GT 0x0004
+#define IP6T_SRH_INV_LEN_LT 0x0008
+#define IP6T_SRH_INV_SEGS_EQ 0x0010
+#define IP6T_SRH_INV_SEGS_GT 0x0020
+#define IP6T_SRH_INV_SEGS_LT 0x0040
+#define IP6T_SRH_INV_LAST_EQ 0x0080
+#define IP6T_SRH_INV_LAST_GT 0x0100
+#define IP6T_SRH_INV_LAST_LT 0x0200
+#define IP6T_SRH_INV_TAG 0x0400
+#define IP6T_SRH_INV_MASK 0x07FF
+
+/**
+ * struct ip6t_srh - SRH match options
+ * @next_hdr: Next header field of SRH
+ * @hdr_len: Extension header length field of SRH
+ * @segs_left: Segments left field of SRH
+ * @last_entry: Last entry field of SRH
+ * @tag: Tag field of SRH
+ * @mt_flags: match options
+ * @mt_invflags: Invert the sense of match options
+ */
+
+struct ip6t_srh {
+ __u8 next_hdr;
+ __u8 hdr_len;
+ __u8 segs_left;
+ __u8 last_entry;
+ __u16 tag;
+ __u16 mt_flags;
+ __u16 mt_invflags;
+};
+
+#endif /*_IP6T_SRH_H*/
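
A short sketch of filling the new match options, going by the *_GT flag naming: require an SRH whose segments-left field is greater than 2 and whose tag equals 0x1234. Installing the rule is done through ip6tables/libxtables and is not shown.

#include <stdio.h>
#include <linux/netfilter_ipv6/ip6t_srh.h>

int main(void)
{
	struct ip6t_srh srh = {
		.segs_left	= 2,
		.tag		= 0x1234,
		.mt_flags	= IP6T_SRH_SEGS_GT | IP6T_SRH_TAG,
		.mt_invflags	= 0,
	};

	/* IP6T_SRH_MASK doubles as a validity check for the flag word */
	if (srh.mt_flags & ~IP6T_SRH_MASK)
		printf("unknown match flags set\n");
	else
		printf("flags 0x%04x ok\n", srh.mt_flags);
	return 0;
}
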
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index 057d22a48416..946cb62d64b0 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -12,6 +12,7 @@
#define NFS_PROGRAM 100003
#define NFS_PORT 2049
+#define NFS_RDMA_PORT 20049
#define NFS_MAXDATA 8192
#define NFS_MAXPATHLEN 1024
#define NFS_MAXNAMLEN 255
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f882fe1f9709..15daf5e2638d 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -542,7 +542,8 @@
* IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
* %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
- * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, %NL80211_ATTR_MAC_HINT, and
+ * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT,
+ * %NL80211_ATTR_CONTROL_PORT_OVER_NL80211, %NL80211_ATTR_MAC_HINT, and
* %NL80211_ATTR_WIPHY_FREQ_HINT.
* If included, %NL80211_ATTR_MAC and %NL80211_ATTR_WIPHY_FREQ are
* restrictions on BSS selection, i.e., they effectively prevent roaming
@@ -990,8 +991,45 @@
* &NL80211_CMD_CONNECT or &NL80211_CMD_ROAM. If the 4 way handshake failed
* &NL80211_CMD_DISCONNECT should be indicated instead.
*
+ * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
+ * and RX notification. This command is used both as a request to transmit
+ * a control port frame and as a notification that a control port frame
+ * has been received. %NL80211_ATTR_FRAME is used to specify the
+ * frame contents. The frame is the raw EAPoL data, without ethernet or
+ * 802.11 headers.
+ * When used as an event indication %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
+ * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT and %NL80211_ATTR_MAC are added,
+ * indicating, respectively, the protocol type of the received frame,
+ * whether the frame was received unencrypted, and the MAC address of
+ * the peer.
+ *
* @NL80211_CMD_RELOAD_REGDB: Request that the regdb firmware file is reloaded.
*
+ * @NL80211_CMD_EXTERNAL_AUTH: This interface is exclusively defined for host
+ * drivers that do not define separate commands for authentication and
+ * association, but rely on user space for the authentication to happen.
+ * This interface acts both as the event request (driver to user space)
+ * to trigger the authentication and as the command response (user space
+ * to driver) to indicate the authentication status.
+ *
+ * User space uses the %NL80211_CMD_CONNECT command to the host driver to
+ * trigger a connection. The host driver selects a BSS and further uses
+ * this interface to offload only the authentication part to the user
+ * space. Authentication frames are passed between the driver and user
+ * space through the %NL80211_CMD_FRAME interface. Host driver proceeds
+ * further with the association after getting successful authentication
+ * status. User space indicates the authentication status through
+ * %NL80211_ATTR_STATUS_CODE attribute in %NL80211_CMD_EXTERNAL_AUTH
+ * command interface.
+ *
+ * Host driver reports this status on an authentication failure to the
+ * user space through the connect result as the user space would have
+ * initiated the connection through the connect request.
+ *
+ * @NL80211_CMD_STA_OPMODE_CHANGED: An event that notifies of a station's
+ * HT or VHT operating mode change, using any of &NL80211_ATTR_SMPS_MODE,
+ * &NL80211_ATTR_CHANNEL_WIDTH and &NL80211_ATTR_NSS together with the
+ * station's address (specified in &NL80211_ATTR_MAC).
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1198,6 +1236,12 @@ enum nl80211_commands {
NL80211_CMD_RELOAD_REGDB,
+ NL80211_CMD_EXTERNAL_AUTH,
+
+ NL80211_CMD_STA_OPMODE_CHANGED,
+
+ NL80211_CMD_CONTROL_PORT_FRAME,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1445,6 +1489,15 @@ enum nl80211_commands {
* @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom
* ethertype frames used for key negotiation must not be encrypted.
+ * @NL80211_ATTR_CONTROL_PORT_OVER_NL80211: A flag indicating whether control
+ * port frames (e.g. of type given in %NL80211_ATTR_CONTROL_PORT_ETHERTYPE)
+ * will be sent directly to the network interface or sent via the NL80211
+ * socket. If this attribute is missing, then legacy behavior of sending
+ * control port frames directly to the network interface is used. If the
+ * flag is included, then control port frames are sent over NL80211 instead
+ * using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is
+ * to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER
+ * flag.
*
* @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
* We recommend using nested, driver-specific attributes within this.
@@ -1932,6 +1985,12 @@ enum nl80211_commands {
* multicast group.
* If set during %NL80211_CMD_ASSOCIATE or %NL80211_CMD_CONNECT the
* station will deauthenticate when the socket is closed.
+ * If set during %NL80211_CMD_JOIN_IBSS the IBSS will be automatically
+ * torn down when the socket is closed.
+ * If set during %NL80211_CMD_JOIN_MESH the mesh setup will be
+ * automatically torn down when the socket is closed.
+ * If set during %NL80211_CMD_START_AP the AP will be automatically
+ * disabled when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -2153,6 +2212,19 @@ enum nl80211_commands {
* @NL80211_ATTR_PMKR0_NAME: PMK-R0 Name for offloaded FT.
* @NL80211_ATTR_PORT_AUTHORIZED: (reserved)
*
+ * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external
+ * authentication operation (u32 attribute with an
+ * &enum nl80211_external_auth_action value). This is used with the
+ * &NL80211_CMD_EXTERNAL_AUTH request event.
+ * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user
+ * space supports external authentication. This attribute shall be used
+ * only with %NL80211_CMD_CONNECT request. The driver may offload
+ * authentication processing to user space if this capability is indicated
+ * in NL80211_CMD_CONNECT requests from the user space.
+ *
+ * @NL80211_ATTR_NSS: Station's new/updated RX_NSS value, notified using
+ * this u8 attribute. This is used with %NL80211_CMD_STA_OPMODE_CHANGED.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2579,6 +2651,14 @@ enum nl80211_attrs {
NL80211_ATTR_PMKR0_NAME,
NL80211_ATTR_PORT_AUTHORIZED,
+ NL80211_ATTR_EXTERNAL_AUTH_ACTION,
+ NL80211_ATTR_EXTERNAL_AUTH_SUPPORT,
+
+ NL80211_ATTR_NSS,
+ NL80211_ATTR_ACK_SIGNAL,
+
+ NL80211_ATTR_CONTROL_PORT_OVER_NL80211,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2899,6 +2979,7 @@ enum nl80211_sta_bss_param {
* @NL80211_STA_INFO_RX_DURATION: aggregate PPDU duration for all frames
* received from the station (u64, usec)
* @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
+ * @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -2937,6 +3018,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_TID_STATS,
NL80211_STA_INFO_RX_DURATION,
NL80211_STA_INFO_PAD,
+ NL80211_STA_INFO_ACK_SIGNAL,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -3862,6 +3944,9 @@ enum nl80211_bss_scan_width {
* @NL80211_BSS_PARENT_BSSID. (u64).
* @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
* is set.
+ * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
+ * Contains a nested array of signal strength attributes (u8, dBm),
+ * using the nesting index as the antenna number.
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -3885,6 +3970,7 @@ enum nl80211_bss {
NL80211_BSS_PAD,
NL80211_BSS_PARENT_TSF,
NL80211_BSS_PARENT_BSSID,
+ NL80211_BSS_CHAIN_SIGNAL,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -4941,6 +5027,17 @@ enum nl80211_feature_flags {
* probe request tx deferral and suppression
* @NL80211_EXT_FEATURE_MFP_OPTIONAL: Driver supports the %NL80211_MFP_OPTIONAL
* value in %NL80211_ATTR_USE_MFP.
+ * @NL80211_EXT_FEATURE_LOW_SPAN_SCAN: Driver supports low span scan.
+ * @NL80211_EXT_FEATURE_LOW_POWER_SCAN: Driver supports low power scan.
+ * @NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN: Driver supports high accuracy scan.
+ * @NL80211_EXT_FEATURE_DFS_OFFLOAD: HW/driver will offload DFS actions.
+ * The device or driver will do all DFS-related actions by itself,
+ * informing user-space about CAC progress, radar detection events and
+ * channel changes triggered by radar detection. User-space does not
+ * need to start the CAC or react to the "radar detected" event.
+ * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and
+ * receiving control port frames over nl80211 instead of the netdevice.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4968,6 +5065,11 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE,
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION,
NL80211_EXT_FEATURE_MFP_OPTIONAL,
+ NL80211_EXT_FEATURE_LOW_SPAN_SCAN,
+ NL80211_EXT_FEATURE_LOW_POWER_SCAN,
+ NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
+ NL80211_EXT_FEATURE_DFS_OFFLOAD,
+ NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5028,6 +5130,10 @@ enum nl80211_timeout_reason {
* of NL80211_CMD_TRIGGER_SCAN and NL80211_CMD_START_SCHED_SCAN
* requests.
*
+ * NL80211_SCAN_FLAG_LOW_SPAN, NL80211_SCAN_FLAG_LOW_POWER, and
+ * NL80211_SCAN_FLAG_HIGH_ACCURACY flags are exclusive of each other, i.e., only
+ * one of them can be used in the request.
+ *
* @NL80211_SCAN_FLAG_LOW_PRIORITY: scan request has low priority
* @NL80211_SCAN_FLAG_FLUSH: flush cache before scanning
* @NL80211_SCAN_FLAG_AP: force a scan even if the interface is configured
@@ -5055,7 +5161,20 @@ enum nl80211_timeout_reason {
* and suppression (if it has received a broadcast Probe Response frame,
* Beacon frame or FILS Discovery frame from an AP that the STA considers
* a suitable candidate for (re-)association - suitable in terms of
- * SSID and/or RSSI
+ * SSID and/or RSSI).
+ * @NL80211_SCAN_FLAG_LOW_SPAN: Span corresponds to the total time taken to
+ * accomplish the scan. This flag asks the driver to perform the scan
+ * request within a shorter span/duration; how that is accomplished is
+ * specific to the driver implementation. Scan accuracy may be impacted
+ * by this flag.
+ * @NL80211_SCAN_FLAG_LOW_POWER: This flag asks the driver to consume as
+ * little power as possible for the scan, using whatever driver-specific
+ * means are available. Scan accuracy may be impacted by this flag.
+ * @NL80211_SCAN_FLAG_HIGH_ACCURACY: Accuracy here refers to the extent of
+ * the scan results obtained. This flag aims for the maximum possible scan
+ * results and hints the driver to use the best possible scan configuration
+ * to improve accuracy. Latency and power use may be impacted by this flag.
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
@@ -5066,6 +5185,9 @@ enum nl80211_scan_flags {
NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = 1<<5,
NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = 1<<6,
NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 1<<7,
+ NL80211_SCAN_FLAG_LOW_SPAN = 1<<8,
+ NL80211_SCAN_FLAG_LOW_POWER = 1<<9,
+ NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10,
};
/**
@@ -5123,6 +5245,8 @@ enum nl80211_smps_mode {
* non-operating channel is expired and no longer valid. New CAC must
* be done on this channel before starting the operation. This is not
* applicable for ETSI dfs domain where pre-CAC is valid for ever.
+ * @NL80211_RADAR_CAC_STARTED: Channel Availability Check has been started,
+ * should be generated by HW if NL80211_EXT_FEATURE_DFS_OFFLOAD is enabled.
*/
enum nl80211_radar_event {
NL80211_RADAR_DETECTED,
@@ -5130,6 +5254,7 @@ enum nl80211_radar_event {
NL80211_RADAR_CAC_ABORTED,
NL80211_RADAR_NOP_FINISHED,
NL80211_RADAR_PRE_CAC_EXPIRED,
+ NL80211_RADAR_CAC_STARTED,
};
/**
@@ -5465,4 +5590,15 @@ enum nl80211_nan_match_attributes {
NL80211_NAN_MATCH_ATTR_MAX = NUM_NL80211_NAN_MATCH_ATTR - 1
};
+/**
+ * nl80211_external_auth_action - Action to perform with external
+ * authentication request. Used by NL80211_ATTR_EXTERNAL_AUTH_ACTION.
+ * @NL80211_EXTERNAL_AUTH_START: Start the authentication.
+ * @NL80211_EXTERNAL_AUTH_ABORT: Abort the ongoing authentication.
+ */
+enum nl80211_external_auth_action {
+ NL80211_EXTERNAL_AUTH_START,
+ NL80211_EXTERNAL_AUTH_ABORT,
+};
+
#endif /* __LINUX_NL80211_H */
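
Since the three new scan flags are documented as mutually exclusive, a userspace tool might sanity-check its flag word before issuing NL80211_CMD_TRIGGER_SCAN; building the actual netlink request (e.g. with libnl) is omitted, and scan_flags_valid is just an illustrative helper.

#include <stdio.h>
#include <linux/nl80211.h>

static int scan_flags_valid(unsigned int flags)
{
	const unsigned int excl = NL80211_SCAN_FLAG_LOW_SPAN |
				  NL80211_SCAN_FLAG_LOW_POWER |
				  NL80211_SCAN_FLAG_HIGH_ACCURACY;
	unsigned int set = flags & excl;

	/* at most one of the exclusive bits may be set */
	return (set & (set - 1)) == 0;
}

int main(void)
{
	printf("%d\n", scan_flags_valid(NL80211_SCAN_FLAG_LOW_POWER |
					NL80211_SCAN_FLAG_FLUSH));	/* 1 */
	printf("%d\n", scan_flags_valid(NL80211_SCAN_FLAG_LOW_POWER |
					NL80211_SCAN_FLAG_LOW_SPAN));	/* 0 */
	return 0;
}
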
diff --git a/include/uapi/linux/nubus.h b/include/uapi/linux/nubus.h
index f3776cc80f4d..48031e7858f1 100644
--- a/include/uapi/linux/nubus.h
+++ b/include/uapi/linux/nubus.h
@@ -221,27 +221,4 @@ enum nubus_display_res_id {
NUBUS_RESID_SIXTHMODE = 0x0085
};
-struct nubus_dir
-{
- unsigned char *base;
- unsigned char *ptr;
- int done;
- int mask;
-};
-
-struct nubus_dirent
-{
- unsigned char *base;
- unsigned char type;
- __u32 data; /* Actually 24bits used */
- int mask;
-};
-
-
-/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */
-static inline void *nubus_slot_addr(int slot)
-{
- return (void *)(0xF0000000|(slot<<24));
-}
-
#endif /* _UAPILINUX_NUBUS_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index dcfab5e3b55c..713e56ce681f 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -363,6 +363,7 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
OVS_TUNNEL_KEY_ATTR_PAD,
+ OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* struct erspan_metadata */
__OVS_TUNNEL_KEY_ATTR_MAX
};
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 70c2b2ade048..103ba797a8f3 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -520,6 +520,7 @@
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
+#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
@@ -547,6 +548,7 @@
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
+#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */
#define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */
@@ -622,15 +624,19 @@
* safely.
*/
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
-#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* Atomic 64-bit compare */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */
#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
+#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
@@ -644,8 +650,9 @@
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */
#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
-#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5.0GT/s */
-#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
+#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
@@ -966,26 +973,28 @@
/* Downstream Port Containment */
#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
-#define PCI_EXP_DPC_IRQ 0x1f /* DPC Interrupt Message Number */
-#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */
-#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */
-#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */
-#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0xF00 /* RP PIO log size */
+#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */
+#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
+#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
+#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x0080 /* Software Triggering Supported */
+#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
#define PCI_EXP_DPC_CTL 6 /* DPC control */
-#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x02 /* Enable trigger on ERR_NONFATAL message */
-#define PCI_EXP_DPC_CTL_INT_EN 0x08 /* DPC Interrupt Enable */
+#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
+#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
-#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */
-#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */
-#define PCI_EXP_DPC_RP_BUSY 0x10 /* Root Port Busy */
+#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
+#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
+#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
-#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO MASK */
+#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */
#define PCI_EXP_DPC_RP_PIO_SEVERITY 0x14 /* RP PIO Severity */
#define PCI_EXP_DPC_RP_PIO_SYSERROR 0x18 /* RP PIO SysError */
#define PCI_EXP_DPC_RP_PIO_EXCEPTION 0x1C /* RP PIO Exception */
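
A small sketch of decoding the Current Link Speed field with the new 16 GT/s encoding; reading the Link Status register itself (for instance from a device's sysfs config file) is assumed, and the register value below is made up.

#include <stdio.h>
#include <linux/pci_regs.h>

static const char *cls_to_str(unsigned short lnksta)
{
	switch (lnksta & PCI_EXP_LNKSTA_CLS) {
	case PCI_EXP_LNKSTA_CLS_2_5GB:	return "2.5 GT/s";
	case PCI_EXP_LNKSTA_CLS_5_0GB:	return "5.0 GT/s";
	case PCI_EXP_LNKSTA_CLS_8_0GB:	return "8.0 GT/s";
	case PCI_EXP_LNKSTA_CLS_16_0GB:	return "16.0 GT/s";
	default:			return "unknown";
	}
}

int main(void)
{
	unsigned short lnksta = 0x0004;	/* hypothetical register value */

	printf("current link speed: %s\n", cls_to_str(lnksta));
	return 0;
}
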
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b9a4953018ed..912b85b52344 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -380,10 +380,14 @@ struct perf_event_attr {
__u32 bp_type;
union {
__u64 bp_addr;
+ __u64 kprobe_func; /* for perf_kprobe */
+ __u64 uprobe_path; /* for perf_uprobe */
__u64 config1; /* extension of config */
};
union {
__u64 bp_len;
+ __u64 kprobe_addr; /* when kprobe_func == NULL */
+ __u64 probe_offset; /* for perf_[k,u]probe */
__u64 config2; /* extension of config1 */
};
__u64 branch_sample_type; /* enum perf_branch_sample_type */
@@ -418,21 +422,44 @@ struct perf_event_attr {
__u16 __reserved_2; /* align to __u64 */
};
+/*
+ * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
+ * to query bpf programs attached to the same perf tracepoint
+ * as the given perf event.
+ */
+struct perf_event_query_bpf {
+ /*
+ * Length of the ids array below
+ */
+ __u32 ids_len;
+ /*
+ * Set by the kernel to indicate the number of
+ * available programs
+ */
+ __u32 prog_cnt;
+ /*
+ * User provided buffer to store program ids
+ */
+ __u32 ids[0];
+};
+
#define perf_flags(attr) (*(&(attr)->read_format + 1))
/*
* Ioctls that can be done on a perf event fd:
*/
-#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
-#define PERF_EVENT_IOC_RESET _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *)
+#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -612,9 +639,12 @@ struct perf_event_mmap_page {
*/
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ * Following PERF_RECORD_MISC_* are used on different
+ * events, so can reuse the same bit position:
+ *
+ * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
+ * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
+ * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
@@ -864,6 +894,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid;
* u32 tid;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_ITRACE_START = 12,
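
A minimal sketch of the new query ioctl: given a perf event fd that already has BPF programs attached (the perf_event_open() setup is not shown), allocate the variable-length structure, say how many IDs the buffer can hold, and let the kernel fill in prog_cnt and the ids array. The buffer size of 16 is arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void query_bpf(int perf_fd)
{
	const unsigned int max_ids = 16;	/* arbitrary buffer size */
	struct perf_event_query_bpf *query;

	query = calloc(1, sizeof(*query) + max_ids * sizeof(__u32));
	if (!query)
		return;

	query->ids_len = max_ids;
	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0) {
		for (__u32 i = 0; i < query->prog_cnt && i < max_ids; i++)
			printf("attached bpf prog id: %u\n", query->ids[i]);
	} else {
		perror("PERF_EVENT_IOC_QUERY_BPF");
	}
	free(query);
}
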
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 46c506615f4a..be05e66c167b 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -475,6 +475,7 @@ enum {
enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
+ TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
/* Match-all classifier */
@@ -555,7 +556,8 @@ enum {
#define TCF_EM_VLAN 6
#define TCF_EM_CANID 7
#define TCF_EM_IPSET 8
-#define TCF_EM_MAX 8
+#define TCF_EM_IPT 9
+#define TCF_EM_MAX 9
enum {
TCF_EM_PROG_TC
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 760e52a9640f..b3bcabe380da 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -88,6 +88,9 @@
(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
#define PSCI_VERSION_MINOR(ver) \
((ver) & PSCI_VERSION_MINOR_MASK)
+#define PSCI_VERSION(maj, min) \
+ ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
+ ((min) & PSCI_VERSION_MINOR_MASK))
/* PSCI features decoding (>=1.0) */
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
new file mode 100644
index 000000000000..9008f31c7eb6
--- /dev/null
+++ b/include/uapi/linux/psp-sev.h
@@ -0,0 +1,142 @@
+/*
+ * Userspace interface for AMD Secure Encrypted Virtualization (SEV)
+ * platform management commands.
+ *
+ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * SEV spec 0.14 is available at:
+ * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PSP_SEV_USER_H__
+#define __PSP_SEV_USER_H__
+
+#include <linux/types.h>
+
+/**
+ * SEV platform commands
+ */
+enum {
+ SEV_FACTORY_RESET = 0,
+ SEV_PLATFORM_STATUS,
+ SEV_PEK_GEN,
+ SEV_PEK_CSR,
+ SEV_PDH_GEN,
+ SEV_PDH_CERT_EXPORT,
+ SEV_PEK_CERT_IMPORT,
+
+ SEV_MAX,
+};
+
+/**
+ * SEV Firmware status code
+ */
+typedef enum {
+ SEV_RET_SUCCESS = 0,
+ SEV_RET_INVALID_PLATFORM_STATE,
+ SEV_RET_INVALID_GUEST_STATE,
+ SEV_RET_INAVLID_CONFIG,
+ SEV_RET_INVALID_LEN,
+ SEV_RET_ALREADY_OWNED,
+ SEV_RET_INVALID_CERTIFICATE,
+ SEV_RET_POLICY_FAILURE,
+ SEV_RET_INACTIVE,
+ SEV_RET_INVALID_ADDRESS,
+ SEV_RET_BAD_SIGNATURE,
+ SEV_RET_BAD_MEASUREMENT,
+ SEV_RET_ASID_OWNED,
+ SEV_RET_INVALID_ASID,
+ SEV_RET_WBINVD_REQUIRED,
+ SEV_RET_DFFLUSH_REQUIRED,
+ SEV_RET_INVALID_GUEST,
+ SEV_RET_INVALID_COMMAND,
+ SEV_RET_ACTIVE,
+ SEV_RET_HWSEV_RET_PLATFORM,
+ SEV_RET_HWSEV_RET_UNSAFE,
+ SEV_RET_UNSUPPORTED,
+ SEV_RET_MAX,
+} sev_ret_code;
+
+/**
+ * struct sev_user_data_status - PLATFORM_STATUS command parameters
+ *
+ * @api_major: major API version
+ * @api_minor: minor API version
+ * @state: platform state
+ * @flags: platform config flags
+ * @build: firmware build id for API version
+ * @guest_count: number of active guests
+ */
+struct sev_user_data_status {
+ __u8 api_major; /* Out */
+ __u8 api_minor; /* Out */
+ __u8 state; /* Out */
+ __u32 flags; /* Out */
+ __u8 build; /* Out */
+ __u32 guest_count; /* Out */
+} __packed;
+
+/**
+ * struct sev_user_data_pek_csr - PEK_CSR command parameters
+ *
+ * @address: PEK certificate chain
+ * @length: length of certificate
+ */
+struct sev_user_data_pek_csr {
+ __u64 address; /* In */
+ __u32 length; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_user_data_pek_cert_import - PEK_CERT_IMPORT command parameters
+ *
+ * @pek_cert_address: PEK certificate chain
+ * @pek_cert_len: length of PEK certificate
+ * @oca_cert_address: OCA certificate chain
+ * @oca_cert_len: length of OCA certificate
+ */
+struct sev_user_data_pek_cert_import {
+ __u64 pek_cert_address; /* In */
+ __u32 pek_cert_len; /* In */
+ __u64 oca_cert_address; /* In */
+ __u32 oca_cert_len; /* In */
+} __packed;
+
+/**
+ * struct sev_user_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
+ *
+ * @pdh_cert_address: PDH certificate address
+ * @pdh_cert_len: length of PDH certificate
+ * @cert_chain_address: PDH certificate chain
+ * @cert_chain_len: length of PDH certificate chain
+ */
+struct sev_user_data_pdh_cert_export {
+ __u64 pdh_cert_address; /* In */
+ __u32 pdh_cert_len; /* In/Out */
+ __u64 cert_chain_address; /* In */
+ __u32 cert_chain_len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_issue_cmd - SEV ioctl parameters
+ *
+ * @cmd: SEV commands to execute
+ * @data: address of the command-specific data structure
+ * @error: SEV FW return code on failure
+ */
+struct sev_issue_cmd {
+ __u32 cmd; /* In */
+ __u64 data; /* In */
+ __u32 error; /* Out */
+} __packed;
+
+#define SEV_IOC_TYPE 'S'
+#define SEV_ISSUE_CMD _IOWR(SEV_IOC_TYPE, 0x0, struct sev_issue_cmd)
+
+#endif /* __PSP_SEV_USER_H__ */
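
A rough userspace sketch of issuing SEV_PLATFORM_STATUS through the SEV ioctl; /dev/sev is the node exposed by the psp/ccp driver (adjust if your setup differs), and error handling is trimmed to the essentials.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

int main(void)
{
	struct sev_user_data_status status;
	struct sev_issue_cmd cmd;
	int fd, ret;

	fd = open("/dev/sev", O_RDWR);
	if (fd < 0) {
		perror("open /dev/sev");
		return 1;
	}

	memset(&status, 0, sizeof(status));
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = SEV_PLATFORM_STATUS;
	cmd.data = (__u64)(unsigned long)&status;

	ret = ioctl(fd, SEV_ISSUE_CMD, &cmd);
	if (ret)
		fprintf(stderr, "SEV_PLATFORM_STATUS failed: fw error %u\n",
			cmd.error);
	else
		printf("SEV API %u.%u, build %u, %u guest(s)\n",
		       status.api_major, status.api_minor, status.build,
		       status.guest_count);

	close(fd);
	return ret ? 1 : 0;
}
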
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index e3939e00980b..d5a1b8a492b9 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -66,6 +66,12 @@ struct ptrace_peeksiginfo_args {
#define PTRACE_SETSIGMASK 0x420b
#define PTRACE_SECCOMP_GET_FILTER 0x420c
+#define PTRACE_SECCOMP_GET_METADATA 0x420d
+
+struct seccomp_metadata {
+ __u64 filter_off; /* Input: which filter */
+ __u64 flags; /* Output: filter's flags */
+};
/* Read signals from a shared (process wide) queue */
#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
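
A sketch of the new request for an already-attached, stopped tracee (the attach/stop dance is not shown). The addr argument carries the size of the structure; the local PTRACE_SECCOMP_GET_METADATA and seccomp_metadata copies simply mirror the definitions above so the snippet stays self-contained and avoids mixing <sys/ptrace.h> with <linux/ptrace.h>.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <linux/types.h>

#ifndef PTRACE_SECCOMP_GET_METADATA
#define PTRACE_SECCOMP_GET_METADATA 0x420d	/* mirrors the header above */
struct seccomp_metadata {
	__u64 filter_off;	/* Input: which filter */
	__u64 flags;		/* Output: filter's flags */
};
#endif

static void dump_filter0_flags(pid_t pid)
{
	struct seccomp_metadata md = { .filter_off = 0 };

	/* addr carries the size of the structure, data points at it */
	if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md) < 0) {
		perror("PTRACE_SECCOMP_GET_METADATA");
		return;
	}
	printf("filter 0 flags: 0x%llx\n", (unsigned long long)md.flags);
}
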
diff --git a/include/uapi/linux/qemu_fw_cfg.h b/include/uapi/linux/qemu_fw_cfg.h
new file mode 100644
index 000000000000..e089c0159ec2
--- /dev/null
+++ b/include/uapi/linux/qemu_fw_cfg.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+#ifndef _LINUX_FW_CFG_H
+#define _LINUX_FW_CFG_H
+
+#include <linux/types.h>
+
+#define FW_CFG_ACPI_DEVICE_ID "QEMU0002"
+
+/* selector key values for "well-known" fw_cfg entries */
+#define FW_CFG_SIGNATURE 0x00
+#define FW_CFG_ID 0x01
+#define FW_CFG_UUID 0x02
+#define FW_CFG_RAM_SIZE 0x03
+#define FW_CFG_NOGRAPHIC 0x04
+#define FW_CFG_NB_CPUS 0x05
+#define FW_CFG_MACHINE_ID 0x06
+#define FW_CFG_KERNEL_ADDR 0x07
+#define FW_CFG_KERNEL_SIZE 0x08
+#define FW_CFG_KERNEL_CMDLINE 0x09
+#define FW_CFG_INITRD_ADDR 0x0a
+#define FW_CFG_INITRD_SIZE 0x0b
+#define FW_CFG_BOOT_DEVICE 0x0c
+#define FW_CFG_NUMA 0x0d
+#define FW_CFG_BOOT_MENU 0x0e
+#define FW_CFG_MAX_CPUS 0x0f
+#define FW_CFG_KERNEL_ENTRY 0x10
+#define FW_CFG_KERNEL_DATA 0x11
+#define FW_CFG_INITRD_DATA 0x12
+#define FW_CFG_CMDLINE_ADDR 0x13
+#define FW_CFG_CMDLINE_SIZE 0x14
+#define FW_CFG_CMDLINE_DATA 0x15
+#define FW_CFG_SETUP_ADDR 0x16
+#define FW_CFG_SETUP_SIZE 0x17
+#define FW_CFG_SETUP_DATA 0x18
+#define FW_CFG_FILE_DIR 0x19
+
+#define FW_CFG_FILE_FIRST 0x20
+#define FW_CFG_FILE_SLOTS_MIN 0x10
+
+#define FW_CFG_WRITE_CHANNEL 0x4000
+#define FW_CFG_ARCH_LOCAL 0x8000
+#define FW_CFG_ENTRY_MASK (~(FW_CFG_WRITE_CHANNEL | FW_CFG_ARCH_LOCAL))
+
+#define FW_CFG_INVALID 0xffff
+
+/* width in bytes of fw_cfg control register */
+#define FW_CFG_CTL_SIZE 0x02
+
+/* fw_cfg "file name" is up to 56 characters (including terminating nul) */
+#define FW_CFG_MAX_FILE_PATH 56
+
+/* size in bytes of fw_cfg signature */
+#define FW_CFG_SIG_SIZE 4
+
+/* FW_CFG_ID bits */
+#define FW_CFG_VERSION 0x01
+#define FW_CFG_VERSION_DMA 0x02
+
+/* fw_cfg file directory entry type */
+struct fw_cfg_file {
+ __be32 size;
+ __be16 select;
+ __u16 reserved;
+ char name[FW_CFG_MAX_FILE_PATH];
+};
+
+/* FW_CFG_DMA_CONTROL bits */
+#define FW_CFG_DMA_CTL_ERROR 0x01
+#define FW_CFG_DMA_CTL_READ 0x02
+#define FW_CFG_DMA_CTL_SKIP 0x04
+#define FW_CFG_DMA_CTL_SELECT 0x08
+#define FW_CFG_DMA_CTL_WRITE 0x10
+
+#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */
+
+/* Having control as the first field allows for different structures
+ * selected by this field, which might be useful in the future.
+ */
+struct fw_cfg_dma_access {
+ __be32 control;
+ __be32 length;
+ __be64 address;
+};
+
+#define FW_CFG_VMCOREINFO_FILENAME "etc/vmcoreinfo"
+
+#define FW_CFG_VMCOREINFO_FORMAT_NONE 0x0
+#define FW_CFG_VMCOREINFO_FORMAT_ELF 0x1
+
+struct fw_cfg_vmcoreinfo {
+ __le16 host_format;
+ __le16 guest_format;
+ __le32 size;
+ __le64 paddr;
+};
+
+#endif
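
The multi-byte fields here are big-endian on the wire, so a consumer that has read a struct fw_cfg_file (via the fw_cfg sysfs interface or a guest-side driver, not shown) converts them before use; the sample entry below is fabricated.

#include <stdio.h>
#include <endian.h>
#include <linux/qemu_fw_cfg.h>

static void print_entry(const struct fw_cfg_file *f)
{
	/* size and select are stored big-endian */
	printf("%-*s  select=0x%04x  size=%u\n", FW_CFG_MAX_FILE_PATH,
	       f->name, be16toh(f->select), be32toh(f->size));
}

int main(void)
{
	struct fw_cfg_file f = {
		.size	= htobe32(16),
		.select	= htobe16(FW_CFG_FILE_FIRST),
		.name	= "etc/vmcoreinfo",
	};

	print_entry(&f);
	return 0;
}
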
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index e71d4491f225..a66b213de3d7 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -103,6 +103,8 @@
#define RDS_CMSG_MASKED_ATOMIC_FADD 8
#define RDS_CMSG_MASKED_ATOMIC_CSWP 9
#define RDS_CMSG_RXPATH_LATENCY 11
+#define RDS_CMSG_ZCOPY_COOKIE 12
+#define RDS_CMSG_ZCOPY_COMPLETION 13
#define RDS_INFO_FIRST 10000
#define RDS_INFO_COUNTERS 10000
@@ -316,6 +318,12 @@ struct rds_rdma_notify {
#define RDS_RDMA_DROPPED 3
#define RDS_RDMA_OTHER_ERROR 4
+#define RDS_MAX_ZCOOKIES 8
+struct rds_zcopy_cookies {
+ __u32 num;
+ __u32 cookies[RDS_MAX_ZCOOKIES];
+};
+
/*
* Common set of flags for all RDMA related structs
*/
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 843e29aa3cac..9b15005955fa 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -541,9 +541,19 @@ struct tcmsg {
int tcm_ifindex;
__u32 tcm_handle;
__u32 tcm_parent;
+/* tcm_block_index is used instead of tcm_parent
+ * in case tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK
+ */
+#define tcm_block_index tcm_parent
__u32 tcm_info;
};
+/* For manipulation of filters in shared block, tcm_ifindex is set to
+ * TCM_IFINDEX_MAGIC_BLOCK, and tcm_parent is aliased to tcm_block_index
+ * which is the block index.
+ */
+#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU)
+
enum {
TCA_UNSPEC,
TCA_KIND,
@@ -558,6 +568,8 @@ enum {
TCA_DUMP_INVISIBLE,
TCA_CHAIN,
TCA_HW_OFFLOAD,
+ TCA_INGRESS_BLOCK,
+ TCA_EGRESS_BLOCK,
__TCA_MAX
};
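
Illustration of the shared-block addressing described above: the tcmsg gets the magic ifindex and the block index goes where tcm_parent normally lives. Building and sending the full RTM_*TFILTER message is left out, and fill_block_tcmsg is just an illustrative helper.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

static void fill_block_tcmsg(struct tcmsg *tcm, unsigned int block_index)
{
	memset(tcm, 0, sizeof(*tcm));
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
	tcm->tcm_block_index = block_index;	/* alias of tcm_parent */
}

int main(void)
{
	struct tcmsg tcm;

	fill_block_tcmsg(&tcm, 22);
	printf("ifindex=%d block=%u\n", tcm.tcm_ifindex, tcm.tcm_block_index);
	return 0;
}
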
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 30a9e51bbb1e..22627f80063e 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -49,5 +49,10 @@
*/
#define SCHED_FLAG_RESET_ON_FORK 0x01
#define SCHED_FLAG_RECLAIM 0x02
+#define SCHED_FLAG_DL_OVERRUN 0x04
+
+#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
+ SCHED_FLAG_RECLAIM | \
+ SCHED_FLAG_DL_OVERRUN)
#endif /* _UAPI_LINUX_SCHED_H */
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d9adab32dbee..b64d583bf053 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -99,6 +99,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_RECVRCVINFO 32
#define SCTP_RECVNXTINFO 33
#define SCTP_DEFAULT_SNDINFO 34
+#define SCTP_AUTH_DEACTIVATE_KEY 35
/* Internal Socket Options. Some of the sctp library functions are
* implemented using these socket options.
@@ -125,6 +126,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_SOCKOPT_PEELOFF_FLAGS 122
#define SCTP_STREAM_SCHEDULER 123
#define SCTP_STREAM_SCHEDULER_VALUE 124
+#define SCTP_INTERLEAVING_SUPPORTED 125
+#define SCTP_SENDMSG_CONNECT 126
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -259,6 +262,31 @@ struct sctp_nxtinfo {
sctp_assoc_t nxt_assoc_id;
};
+/* 5.3.7 SCTP PR-SCTP Information Structure (SCTP_PRINFO)
+ *
+ * This cmsghdr structure specifies SCTP options for sendmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ -------------------
+ * IPPROTO_SCTP SCTP_PRINFO struct sctp_prinfo
+ */
+struct sctp_prinfo {
+ __u16 pr_policy;
+ __u32 pr_value;
+};
+
+/* 5.3.8 SCTP AUTH Information Structure (SCTP_AUTHINFO)
+ *
+ * This cmsghdr structure specifies SCTP options for sendmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ -------------------
+ * IPPROTO_SCTP SCTP_AUTHINFO struct sctp_authinfo
+ */
+struct sctp_authinfo {
+ __u16 auth_keynumber;
+};
+
/*
* sinfo_flags: 16 bits (unsigned integer)
*
@@ -270,6 +298,8 @@ enum sctp_sinfo_flags {
SCTP_ADDR_OVER = (1 << 1), /* Override the primary destination. */
SCTP_ABORT = (1 << 2), /* Send an ABORT message to the peer. */
SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */
+ /* 2 bits here have been used by SCTP_PR_SCTP_MASK */
+ SCTP_SENDALL = (1 << 6),
SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */
SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */
};
@@ -292,6 +322,14 @@ typedef enum sctp_cmsg_type {
#define SCTP_RCVINFO SCTP_RCVINFO
SCTP_NXTINFO, /* 5.3.6 SCTP Next Receive Information Structure */
#define SCTP_NXTINFO SCTP_NXTINFO
+ SCTP_PRINFO, /* 5.3.7 SCTP PR-SCTP Information Structure */
+#define SCTP_PRINFO SCTP_PRINFO
+ SCTP_AUTHINFO, /* 5.3.8 SCTP AUTH Information Structure */
+#define SCTP_AUTHINFO SCTP_AUTHINFO
+ SCTP_DSTADDRV4, /* 5.3.9 SCTP Destination IPv4 Address Structure */
+#define SCTP_DSTADDRV4 SCTP_DSTADDRV4
+ SCTP_DSTADDRV6, /* 5.3.10 SCTP Destination IPv6 Address Structure */
+#define SCTP_DSTADDRV6 SCTP_DSTADDRV6
} sctp_cmsg_t;
/*
@@ -459,6 +497,8 @@ struct sctp_pdapi_event {
__u32 pdapi_length;
__u32 pdapi_indication;
sctp_assoc_t pdapi_assoc_id;
+ __u32 pdapi_stream;
+ __u32 pdapi_seq;
};
enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
@@ -479,7 +519,12 @@ struct sctp_authkey_event {
sctp_assoc_t auth_assoc_id;
};
-enum { SCTP_AUTH_NEWKEY = 0, };
+enum {
+ SCTP_AUTH_NEW_KEY,
+#define SCTP_AUTH_NEWKEY SCTP_AUTH_NEW_KEY /* compatible with before */
+ SCTP_AUTH_FREE_KEY,
+ SCTP_AUTH_NO_AUTH,
+};
/*
* 6.1.9. SCTP_SENDER_DRY_EVENT
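
A condensed sketch of attaching PR-SCTP information to a send, following the 5.3.7 description above: the sctp_prinfo travels as a cmsg with level IPPROTO_SCTP and type SCTP_PRINFO. Socket setup and the payload are assumed; send_with_prinfo is an illustrative helper and could, for example, be called with SCTP_PR_SCTP_TTL and a lifetime in milliseconds.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/sctp.h>

static ssize_t send_with_prinfo(int sd, const void *buf, size_t len,
				__u16 policy, __u32 value)
{
	union {
		char buf[CMSG_SPACE(sizeof(struct sctp_prinfo))];
		struct cmsghdr align;
	} control = { .buf = { 0 } };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control.buf,
		.msg_controllen = sizeof(control.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	struct sctp_prinfo prinfo = { .pr_policy = policy, .pr_value = value };

	/* per the comment block above: IPPROTO_SCTP / SCTP_PRINFO cmsg */
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_PRINFO;
	cmsg->cmsg_len = CMSG_LEN(sizeof(prinfo));
	memcpy(CMSG_DATA(cmsg), &prinfo, sizeof(prinfo));

	return sendmsg(sd, &msg, 0);
}
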
diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h
index 9c3e745b0656..39a1876f039e 100644
--- a/include/uapi/linux/sem.h
+++ b/include/uapi/linux/sem.h
@@ -19,6 +19,7 @@
/* ipcs ctl cmds */
#define SEM_STAT 18
#define SEM_INFO 19
+#define SEM_STAT_ANY 20
/* Obsolete, used only for backwards compatibility and libc5 compiles */
struct semid_ds {
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 1c8413f93e3d..dce5f9dae121 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -76,6 +76,9 @@
#define PORT_SUNZILOG 38
#define PORT_SUNSAB 39
+/* Nuvoton UART */
+#define PORT_NPCM 40
+
/* Intel EG20 */
#define PORT_PCH_8LINE 44
#define PORT_PCH_2LINE 45
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index 4de12a39b075..dde1344f047c 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -83,8 +83,9 @@ struct shmid_ds {
#define SHM_UNLOCK 12
/* ipcs ctl commands */
-#define SHM_STAT 13
-#define SHM_INFO 14
+#define SHM_STAT 13
+#define SHM_INFO 14
+#define SHM_STAT_ANY 15
/* Obsolete, used only for backwards compatibility */
struct shminfo {
diff --git a/include/uapi/linux/stm.h b/include/uapi/linux/stm.h
index dbffdc23d804..7bac318b4440 100644
--- a/include/uapi/linux/stm.h
+++ b/include/uapi/linux/stm.h
@@ -3,15 +3,6 @@
* System Trace Module (STM) userspace interfaces
* Copyright (c) 2014, Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
* STM class implements generic infrastructure for System Trace Module devices
* as defined in MIPI STPv2 specification.
*/
@@ -21,6 +12,10 @@
#include <linux/types.h>
+/* Maximum allowed master and channel values */
+#define STP_MASTER_MAX 0xffff
+#define STP_CHANNEL_MAX 0xffff
+
/**
* struct stp_policy_id - identification for the STP policy
* @size: size of the structure including real id[] length
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index 75df44373034..4f4daf8db954 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -88,7 +88,8 @@ struct switchtec_ioctl_event_summary {
#define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26
#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27
#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28
-#define SWITCHTEC_IOCTL_MAX_EVENTS 29
+#define SWITCHTEC_IOCTL_EVENT_GFMS 29
+#define SWITCHTEC_IOCTL_MAX_EVENTS 30
#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
diff --git a/include/uapi/linux/tc_ematch/tc_em_ipt.h b/include/uapi/linux/tc_ematch/tc_em_ipt.h
new file mode 100644
index 000000000000..49a65530992c
--- /dev/null
+++ b/include/uapi/linux/tc_ematch/tc_em_ipt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LINUX_TC_EM_IPT_H
+#define __LINUX_TC_EM_IPT_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+
+enum {
+ TCA_EM_IPT_UNSPEC,
+ TCA_EM_IPT_HOOK,
+ TCA_EM_IPT_MATCH_NAME,
+ TCA_EM_IPT_MATCH_REVISION,
+ TCA_EM_IPT_NFPROTO,
+ TCA_EM_IPT_MATCH_DATA,
+ __TCA_EM_IPT_MAX
+};
+
+#define TCA_EM_IPT_MAX (__TCA_EM_IPT_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index b4a4f64635fa..560374c978f9 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -241,6 +241,9 @@ enum {
TCP_NLA_MIN_RTT, /* minimum RTT */
TCP_NLA_RECUR_RETRANS, /* Recurring retransmits for the current pkt */
TCP_NLA_DELIVERY_RATE_APP_LMT, /* delivery rate application limited ? */
+ TCP_NLA_SNDQ_SIZE, /* Data (bytes) pending in send queue */
+ TCP_NLA_CA_STATE, /* ca_state of socket */
+ TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */
};
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 688782e90140..4b9eb064d7e7 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -50,6 +50,7 @@
#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
#define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */
+#define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */
/*
* TEE Implementation ID
@@ -154,6 +155,13 @@ struct tee_ioctl_buf_data {
*/
#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
+/* Meta parameter carrying extra information about the message. */
+#define TEE_IOCTL_PARAM_ATTR_META 0x100
+
+/* Mask of all known attr bits */
+#define TEE_IOCTL_PARAM_ATTR_MASK \
+ (TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META)
+
/*
* Matches TEEC_LOGIN_* in GP TEE Client API
* Are only defined for GP compliant TEEs
@@ -332,6 +340,35 @@ struct tee_iocl_supp_send_arg {
#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
struct tee_ioctl_buf_data)
+/**
+ * struct tee_ioctl_shm_register_data - Shared memory register argument
+ * @addr: [in] Start address of shared memory to register
+ * @length: [in/out] Length of shared memory to register
+ * @flags: [in/out] Flags to/from registration.
+ * @id: [out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input, and is updated by the
+ * call with the actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_REGISTER below.
+ */
+struct tee_ioctl_shm_register_data {
+ __u64 addr;
+ __u64 length;
+ __u32 flags;
+ __s32 id;
+};
+
+/**
+ * TEE_IOC_SHM_REGISTER - Register shared memory argument
+ *
+ * Registers shared memory between the user space process and the secure OS.
+ *
+ * Returns a file descriptor on success or < 0 on failure.
+ *
+ * The shared memory is unregistered when the descriptor is closed.
+ */
+#define TEE_IOC_SHM_REGISTER _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 9, \
+ struct tee_ioctl_shm_register_data)
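
A user-space sketch of registering an existing buffer through the new ioctl; tee_fd is an already opened TEE device node (e.g. /dev/tee0, an assumption of this example):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/tee.h>

    /* Returns the shared-memory file descriptor; closing it unregisters the buffer. */
    static int register_shm(int tee_fd, void *buf, size_t len, int *shm_id)
    {
    	struct tee_ioctl_shm_register_data data = {
    		.addr   = (uintptr_t)buf,
    		.length = len,
    		.flags  = 0,		/* must be zero on input */
    	};
    	int fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);

    	if (fd >= 0)
    		*shm_id = data.id;	/* identifier filled in by the driver */
    	return fd;
    }
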
/*
* Five syscalls are used when communicating with the TEE driver.
* open(): opens the device associated with the driver
diff --git a/include/uapi/linux/telephony.h b/include/uapi/linux/telephony.h
deleted file mode 100644
index d2c9f7105f4b..000000000000
--- a/include/uapi/linux/telephony.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/******************************************************************************
- *
- * telephony.h
- *
- * Basic Linux Telephony Interface
- *
- * (c) Copyright 1999-2001 Quicknet Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Authors: Ed Okerson, <eokerson@quicknet.net>
- * Greg Herlein, <gherlein@quicknet.net>
- *
- * Contributors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
- * David W. Erhart, <derhart@quicknet.net>
- *
- * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
- * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
- * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET
- * TECHNOLOGIES, INC. HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
- * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
- * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
- *
- *****************************************************************************/
-
-#ifndef TELEPHONY_H
-#define TELEPHONY_H
-
-#define TELEPHONY_VERSION 3013
-
-#define PHONE_VENDOR_IXJ 1
-#define PHONE_VENDOR_QUICKNET PHONE_VENDOR_IXJ
-#define PHONE_VENDOR_VOICETRONIX 2
-#define PHONE_VENDOR_ACULAB 3
-#define PHONE_VENDOR_DIGI 4
-#define PHONE_VENDOR_FRANKLIN 5
-
-/******************************************************************************
- * Vendor Summary Information Area
- *
- * Quicknet Technologies, Inc. - makes low density analog telephony cards
- * with audio compression, POTS and PSTN interfaces (www.quicknet.net)
- *
- * (other vendors following this API shuld add a short description of
- * the telephony products they support under Linux)
- *
- *****************************************************************************/
-#define QTI_PHONEJACK 100
-#define QTI_LINEJACK 300
-#define QTI_PHONEJACK_LITE 400
-#define QTI_PHONEJACK_PCI 500
-#define QTI_PHONECARD 600
-
-/******************************************************************************
-*
-* The capabilities ioctls can inform you of the capabilities of each phone
-* device installed in your system. The PHONECTL_CAPABILITIES ioctl
-* returns an integer value indicating the number of capabilities the
-* device has. The PHONECTL_CAPABILITIES_LIST will fill an array of
-* capability structs with all of its capabilities. The
-* PHONECTL_CAPABILITIES_CHECK takes a single capability struct and returns
-* a TRUE if the device has that capability, otherwise it returns false.
-*
-******************************************************************************/
-typedef enum {
- vendor = 0,
- device,
- port,
- codec,
- dsp
-} phone_cap;
-
-struct phone_capability {
- char desc[80];
- phone_cap captype;
- int cap;
- int handle;
-};
-
-typedef enum {
- pots = 0,
- pstn,
- handset,
- speaker
-} phone_ports;
-
-#define PHONE_CAPABILITIES _IO ('q', 0x80)
-#define PHONE_CAPABILITIES_LIST _IOR ('q', 0x81, struct phone_capability *)
-#define PHONE_CAPABILITIES_CHECK _IOW ('q', 0x82, struct phone_capability *)
-
-typedef struct {
- char month[3];
- char day[3];
- char hour[3];
- char min[3];
- int numlen;
- char number[11];
- int namelen;
- char name[80];
-} PHONE_CID;
-
-#define PHONE_RING _IO ('q', 0x83)
-#define PHONE_HOOKSTATE _IO ('q', 0x84)
-#define PHONE_MAXRINGS _IOW ('q', 0x85, char)
-#define PHONE_RING_CADENCE _IOW ('q', 0x86, short)
-#define OLD_PHONE_RING_START _IO ('q', 0x87)
-#define PHONE_RING_START _IOW ('q', 0x87, PHONE_CID *)
-#define PHONE_RING_STOP _IO ('q', 0x88)
-
-#define USA_RING_CADENCE 0xC0C0
-
-#define PHONE_REC_CODEC _IOW ('q', 0x89, int)
-#define PHONE_REC_START _IO ('q', 0x8A)
-#define PHONE_REC_STOP _IO ('q', 0x8B)
-#define PHONE_REC_DEPTH _IOW ('q', 0x8C, int)
-#define PHONE_FRAME _IOW ('q', 0x8D, int)
-#define PHONE_REC_VOLUME _IOW ('q', 0x8E, int)
-#define PHONE_REC_VOLUME_LINEAR _IOW ('q', 0xDB, int)
-#define PHONE_REC_LEVEL _IO ('q', 0x8F)
-
-#define PHONE_PLAY_CODEC _IOW ('q', 0x90, int)
-#define PHONE_PLAY_START _IO ('q', 0x91)
-#define PHONE_PLAY_STOP _IO ('q', 0x92)
-#define PHONE_PLAY_DEPTH _IOW ('q', 0x93, int)
-#define PHONE_PLAY_VOLUME _IOW ('q', 0x94, int)
-#define PHONE_PLAY_VOLUME_LINEAR _IOW ('q', 0xDC, int)
-#define PHONE_PLAY_LEVEL _IO ('q', 0x95)
-#define PHONE_DTMF_READY _IOR ('q', 0x96, int)
-#define PHONE_GET_DTMF _IOR ('q', 0x97, int)
-#define PHONE_GET_DTMF_ASCII _IOR ('q', 0x98, int)
-#define PHONE_DTMF_OOB _IOW ('q', 0x99, int)
-#define PHONE_EXCEPTION _IOR ('q', 0x9A, int)
-#define PHONE_PLAY_TONE _IOW ('q', 0x9B, char)
-#define PHONE_SET_TONE_ON_TIME _IOW ('q', 0x9C, int)
-#define PHONE_SET_TONE_OFF_TIME _IOW ('q', 0x9D, int)
-#define PHONE_GET_TONE_ON_TIME _IO ('q', 0x9E)
-#define PHONE_GET_TONE_OFF_TIME _IO ('q', 0x9F)
-#define PHONE_GET_TONE_STATE _IO ('q', 0xA0)
-#define PHONE_BUSY _IO ('q', 0xA1)
-#define PHONE_RINGBACK _IO ('q', 0xA2)
-#define PHONE_DIALTONE _IO ('q', 0xA3)
-#define PHONE_CPT_STOP _IO ('q', 0xA4)
-
-#define PHONE_PSTN_SET_STATE _IOW ('q', 0xA4, int)
-#define PHONE_PSTN_GET_STATE _IO ('q', 0xA5)
-
-#define PSTN_ON_HOOK 0
-#define PSTN_RINGING 1
-#define PSTN_OFF_HOOK 2
-#define PSTN_PULSE_DIAL 3
-
-/******************************************************************************
-*
-* The wink duration is tunable with this ioctl. The default wink duration
-* is 320ms. You do not need to use this ioctl if you do not require a
-* different wink duration.
-*
-******************************************************************************/
-#define PHONE_WINK_DURATION _IOW ('q', 0xA6, int)
-#define PHONE_WINK _IOW ('q', 0xAA, int)
-
-/******************************************************************************
-*
-* Codec Definitions
-*
-******************************************************************************/
-typedef enum {
- G723_63 = 1,
- G723_53 = 2,
- TS85 = 3,
- TS48 = 4,
- TS41 = 5,
- G728 = 6,
- G729 = 7,
- ULAW = 8,
- ALAW = 9,
- LINEAR16 = 10,
- LINEAR8 = 11,
- WSS = 12,
- G729B = 13
-} phone_codec;
-
-struct phone_codec_data
-{
- phone_codec type;
- unsigned short buf_min, buf_opt, buf_max;
-};
-
-#define PHONE_QUERY_CODEC _IOWR ('q', 0xA7, struct phone_codec_data *)
-#define PHONE_PSTN_LINETEST _IO ('q', 0xA8)
-
-/******************************************************************************
-*
-* This controls the VAD/CNG functionality of G.723.1. The driver will
-* always pass full size frames, any unused bytes will be padded with zeros,
-* and frames passed to the driver should also be padded with zeros. The
-* frame type is encoded in the least significant two bits of the first
-* WORD of the frame as follows:
-*
-* bits 1-0 Frame Type Data Rate Significant Words
-* 00 0 G.723.1 6.3 12
-* 01 1 G.723.1 5.3 10
-* 10 2 VAD/CNG 2
-* 11 3 Repeat last CNG 2 bits
-*
-******************************************************************************/
-#define PHONE_VAD _IOW ('q', 0xA9, int)
-
-
-/******************************************************************************
-*
-* The exception structure allows us to multiplex multiple events onto the
-* select() exception set. If any of these flags are set select() will
-* return with a positive indication on the exception set. The dtmf_ready
-* bit indicates if there is data waiting in the DTMF buffer. The
-* hookstate bit is set if there is a change in hookstate status, it does not
-* indicate the current state of the hookswitch. The pstn_ring bit
-* indicates that the DAA on a LineJACK card has detected ring voltage on
-* the PSTN port. The caller_id bit indicates that caller_id data has been
-* received and is available. The pstn_wink bit indicates that the DAA on
-* the LineJACK has received a wink from the telco switch. The f0, f1, f2
-* and f3 bits indicate that the filter has been triggered by detecting the
-* frequency programmed into that filter.
-*
-* The remaining bits should be set to zero. They will become defined over time
-* for other interface cards and their needs.
-*
-******************************************************************************/
-struct phone_except
-{
- unsigned int dtmf_ready:1;
- unsigned int hookstate:1;
- unsigned int pstn_ring:1;
- unsigned int caller_id:1;
- unsigned int pstn_wink:1;
- unsigned int f0:1;
- unsigned int f1:1;
- unsigned int f2:1;
- unsigned int f3:1;
- unsigned int flash:1;
- unsigned int fc0:1;
- unsigned int fc1:1;
- unsigned int fc2:1;
- unsigned int fc3:1;
- unsigned int reserved:18;
-};
-
-union telephony_exception {
- struct phone_except bits;
- unsigned int bytes;
-};
-
-
-#endif /* TELEPHONY_H */
-
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 53f8dd84beb5..16a296612ba4 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -43,6 +43,18 @@ struct itimerval {
};
/*
+ * legacy timeval structure, only embedded in structures that
+ * traditionally used 'timeval' to pass time intervals (not absolute
+ * times). Do not add new users. If user space fails to compile
+ * here, this is probably because it is not y2038 safe and needs to
+ * be changed to use another interface.
+ */
+struct __kernel_old_timeval {
+ __kernel_long_t tv_sec;
+ __kernel_long_t tv_usec;
+};
+
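
Since __kernel_long_t follows the native 'long', both members are 32 bits wide on most 32-bit ABIs, which is exactly the legacy layout this structure preserves. A tiny illustrative helper that widens such an interval to 64-bit microseconds:

    /* Illustrative only: widen a legacy interval to a 64-bit microsecond count. */
    static inline __s64 kernel_old_timeval_to_us(const struct __kernel_old_timeval *tv)
    {
    	return (__s64)tv->tv_sec * 1000000 + tv->tv_usec;
    }
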
+/*
* The IDs of the various system clocks (for POSIX.1b interval timers):
*/
#define CLOCK_REALTIME 0
@@ -61,6 +73,7 @@ struct itimerval {
*/
#define CLOCK_SGI_CYCLE 10
#define CLOCK_TAI 11
+#define CLOCK_MONOTONIC_ACTIVE 12
#define MAX_CLOCKS 16
#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 35f79d1f8c3a..bf6d28677cfe 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -45,82 +45,38 @@
* TIPC addressing primitives
*/
-struct tipc_portid {
+struct tipc_socket_addr {
__u32 ref;
__u32 node;
};
-struct tipc_name {
+struct tipc_service_addr {
__u32 type;
__u32 instance;
};
-struct tipc_name_seq {
+struct tipc_service_range {
__u32 type;
__u32 lower;
__u32 upper;
};
-/* TIPC Address Size, Offset, Mask specification for Z.C.N
- */
-#define TIPC_NODE_BITS 12
-#define TIPC_CLUSTER_BITS 12
-#define TIPC_ZONE_BITS 8
-
-#define TIPC_NODE_OFFSET 0
-#define TIPC_CLUSTER_OFFSET TIPC_NODE_BITS
-#define TIPC_ZONE_OFFSET (TIPC_CLUSTER_OFFSET + TIPC_CLUSTER_BITS)
-
-#define TIPC_NODE_SIZE ((1UL << TIPC_NODE_BITS) - 1)
-#define TIPC_CLUSTER_SIZE ((1UL << TIPC_CLUSTER_BITS) - 1)
-#define TIPC_ZONE_SIZE ((1UL << TIPC_ZONE_BITS) - 1)
-
-#define TIPC_NODE_MASK (TIPC_NODE_SIZE << TIPC_NODE_OFFSET)
-#define TIPC_CLUSTER_MASK (TIPC_CLUSTER_SIZE << TIPC_CLUSTER_OFFSET)
-#define TIPC_ZONE_MASK (TIPC_ZONE_SIZE << TIPC_ZONE_OFFSET)
-
-#define TIPC_ZONE_CLUSTER_MASK (TIPC_ZONE_MASK | TIPC_CLUSTER_MASK)
-
-static inline __u32 tipc_addr(unsigned int zone,
- unsigned int cluster,
- unsigned int node)
-{
- return (zone << TIPC_ZONE_OFFSET) |
- (cluster << TIPC_CLUSTER_OFFSET) |
- node;
-}
-
-static inline unsigned int tipc_zone(__u32 addr)
-{
- return addr >> TIPC_ZONE_OFFSET;
-}
-
-static inline unsigned int tipc_cluster(__u32 addr)
-{
- return (addr & TIPC_CLUSTER_MASK) >> TIPC_CLUSTER_OFFSET;
-}
-
-static inline unsigned int tipc_node(__u32 addr)
-{
- return addr & TIPC_NODE_MASK;
-}
-
/*
- * Application-accessible port name types
+ * Application-accessible service types
*/
-#define TIPC_CFG_SRV 0 /* configuration service name type */
-#define TIPC_TOP_SRV 1 /* topology service name type */
-#define TIPC_LINK_STATE 2 /* link state name type */
-#define TIPC_RESERVED_TYPES 64 /* lowest user-publishable name type */
+#define TIPC_NODE_STATE 0 /* node state service type */
+#define TIPC_TOP_SRV 1 /* topology server service type */
+#define TIPC_LINK_STATE 2 /* link state service type */
+#define TIPC_RESERVED_TYPES 64 /* lowest user-allowed service type */
/*
- * Publication scopes when binding port names and port name sequences
+ * Publication scopes when binding service / service range
*/
-
-#define TIPC_ZONE_SCOPE 1
-#define TIPC_CLUSTER_SCOPE 2
-#define TIPC_NODE_SCOPE 3
+enum tipc_scope {
+ TIPC_CLUSTER_SCOPE = 2, /* 0 can also be used */
+ TIPC_NODE_SCOPE = 3
+};
/*
* Limiting values for messages
@@ -152,28 +108,28 @@ static inline unsigned int tipc_node(__u32 addr)
* TIPC topology subscription service definitions
*/
-#define TIPC_SUB_PORTS 0x01 /* filter for port availability */
-#define TIPC_SUB_SERVICE 0x02 /* filter for service availability */
-#define TIPC_SUB_CANCEL 0x04 /* cancel a subscription */
+#define TIPC_SUB_PORTS 0x01 /* filter: evt at each match */
+#define TIPC_SUB_SERVICE 0x02 /* filter: evt at first up/last down */
+#define TIPC_SUB_CANCEL 0x04 /* filter: cancel a subscription */
#define TIPC_WAIT_FOREVER (~0) /* timeout for permanent subscription */
struct tipc_subscr {
- struct tipc_name_seq seq; /* name sequence of interest */
+ struct tipc_service_range seq; /* range of interest */
__u32 timeout; /* subscription duration (in ms) */
__u32 filter; /* bitmask of filter options */
char usr_handle[8]; /* available for subscriber use */
};
#define TIPC_PUBLISHED 1 /* publication event */
-#define TIPC_WITHDRAWN 2 /* withdraw event */
+#define TIPC_WITHDRAWN 2 /* withdrawal event */
#define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */
struct tipc_event {
__u32 event; /* event type */
- __u32 found_lower; /* matching name seq instances */
- __u32 found_upper; /* " " " " */
- struct tipc_portid port; /* associated port */
+ __u32 found_lower; /* matching range */
+ __u32 found_upper; /* " " */
+ struct tipc_socket_addr port; /* associated socket */
struct tipc_subscr s; /* associated subscription */
};
@@ -193,20 +149,20 @@ struct tipc_event {
#define SOL_TIPC 271
#endif
-#define TIPC_ADDR_NAMESEQ 1
-#define TIPC_ADDR_MCAST 1
-#define TIPC_ADDR_NAME 2
-#define TIPC_ADDR_ID 3
+#define TIPC_ADDR_MCAST 1
+#define TIPC_SERVICE_RANGE 1
+#define TIPC_SERVICE_ADDR 2
+#define TIPC_SOCKET_ADDR 3
struct sockaddr_tipc {
unsigned short family;
unsigned char addrtype;
signed char scope;
union {
- struct tipc_portid id;
- struct tipc_name_seq nameseq;
+ struct tipc_socket_addr id;
+ struct tipc_service_range nameseq;
struct {
- struct tipc_name name;
+ struct tipc_service_addr name;
__u32 domain;
} name;
} addr;
@@ -244,7 +200,7 @@ struct sockaddr_tipc {
struct tipc_group_req {
__u32 type; /* group id */
__u32 instance; /* member id */
- __u32 scope; /* zone/cluster/node */
+ __u32 scope; /* cluster/node */
__u32 flags;
};
@@ -260,7 +216,7 @@ struct tipc_group_req {
#define TIPC_MAX_MEDIA_NAME 16
#define TIPC_MAX_IF_NAME 16
#define TIPC_MAX_BEARER_NAME 32
-#define TIPC_MAX_LINK_NAME 60
+#define TIPC_MAX_LINK_NAME 68
#define SIOCGETLINKNAME SIOCPROTOPRIVATE
@@ -269,4 +225,62 @@ struct tipc_sioc_ln_req {
__u32 bearer_id;
char linkname[TIPC_MAX_LINK_NAME];
};
+
+
+/* The macros and functions below are deprecated:
+ */
+
+#define TIPC_CFG_SRV 0
+#define TIPC_ZONE_SCOPE 1
+
+#define TIPC_ADDR_NAMESEQ 1
+#define TIPC_ADDR_NAME 2
+#define TIPC_ADDR_ID 3
+
+#define TIPC_NODE_BITS 12
+#define TIPC_CLUSTER_BITS 12
+#define TIPC_ZONE_BITS 8
+
+#define TIPC_NODE_OFFSET 0
+#define TIPC_CLUSTER_OFFSET TIPC_NODE_BITS
+#define TIPC_ZONE_OFFSET (TIPC_CLUSTER_OFFSET + TIPC_CLUSTER_BITS)
+
+#define TIPC_NODE_SIZE ((1UL << TIPC_NODE_BITS) - 1)
+#define TIPC_CLUSTER_SIZE ((1UL << TIPC_CLUSTER_BITS) - 1)
+#define TIPC_ZONE_SIZE ((1UL << TIPC_ZONE_BITS) - 1)
+
+#define TIPC_NODE_MASK (TIPC_NODE_SIZE << TIPC_NODE_OFFSET)
+#define TIPC_CLUSTER_MASK (TIPC_CLUSTER_SIZE << TIPC_CLUSTER_OFFSET)
+#define TIPC_ZONE_MASK (TIPC_ZONE_SIZE << TIPC_ZONE_OFFSET)
+
+#define TIPC_ZONE_CLUSTER_MASK (TIPC_ZONE_MASK | TIPC_CLUSTER_MASK)
+
+#define tipc_portid tipc_socket_addr
+#define tipc_name tipc_service_addr
+#define tipc_name_seq tipc_service_range
+
+static inline __u32 tipc_addr(unsigned int zone,
+ unsigned int cluster,
+ unsigned int node)
+{
+ return (zone << TIPC_ZONE_OFFSET) |
+ (cluster << TIPC_CLUSTER_OFFSET) |
+ node;
+}
+
+static inline unsigned int tipc_zone(__u32 addr)
+{
+ return addr >> TIPC_ZONE_OFFSET;
+}
+
+static inline unsigned int tipc_cluster(__u32 addr)
+{
+ return (addr & TIPC_CLUSTER_MASK) >> TIPC_CLUSTER_OFFSET;
+}
+
+static inline unsigned int tipc_node(__u32 addr)
+{
+ return addr & TIPC_NODE_MASK;
+}
+
#endif
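
With the renamed addressing types, a server binds to a service range much as before; the sketch below uses a made-up service type (anything at or above TIPC_RESERVED_TYPES is user-publishable). The socket itself would be created with socket(AF_TIPC, SOCK_RDM, 0) or SOCK_SEQPACKET.

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/tipc.h>

    #define EXAMPLE_SERVICE_TYPE	18888	/* arbitrary example type */

    static int bind_service_range(int sd)
    {
    	struct sockaddr_tipc addr;

    	memset(&addr, 0, sizeof(addr));
    	addr.family = AF_TIPC;
    	addr.addrtype = TIPC_SERVICE_RANGE;
    	addr.scope = TIPC_CLUSTER_SCOPE;
    	addr.addr.nameseq.type = EXAMPLE_SERVICE_TYPE;
    	addr.addr.nameseq.lower = 0;
    	addr.addr.nameseq.upper = ~0U;

    	return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
    }
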
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 469aa67a5ecb..0affb682e5e3 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -114,6 +114,13 @@ enum {
TIPC_NLA_SOCK_REF, /* u32 */
TIPC_NLA_SOCK_CON, /* nest */
TIPC_NLA_SOCK_HAS_PUBL, /* flag */
+ TIPC_NLA_SOCK_STAT, /* nest */
+ TIPC_NLA_SOCK_TYPE, /* u32 */
+ TIPC_NLA_SOCK_INO, /* u32 */
+ TIPC_NLA_SOCK_UID, /* u32 */
+ TIPC_NLA_SOCK_TIPC_STATE, /* u32 */
+ TIPC_NLA_SOCK_COOKIE, /* u64 */
+ TIPC_NLA_SOCK_PAD, /* flag */
__TIPC_NLA_SOCK_MAX,
TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1
@@ -162,6 +169,8 @@ enum {
TIPC_NLA_NET_UNSPEC,
TIPC_NLA_NET_ID, /* u32 */
TIPC_NLA_NET_ADDR, /* u32 */
+ TIPC_NLA_NET_NODEID, /* u64 */
+ TIPC_NLA_NET_NODEID_W1, /* u64 */
__TIPC_NLA_NET_MAX,
TIPC_NLA_NET_MAX = __TIPC_NLA_NET_MAX - 1
@@ -238,6 +247,18 @@ enum {
TIPC_NLA_CON_MAX = __TIPC_NLA_CON_MAX - 1
};
+/* Nest, socket statistics info */
+enum {
+ TIPC_NLA_SOCK_STAT_RCVQ, /* u32 */
+ TIPC_NLA_SOCK_STAT_SENDQ, /* u32 */
+ TIPC_NLA_SOCK_STAT_LINK_CONG, /* flag */
+ TIPC_NLA_SOCK_STAT_CONN_CONG, /* flag */
+ TIPC_NLA_SOCK_STAT_DROP, /* u32 */
+
+ __TIPC_NLA_SOCK_STAT_MAX,
+ TIPC_NLA_SOCK_STAT_MAX = __TIPC_NLA_SOCK_STAT_MAX - 1
+};
+
/* Nest, link propreties. Valid for link, media and bearer */
enum {
TIPC_NLA_PROP_UNSPEC,
diff --git a/include/uapi/linux/tipc_sockets_diag.h b/include/uapi/linux/tipc_sockets_diag.h
new file mode 100644
index 000000000000..7678cf2f0dcc
--- /dev/null
+++ b/include/uapi/linux/tipc_sockets_diag.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* AF_TIPC sock_diag interface for querying open sockets */
+
+#ifndef _UAPI__TIPC_SOCKETS_DIAG_H__
+#define _UAPI__TIPC_SOCKETS_DIAG_H__
+
+#include <linux/types.h>
+#include <linux/sock_diag.h>
+
+/* Request */
+struct tipc_sock_diag_req {
+ __u8 sdiag_family; /* must be AF_TIPC */
+ __u8 sdiag_protocol; /* must be 0 */
+ __u16 pad; /* must be 0 */
+ __u32 tidiag_states; /* socket states to query */
+};
+#endif /* _UAPI__TIPC_SOCKETS_DIAG_H__ */
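
A user-space sketch of issuing a dump request with this structure over NETLINK_SOCK_DIAG; the all-ones state mask is an assumption of this example meaning "sockets in any state":

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/sock_diag.h>
    #include <linux/tipc_sockets_diag.h>

    /* Returns the netlink socket; the caller recv()s the dump replies from it. */
    static int request_tipc_sock_dump(void)
    {
    	int nl = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
    	struct {
    		struct nlmsghdr nlh;
    		struct tipc_sock_diag_req req;
    	} msg;

    	if (nl < 0)
    		return -1;

    	memset(&msg, 0, sizeof(msg));
    	msg.nlh.nlmsg_len = sizeof(msg);
    	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
    	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
    	msg.req.sdiag_family = AF_TIPC;
    	msg.req.tidiag_states = ~0U;	/* assumed: report all states */

    	if (send(nl, &msg, sizeof(msg), 0) < 0) {
    		close(nl);
    		return -1;
    	}
    	return nl;
    }
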
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index 293b2cdad88d..c6633e97eca4 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -38,6 +38,7 @@
/* TLS socket options */
#define TLS_TX 1 /* Set transmit parameters */
+#define TLS_RX 2 /* Set receive parameters */
/* Supported versions */
#define TLS_VERSION_MINOR(ver) ((ver) & 0xFF)
@@ -59,6 +60,7 @@
#define TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE 8
#define TLS_SET_RECORD_TYPE 1
+#define TLS_GET_RECORD_TYPE 2
struct tls_crypto_info {
__u16 version;
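
Enabling the new receive direction mirrors the existing TLS_TX flow: the handshake still happens in user space, and the negotiated key material is then handed to the kernel. A sketch assuming AES-128-GCM; SOL_TLS (282) and TCP_ULP (31) are defined below only in case the libc headers in use predate them:

    #include <sys/socket.h>
    #include <netinet/in.h>		/* IPPROTO_TCP */
    #include <netinet/tcp.h>
    #include <linux/tls.h>

    #ifndef TCP_ULP
    #define TCP_ULP 31		/* assumption: older libc headers */
    #endif
    #ifndef SOL_TLS
    #define SOL_TLS 282		/* assumption: older libc headers */
    #endif

    /* 'ci' carries version/cipher_type plus key, iv, salt and rec_seq from the handshake. */
    static int enable_ktls_rx(int sock, const struct tls12_crypto_info_aes_gcm_128 *ci)
    {
    	if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
    		return -1;
    	return setsockopt(sock, SOL_TLS, TLS_RX, ci, sizeof(*ci));
    }
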
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index e3d1d0c78f3c..cd4f0b897a48 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -49,5 +49,11 @@ typedef __u32 __bitwise __wsum;
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
+#ifdef __CHECK_POLL
+typedef unsigned __bitwise __poll_t;
+#else
+typedef unsigned __poll_t;
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_LINUX_TYPES_H */
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 17a022c5b414..3a78e7145689 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -27,6 +27,7 @@
/* bInterfaceProtocol values to denote the version of the standard used */
#define UAC_VERSION_1 0x00
#define UAC_VERSION_2 0x20
+#define UAC_VERSION_3 0x30
/* A.2 Audio Interface Subclass Codes */
#define USB_SUBCLASS_AUDIOCONTROL 0x01
@@ -370,7 +371,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d
{
return (protocol == UAC_VERSION_1) ?
desc->baSourceID[desc->bNrInPins + 4] :
- desc->baSourceID[desc->bNrInPins + 6];
+ 2; /* in UAC2, this value is constant */
}
static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
@@ -378,7 +379,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de
{
return (protocol == UAC_VERSION_1) ?
&desc->baSourceID[desc->bNrInPins + 5] :
- &desc->baSourceID[desc->bNrInPins + 7];
+ &desc->baSourceID[desc->bNrInPins + 6];
}
static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index c4c79aa331bd..d5a5caec8fbc 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -1077,9 +1077,9 @@ struct usb_ptm_cap_descriptor {
#define USB_DT_USB_PTM_ID_SIZE 3
/*
* The size of the descriptor for the Sublink Speed Attribute Count
- * (SSAC) specified in bmAttributes[4:0].
+ * (SSAC) specified in bmAttributes[4:0]. SSAC is zero-based, i.e. it
+ * stores the attribute count minus one.
*/
-#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
+#define USB_DT_USB_SSP_CAP_SIZE(ssac) (12 + (ssac + 1) * 4)
/*-------------------------------------------------------------------------*/
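
The two forms are numerically identical (16 + 4 * ssac == 12 + 4 * (ssac + 1)); the rewrite only makes explicit that the descriptor is a 12-byte fixed part plus one 4-byte sublink speed attribute per entry, with bmAttributes[4:0] holding the count minus one. For example:

    #include <linux/usb/ch9.h>

    /* SSAC = 3 encodes four sublink speed attributes: 12 + 4 * 4 = 28 bytes. */
    _Static_assert(USB_DT_USB_SSP_CAP_SIZE(3) == 28,
    	       "12-byte fixed part plus 4 bytes per attribute");
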
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index 70ed5338d447..964e87217be4 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -79,7 +79,7 @@ struct usbdevfs_connectinfo {
#define USBDEVFS_URB_SHORT_NOT_OK 0x01
#define USBDEVFS_URB_ISO_ASAP 0x02
#define USBDEVFS_URB_BULK_CONTINUATION 0x04
-#define USBDEVFS_URB_NO_FSBR 0x20
+#define USBDEVFS_URB_NO_FSBR 0x20 /* Not used */
#define USBDEVFS_URB_ZERO_PACKET 0x40
#define USBDEVFS_URB_NO_INTERRUPT 0x80
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index 5c04130bb524..e5a7eecef7c3 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -19,7 +19,6 @@
#define _UAPI_LINUX_UUID_H_
#include <linux/types.h>
-#include <linux/string.h>
typedef struct {
__u8 b[16];
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index e80b4655d8cd..020714d2c5bd 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -68,4 +68,30 @@ struct uvc_xu_control_query {
#define UVCIOC_CTRL_MAP _IOWR('u', 0x20, struct uvc_xu_control_mapping)
#define UVCIOC_CTRL_QUERY _IOWR('u', 0x21, struct uvc_xu_control_query)
+/*
+ * Metadata node
+ */
+
+/**
+ * struct uvc_meta_buf - metadata buffer building block
+ * @ns - system timestamp of the payload in nanoseconds
+ * @sof - USB Frame Number
+ * @length - length of the payload header
+ * @flags - payload header flags
+ * @buf - optional device-specific header data
+ *
+ * UVC metadata nodes fill buffers with possibly multiple instances of this
+ * struct. The first two fields are added by the driver; they can be used for
+ * clock synchronisation. The rest is an exact copy of a UVC payload header.
+ * Only complete objects with complete buffers are included. Therefore each
+ * instance is always sizeof(meta->ns) + sizeof(meta->sof) + meta->length
+ * bytes large.
+ */
+struct uvc_meta_buf {
+ __u64 ns;
+ __u16 sof;
+ __u8 length;
+ __u8 flags;
+ __u8 buf[];
+} __packed;
+
#endif
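
A user-space sketch of walking a dequeued metadata buffer built from these records; 'bytesused' would come from the v4l2_buffer returned by VIDIOC_DQBUF on the metadata node:

    #include <stdio.h>
    #include <linux/uvcvideo.h>

    static void walk_uvc_meta(const void *buf, unsigned int bytesused)
    {
    	const unsigned char *p = buf;
    	const unsigned char *end = p + bytesused;

    	while ((size_t)(end - p) >= sizeof(struct uvc_meta_buf)) {
    		const struct uvc_meta_buf *m = (const void *)p;
    		unsigned int rec = sizeof(m->ns) + sizeof(m->sof) + m->length;

    		if (m->length < 2 || p + rec > end)
    			break;	/* malformed record, stop walking */
    		printf("ts %llu ns, SOF %u, header %u bytes\n",
    		       (unsigned long long)m->ns,
    		       (unsigned int)m->sof, (unsigned int)m->length);
    		p += rec;
    	}
    }
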
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index a692623e0236..8d473c979b61 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -67,8 +67,8 @@
/* User-class control IDs */
#define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
-#define V4L2_CID_USER_BASE V4L2_CID_BASE
-#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1)
+#define V4L2_CID_USER_BASE V4L2_CID_BASE
+#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1)
#define V4L2_CID_BRIGHTNESS (V4L2_CID_BASE+0)
#define V4L2_CID_CONTRAST (V4L2_CID_BASE+1)
#define V4L2_CID_SATURATION (V4L2_CID_BASE+2)
@@ -102,7 +102,7 @@ enum v4l2_power_line_frequency {
#define V4L2_CID_HUE_AUTO (V4L2_CID_BASE+25)
#define V4L2_CID_WHITE_BALANCE_TEMPERATURE (V4L2_CID_BASE+26)
#define V4L2_CID_SHARPNESS (V4L2_CID_BASE+27)
-#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28)
+#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28)
#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29)
#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30)
#define V4L2_CID_COLORFX (V4L2_CID_BASE+31)
@@ -194,11 +194,11 @@ enum v4l2_colorfx {
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
-#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
-#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1)
+#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
+#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1)
/* MPEG streams, specific to multiplexed streams */
-#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0)
+#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0)
enum v4l2_mpeg_stream_type {
V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */
@@ -207,26 +207,26 @@ enum v4l2_mpeg_stream_type {
V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */
};
-#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1)
-#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2)
-#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3)
-#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4)
-#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5)
-#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6)
-#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7)
+#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1)
+#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2)
+#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3)
+#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4)
+#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5)
+#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6)
+#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7)
enum v4l2_mpeg_stream_vbi_fmt {
V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */
V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */
};
/* MPEG audio controls specific to multiplexed streams */
-#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100)
+#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100)
enum v4l2_mpeg_audio_sampling_freq {
V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0,
V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1,
V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2,
};
-#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101)
+#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101)
enum v4l2_mpeg_audio_encoding {
V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0,
V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1,
@@ -234,7 +234,7 @@ enum v4l2_mpeg_audio_encoding {
V4L2_MPEG_AUDIO_ENCODING_AAC = 3,
V4L2_MPEG_AUDIO_ENCODING_AC3 = 4,
};
-#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102)
+#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102)
enum v4l2_mpeg_audio_l1_bitrate {
V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1,
@@ -251,7 +251,7 @@ enum v4l2_mpeg_audio_l1_bitrate {
V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12,
V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103)
+#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103)
enum v4l2_mpeg_audio_l2_bitrate {
V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1,
@@ -268,7 +268,7 @@ enum v4l2_mpeg_audio_l2_bitrate {
V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12,
V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104)
+#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104)
enum v4l2_mpeg_audio_l3_bitrate {
V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1,
@@ -285,32 +285,32 @@ enum v4l2_mpeg_audio_l3_bitrate {
V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12,
V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13,
};
-#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105)
+#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105)
enum v4l2_mpeg_audio_mode {
V4L2_MPEG_AUDIO_MODE_STEREO = 0,
V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1,
V4L2_MPEG_AUDIO_MODE_DUAL = 2,
V4L2_MPEG_AUDIO_MODE_MONO = 3,
};
-#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106)
+#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106)
enum v4l2_mpeg_audio_mode_extension {
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3,
};
-#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107)
+#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107)
enum v4l2_mpeg_audio_emphasis {
V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0,
V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1,
V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2,
};
-#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108)
+#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108)
enum v4l2_mpeg_audio_crc {
V4L2_MPEG_AUDIO_CRC_NONE = 0,
V4L2_MPEG_AUDIO_CRC_CRC16 = 1,
};
-#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109)
+#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109)
#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_MPEG_BASE+110)
#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_MPEG_BASE+111)
enum v4l2_mpeg_audio_ac3_bitrate {
@@ -346,33 +346,33 @@ enum v4l2_mpeg_audio_dec_playback {
#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113)
/* MPEG video controls specific to multiplexed streams */
-#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200)
+#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200)
enum v4l2_mpeg_video_encoding {
V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0,
V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1,
V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2,
};
-#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201)
+#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201)
enum v4l2_mpeg_video_aspect {
V4L2_MPEG_VIDEO_ASPECT_1x1 = 0,
V4L2_MPEG_VIDEO_ASPECT_4x3 = 1,
V4L2_MPEG_VIDEO_ASPECT_16x9 = 2,
V4L2_MPEG_VIDEO_ASPECT_221x100 = 3,
};
-#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202)
-#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203)
-#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204)
-#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205)
-#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206)
+#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202)
+#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203)
+#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204)
+#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206)
enum v4l2_mpeg_video_bitrate_mode {
V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1,
};
-#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207)
-#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208)
+#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207)
+#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208)
#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209)
-#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210)
-#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211)
+#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210)
+#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211)
#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_MPEG_BASE+212)
#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_MPEG_BASE+213)
#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_MPEG_BASE+214)
@@ -589,15 +589,107 @@ enum v4l2_vp8_golden_frame_sel {
#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511)
+/* CIDs for HEVC encoding. */
+
+#define V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (V4L2_CID_MPEG_BASE + 600)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (V4L2_CID_MPEG_BASE + 601)
+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (V4L2_CID_MPEG_BASE + 602)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (V4L2_CID_MPEG_BASE + 603)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (V4L2_CID_MPEG_BASE + 604)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (V4L2_CID_MPEG_BASE + 605)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE (V4L2_CID_MPEG_BASE + 606)
+enum v4l2_mpeg_video_hevc_hier_coding_type {
+ V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B = 0,
+ V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P = 1,
+};
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (V4L2_CID_MPEG_BASE + 607)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (V4L2_CID_MPEG_BASE + 608)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (V4L2_CID_MPEG_BASE + 609)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (V4L2_CID_MPEG_BASE + 610)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (V4L2_CID_MPEG_BASE + 611)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (V4L2_CID_MPEG_BASE + 612)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (V4L2_CID_MPEG_BASE + 613)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (V4L2_CID_MPEG_BASE + 614)
+#define V4L2_CID_MPEG_VIDEO_HEVC_PROFILE (V4L2_CID_MPEG_BASE + 615)
+enum v4l2_mpeg_video_hevc_profile {
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN = 0,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE = 1,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_HEVC_LEVEL (V4L2_CID_MPEG_BASE + 616)
+enum v4l2_mpeg_video_hevc_level {
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_1 = 0,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_2 = 1,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1 = 2,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_3 = 3,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1 = 4,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_4 = 5,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1 = 6,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5 = 7,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 = 8,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2 = 9,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6 = 10,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 = 11,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 = 12,
+};
+#define V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (V4L2_CID_MPEG_BASE + 617)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TIER (V4L2_CID_MPEG_BASE + 618)
+enum v4l2_mpeg_video_hevc_tier {
+ V4L2_MPEG_VIDEO_HEVC_TIER_MAIN = 0,
+ V4L2_MPEG_VIDEO_HEVC_TIER_HIGH = 1,
+};
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (V4L2_CID_MPEG_BASE + 619)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE (V4L2_CID_MPEG_BASE + 620)
+enum v4l2_cid_mpeg_video_hevc_loop_filter_mode {
+ V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED = 0,
+ V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED = 1,
+ V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (V4L2_CID_MPEG_BASE + 621)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (V4L2_CID_MPEG_BASE + 622)
+#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE (V4L2_CID_MPEG_BASE + 623)
+enum v4l2_cid_mpeg_video_hevc_refresh_type {
+ V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE = 0,
+ V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA = 1,
+ V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (V4L2_CID_MPEG_BASE + 624)
+#define V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (V4L2_CID_MPEG_BASE + 625)
+#define V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (V4L2_CID_MPEG_BASE + 626)
+#define V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (V4L2_CID_MPEG_BASE + 627)
+#define V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (V4L2_CID_MPEG_BASE + 628)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (V4L2_CID_MPEG_BASE + 629)
+#define V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (V4L2_CID_MPEG_BASE + 630)
+#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (V4L2_CID_MPEG_BASE + 631)
+#define V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT (V4L2_CID_MPEG_BASE + 632)
+#define V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (V4L2_CID_MPEG_BASE + 633)
+#define V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (V4L2_CID_MPEG_BASE + 634)
+#define V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD (V4L2_CID_MPEG_BASE + 635)
+enum v4l2_cid_mpeg_video_hevc_size_of_length_field {
+ V4L2_MPEG_VIDEO_HEVC_SIZE_0 = 0,
+ V4L2_MPEG_VIDEO_HEVC_SIZE_1 = 1,
+ V4L2_MPEG_VIDEO_HEVC_SIZE_2 = 2,
+ V4L2_MPEG_VIDEO_HEVC_SIZE_4 = 3,
+};
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (V4L2_CID_MPEG_BASE + 636)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (V4L2_CID_MPEG_BASE + 637)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (V4L2_CID_MPEG_BASE + 638)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (V4L2_CID_MPEG_BASE + 639)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (V4L2_CID_MPEG_BASE + 640)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (V4L2_CID_MPEG_BASE + 641)
+#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (V4L2_CID_MPEG_BASE + 642)
+#define V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (V4L2_CID_MPEG_BASE + 643)
+#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_MPEG_BASE + 644)
+
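
Applications drive the new HEVC controls through the extended-control ioctls like any other codec control; a sketch selecting the Main profile on an encoder node:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_hevc_main_profile(int fd)
    {
    	struct v4l2_ext_control ctrl;
    	struct v4l2_ext_controls ctrls;

    	memset(&ctrl, 0, sizeof(ctrl));
    	memset(&ctrls, 0, sizeof(ctrls));
    	ctrl.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE;
    	ctrl.value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN;
    	ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
    	ctrls.count = 1;
    	ctrls.controls = &ctrl;

    	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }
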
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
-#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
+#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0,
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2)
enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
@@ -605,18 +697,18 @@ enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3)
enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4)
enum v4l2_mpeg_cx2341x_video_temporal_filter_mode {
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0,
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6)
enum v4l2_mpeg_cx2341x_video_median_filter_type {
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1,
@@ -624,11 +716,11 @@ enum v4l2_mpeg_cx2341x_video_median_filter_type {
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4,
};
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8)
#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9)
-#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10)
-#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11)
+#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10)
+#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11)
/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */
#define V4L2_CID_MPEG_MFC51_BASE (V4L2_CTRL_CLASS_MPEG | 0x1100)
@@ -657,11 +749,10 @@ enum v4l2_mpeg_mfc51_video_force_frame_type {
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_MPEG_MFC51_BASE+53)
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_MPEG_MFC51_BASE+54)
-
/* Camera class control IDs */
-#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
-#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
+#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
+#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
#define V4L2_CID_EXPOSURE_AUTO (V4L2_CID_CAMERA_CLASS_BASE+1)
enum v4l2_exposure_auto_type {
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 6e20de63ec59..123a231001a8 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -18,8 +18,8 @@
/**
* struct v4l2_mbus_framefmt - frame format on the media bus
- * @width: frame width
- * @height: frame height
+ * @width: image width
+ * @height: image height
* @code: data format code (from enum v4l2_mbus_pixelcode)
* @field: used interlacing type (from enum v4l2_field)
* @colorspace: colorspace of the data (from enum v4l2_colorspace)
diff --git a/include/uapi/linux/vbox_err.h b/include/uapi/linux/vbox_err.h
new file mode 100644
index 000000000000..7eae536ff1e6
--- /dev/null
+++ b/include/uapi/linux/vbox_err.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2017 Oracle Corporation */
+
+#ifndef __UAPI_VBOX_ERR_H__
+#define __UAPI_VBOX_ERR_H__
+
+#define VINF_SUCCESS 0
+#define VERR_GENERAL_FAILURE (-1)
+#define VERR_INVALID_PARAMETER (-2)
+#define VERR_INVALID_MAGIC (-3)
+#define VERR_INVALID_HANDLE (-4)
+#define VERR_LOCK_FAILED (-5)
+#define VERR_INVALID_POINTER (-6)
+#define VERR_IDT_FAILED (-7)
+#define VERR_NO_MEMORY (-8)
+#define VERR_ALREADY_LOADED (-9)
+#define VERR_PERMISSION_DENIED (-10)
+#define VERR_VERSION_MISMATCH (-11)
+#define VERR_NOT_IMPLEMENTED (-12)
+#define VERR_INVALID_FLAGS (-13)
+
+#define VERR_NOT_EQUAL (-18)
+#define VERR_NOT_SYMLINK (-19)
+#define VERR_NO_TMP_MEMORY (-20)
+#define VERR_INVALID_FMODE (-21)
+#define VERR_WRONG_ORDER (-22)
+#define VERR_NO_TLS_FOR_SELF (-23)
+#define VERR_FAILED_TO_SET_SELF_TLS (-24)
+#define VERR_NO_CONT_MEMORY (-26)
+#define VERR_NO_PAGE_MEMORY (-27)
+#define VERR_THREAD_IS_DEAD (-29)
+#define VERR_THREAD_NOT_WAITABLE (-30)
+#define VERR_PAGE_TABLE_NOT_PRESENT (-31)
+#define VERR_INVALID_CONTEXT (-32)
+#define VERR_TIMER_BUSY (-33)
+#define VERR_ADDRESS_CONFLICT (-34)
+#define VERR_UNRESOLVED_ERROR (-35)
+#define VERR_INVALID_FUNCTION (-36)
+#define VERR_NOT_SUPPORTED (-37)
+#define VERR_ACCESS_DENIED (-38)
+#define VERR_INTERRUPTED (-39)
+#define VERR_TIMEOUT (-40)
+#define VERR_BUFFER_OVERFLOW (-41)
+#define VERR_TOO_MUCH_DATA (-42)
+#define VERR_MAX_THRDS_REACHED (-43)
+#define VERR_MAX_PROCS_REACHED (-44)
+#define VERR_SIGNAL_REFUSED (-45)
+#define VERR_SIGNAL_PENDING (-46)
+#define VERR_SIGNAL_INVALID (-47)
+#define VERR_STATE_CHANGED (-48)
+#define VERR_INVALID_UUID_FORMAT (-49)
+#define VERR_PROCESS_NOT_FOUND (-50)
+#define VERR_PROCESS_RUNNING (-51)
+#define VERR_TRY_AGAIN (-52)
+#define VERR_PARSE_ERROR (-53)
+#define VERR_OUT_OF_RANGE (-54)
+#define VERR_NUMBER_TOO_BIG (-55)
+#define VERR_NO_DIGITS (-56)
+#define VERR_NEGATIVE_UNSIGNED (-57)
+#define VERR_NO_TRANSLATION (-58)
+
+#define VERR_NOT_FOUND (-78)
+#define VERR_INVALID_STATE (-79)
+#define VERR_OUT_OF_RESOURCES (-80)
+
+#define VERR_FILE_NOT_FOUND (-102)
+#define VERR_PATH_NOT_FOUND (-103)
+#define VERR_INVALID_NAME (-104)
+#define VERR_ALREADY_EXISTS (-105)
+#define VERR_TOO_MANY_OPEN_FILES (-106)
+#define VERR_SEEK (-107)
+#define VERR_NEGATIVE_SEEK (-108)
+#define VERR_SEEK_ON_DEVICE (-109)
+#define VERR_EOF (-110)
+#define VERR_READ_ERROR (-111)
+#define VERR_WRITE_ERROR (-112)
+#define VERR_WRITE_PROTECT (-113)
+#define VERR_SHARING_VIOLATION (-114)
+#define VERR_FILE_LOCK_FAILED (-115)
+#define VERR_FILE_LOCK_VIOLATION (-116)
+#define VERR_CANT_CREATE (-117)
+#define VERR_CANT_DELETE_DIRECTORY (-118)
+#define VERR_NOT_SAME_DEVICE (-119)
+#define VERR_FILENAME_TOO_LONG (-120)
+#define VERR_MEDIA_NOT_PRESENT (-121)
+#define VERR_MEDIA_NOT_RECOGNIZED (-122)
+#define VERR_FILE_NOT_LOCKED (-123)
+#define VERR_FILE_LOCK_LOST (-124)
+#define VERR_DIR_NOT_EMPTY (-125)
+#define VERR_NOT_A_DIRECTORY (-126)
+#define VERR_IS_A_DIRECTORY (-127)
+#define VERR_FILE_TOO_BIG (-128)
+
+#define VERR_NET_IO_ERROR (-400)
+#define VERR_NET_OUT_OF_RESOURCES (-401)
+#define VERR_NET_HOST_NOT_FOUND (-402)
+#define VERR_NET_PATH_NOT_FOUND (-403)
+#define VERR_NET_PRINT_ERROR (-404)
+#define VERR_NET_NO_NETWORK (-405)
+#define VERR_NET_NOT_UNIQUE_NAME (-406)
+
+#define VERR_NET_IN_PROGRESS (-436)
+#define VERR_NET_ALREADY_IN_PROGRESS (-437)
+#define VERR_NET_NOT_SOCKET (-438)
+#define VERR_NET_DEST_ADDRESS_REQUIRED (-439)
+#define VERR_NET_MSG_SIZE (-440)
+#define VERR_NET_PROTOCOL_TYPE (-441)
+#define VERR_NET_PROTOCOL_NOT_AVAILABLE (-442)
+#define VERR_NET_PROTOCOL_NOT_SUPPORTED (-443)
+#define VERR_NET_SOCKET_TYPE_NOT_SUPPORTED (-444)
+#define VERR_NET_OPERATION_NOT_SUPPORTED (-445)
+#define VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED (-446)
+#define VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED (-447)
+#define VERR_NET_ADDRESS_IN_USE (-448)
+#define VERR_NET_ADDRESS_NOT_AVAILABLE (-449)
+#define VERR_NET_DOWN (-450)
+#define VERR_NET_UNREACHABLE (-451)
+#define VERR_NET_CONNECTION_RESET (-452)
+#define VERR_NET_CONNECTION_ABORTED (-453)
+#define VERR_NET_CONNECTION_RESET_BY_PEER (-454)
+#define VERR_NET_NO_BUFFER_SPACE (-455)
+#define VERR_NET_ALREADY_CONNECTED (-456)
+#define VERR_NET_NOT_CONNECTED (-457)
+#define VERR_NET_SHUTDOWN (-458)
+#define VERR_NET_TOO_MANY_REFERENCES (-459)
+#define VERR_NET_CONNECTION_TIMED_OUT (-460)
+#define VERR_NET_CONNECTION_REFUSED (-461)
+#define VERR_NET_HOST_DOWN (-464)
+#define VERR_NET_HOST_UNREACHABLE (-465)
+#define VERR_NET_PROTOCOL_ERROR (-466)
+#define VERR_NET_INCOMPLETE_TX_PACKET (-467)
+
+/* misc. unsorted codes */
+#define VERR_RESOURCE_BUSY (-138)
+#define VERR_DISK_FULL (-152)
+#define VERR_TOO_MANY_SYMLINKS (-156)
+#define VERR_NO_MORE_FILES (-201)
+#define VERR_INTERNAL_ERROR (-225)
+#define VERR_INTERNAL_ERROR_2 (-226)
+#define VERR_INTERNAL_ERROR_3 (-227)
+#define VERR_INTERNAL_ERROR_4 (-228)
+#define VERR_DEV_IO_ERROR (-250)
+#define VERR_IO_BAD_LENGTH (-255)
+#define VERR_BROKEN_PIPE (-301)
+#define VERR_NO_DATA (-304)
+#define VERR_SEM_DESTROYED (-363)
+#define VERR_DEADLOCK (-365)
+#define VERR_BAD_EXE_FORMAT (-608)
+#define VINF_HGCM_ASYNC_EXECUTE (2903)
+
+#endif
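
Code that consumes these status codes usually folds them into errno values; the mapping below is purely illustrative and not the in-tree translation table:

    #include <errno.h>
    #include <linux/vbox_err.h>

    static int vbox_status_to_errno(int rc)
    {
    	if (rc >= 0)			/* VINF_* codes signal success */
    		return 0;
    	switch (rc) {
    	case VERR_NO_MEMORY:		return -ENOMEM;
    	case VERR_INVALID_PARAMETER:	return -EINVAL;
    	case VERR_PERMISSION_DENIED:
    	case VERR_ACCESS_DENIED:	return -EPERM;
    	case VERR_TIMEOUT:		return -ETIMEDOUT;
    	case VERR_NOT_SUPPORTED:	return -EOPNOTSUPP;
    	default:			return -EIO;
    	}
    }
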
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
new file mode 100644
index 000000000000..0e68024f36c7
--- /dev/null
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * Virtual Device for Guest <-> VMM/Host communication, type definitions
+ * which are also used for the vboxguest ioctl interface / by vboxsf
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#ifndef __UAPI_VBOX_VMMDEV_TYPES_H__
+#define __UAPI_VBOX_VMMDEV_TYPES_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/types.h>
+
+/*
+ * We cannot use linux' compiletime_assert here because it expects to be used
+ * inside a function only. Use a typedef to a char array with a negative size.
+ */
+#define VMMDEV_ASSERT_SIZE(type, size) \
+ typedef char type ## _asrt_size[1 - 2*!!(sizeof(struct type) != (size))]
+
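
The macro trades a readable diagnostic for portability: when the size matches, it declares a harmless one-element char array type; on a mismatch the array length becomes negative and the build fails. For example (the struct name is made up):

    struct demo { __u32 a, b; };

    VMMDEV_ASSERT_SIZE(demo, 8);	/* sizes match: expands to char demo_asrt_size[1] */

    /*
     * VMMDEV_ASSERT_SIZE(demo, 12) would instead expand to
     * "typedef char demo_asrt_size[-1];" and be rejected at compile time.
     */
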
+/** enum vmmdev_request_type - VMMDev request types. */
+enum vmmdev_request_type {
+ VMMDEVREQ_INVALID_REQUEST = 0,
+ VMMDEVREQ_GET_MOUSE_STATUS = 1,
+ VMMDEVREQ_SET_MOUSE_STATUS = 2,
+ VMMDEVREQ_SET_POINTER_SHAPE = 3,
+ VMMDEVREQ_GET_HOST_VERSION = 4,
+ VMMDEVREQ_IDLE = 5,
+ VMMDEVREQ_GET_HOST_TIME = 10,
+ VMMDEVREQ_GET_HYPERVISOR_INFO = 20,
+ VMMDEVREQ_SET_HYPERVISOR_INFO = 21,
+ VMMDEVREQ_REGISTER_PATCH_MEMORY = 22, /* since version 3.0.6 */
+ VMMDEVREQ_DEREGISTER_PATCH_MEMORY = 23, /* since version 3.0.6 */
+ VMMDEVREQ_SET_POWER_STATUS = 30,
+ VMMDEVREQ_ACKNOWLEDGE_EVENTS = 41,
+ VMMDEVREQ_CTL_GUEST_FILTER_MASK = 42,
+ VMMDEVREQ_REPORT_GUEST_INFO = 50,
+ VMMDEVREQ_REPORT_GUEST_INFO2 = 58, /* since version 3.2.0 */
+ VMMDEVREQ_REPORT_GUEST_STATUS = 59, /* since version 3.2.8 */
+ VMMDEVREQ_REPORT_GUEST_USER_STATE = 74, /* since version 4.3 */
+ /* Retrieve a display resize request sent by the host, deprecated. */
+ VMMDEVREQ_GET_DISPLAY_CHANGE_REQ = 51,
+ VMMDEVREQ_VIDEMODE_SUPPORTED = 52,
+ VMMDEVREQ_GET_HEIGHT_REDUCTION = 53,
+ /**
+ * @VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
+ * Retrieve a display resize request sent by the host.
+ *
+ * Queries a display resize request sent from the host. If the
+ * event_ack member is set to true and there is an unqueried request
+ * available for one of the virtual displays, then that request will
+ * be returned. If several displays have unqueried requests, the lowest
+ * numbered display will be chosen first. Only the most recent unseen
+ * request for each display is remembered.
+ * If event_ack is set to false, the last host request queried with
+ * event_ack set is resent, or failing that the most recent request
+ * received from the host. If no host request was ever received, then
+ * all zeros are returned.
+ */
+ VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2 = 54,
+ VMMDEVREQ_REPORT_GUEST_CAPABILITIES = 55,
+ VMMDEVREQ_SET_GUEST_CAPABILITIES = 56,
+ VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */
+ VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */
+ VMMDEVREQ_HGCM_CONNECT = 60,
+ VMMDEVREQ_HGCM_DISCONNECT = 61,
+ VMMDEVREQ_HGCM_CALL32 = 62,
+ VMMDEVREQ_HGCM_CALL64 = 63,
+ VMMDEVREQ_HGCM_CANCEL = 64,
+ VMMDEVREQ_HGCM_CANCEL2 = 65,
+ VMMDEVREQ_VIDEO_ACCEL_ENABLE = 70,
+ VMMDEVREQ_VIDEO_ACCEL_FLUSH = 71,
+ VMMDEVREQ_VIDEO_SET_VISIBLE_REGION = 72,
+ VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ = 73,
+ VMMDEVREQ_QUERY_CREDENTIALS = 100,
+ VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT = 101,
+ VMMDEVREQ_REPORT_GUEST_STATS = 110,
+ VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ = 111,
+ VMMDEVREQ_GET_STATISTICS_CHANGE_REQ = 112,
+ VMMDEVREQ_CHANGE_MEMBALLOON = 113,
+ VMMDEVREQ_GET_VRDPCHANGE_REQ = 150,
+ VMMDEVREQ_LOG_STRING = 200,
+ VMMDEVREQ_GET_CPU_HOTPLUG_REQ = 210,
+ VMMDEVREQ_SET_CPU_HOTPLUG_STATUS = 211,
+ VMMDEVREQ_REGISTER_SHARED_MODULE = 212,
+ VMMDEVREQ_UNREGISTER_SHARED_MODULE = 213,
+ VMMDEVREQ_CHECK_SHARED_MODULES = 214,
+ VMMDEVREQ_GET_PAGE_SHARING_STATUS = 215,
+ VMMDEVREQ_DEBUG_IS_PAGE_SHARED = 216,
+ VMMDEVREQ_GET_SESSION_ID = 217, /* since version 3.2.8 */
+ VMMDEVREQ_WRITE_COREDUMP = 218,
+ VMMDEVREQ_GUEST_HEARTBEAT = 219,
+ VMMDEVREQ_HEARTBEAT_CONFIGURE = 220,
+ /* Ensure the enum is a 32 bit data-type */
+ VMMDEVREQ_SIZEHACK = 0x7fffffff
+};
+
+#if __BITS_PER_LONG == 64
+#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL64
+#else
+#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
+#endif
+
+/** HGCM service location types. */
+enum vmmdev_hgcm_service_location_type {
+ VMMDEV_HGCM_LOC_INVALID = 0,
+ VMMDEV_HGCM_LOC_LOCALHOST = 1,
+ VMMDEV_HGCM_LOC_LOCALHOST_EXISTING = 2,
+ /* Ensure the enum is a 32 bit data-type */
+ VMMDEV_HGCM_LOC_SIZEHACK = 0x7fffffff
+};
+
+/** HGCM host service location. */
+struct vmmdev_hgcm_service_location_localhost {
+ /** Service name */
+ char service_name[128];
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_service_location_localhost, 128);
+
+/** HGCM service location. */
+struct vmmdev_hgcm_service_location {
+ /** Type of the location. */
+ enum vmmdev_hgcm_service_location_type type;
+
+ union {
+ struct vmmdev_hgcm_service_location_localhost localhost;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_service_location, 128 + 4);
+
+/** HGCM function parameter type. */
+enum vmmdev_hgcm_function_parameter_type {
+ VMMDEV_HGCM_PARM_TYPE_INVALID = 0,
+ VMMDEV_HGCM_PARM_TYPE_32BIT = 1,
+ VMMDEV_HGCM_PARM_TYPE_64BIT = 2,
+ /** Deprecated, doesn't work; use PAGELIST. */
+ VMMDEV_HGCM_PARM_TYPE_PHYSADDR = 3,
+ /** In and Out, user-memory */
+ VMMDEV_HGCM_PARM_TYPE_LINADDR = 4,
+ /** In, user-memory (read; host<-guest) */
+ VMMDEV_HGCM_PARM_TYPE_LINADDR_IN = 5,
+ /** Out, user-memory (write; host->guest) */
+ VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT = 6,
+ /** In and Out, kernel-memory */
+ VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL = 7,
+ /** In, kernel-memory (read; host<-guest) */
+ VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN = 8,
+ /** Out, kernel-memory (write; host->guest) */
+ VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT = 9,
+ /** Physical addresses of locked pages for a buffer. */
+ VMMDEV_HGCM_PARM_TYPE_PAGELIST = 10,
+ /* Ensure the enum is a 32 bit data-type */
+ VMMDEV_HGCM_PARM_TYPE_SIZEHACK = 0x7fffffff
+};
+
+/** HGCM function parameter, 32-bit client. */
+struct vmmdev_hgcm_function_parameter32 {
+ enum vmmdev_hgcm_function_parameter_type type;
+ union {
+ __u32 value32;
+ __u64 value64;
+ struct {
+ __u32 size;
+ union {
+ __u32 phys_addr;
+ __u32 linear_addr;
+ } u;
+ } pointer;
+ struct {
+ /** Size of the buffer described by the page list. */
+ __u32 size;
+ /** Relative to the request header. */
+ __u32 offset;
+ } page_list;
+ } u;
+} __packed;
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter32, 4 + 8);
+
+/** HGCM function parameter, 64-bit client. */
+struct vmmdev_hgcm_function_parameter64 {
+ enum vmmdev_hgcm_function_parameter_type type;
+ union {
+ __u32 value32;
+ __u64 value64;
+ struct {
+ __u32 size;
+ union {
+ __u64 phys_addr;
+ __u64 linear_addr;
+ } u;
+ } __packed pointer;
+ struct {
+ /** Size of the buffer described by the page list. */
+ __u32 size;
+ /** Relative to the request header. */
+ __u32 offset;
+ } page_list;
+ } __packed u;
+} __packed;
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter64, 4 + 12);
+
+#if __BITS_PER_LONG == 64
+#define vmmdev_hgcm_function_parameter vmmdev_hgcm_function_parameter64
+#else
+#define vmmdev_hgcm_function_parameter vmmdev_hgcm_function_parameter32
+#endif
+
+#define VMMDEV_HGCM_F_PARM_DIRECTION_NONE 0x00000000U
+#define VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST 0x00000001U
+#define VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST 0x00000002U
+#define VMMDEV_HGCM_F_PARM_DIRECTION_BOTH 0x00000003U
+
+/**
+ * struct vmmdev_hgcm_pagelist - VMMDEV_HGCM_PARM_TYPE_PAGELIST parameters
+ * point to this structure to actually describe the buffer.
+ */
+struct vmmdev_hgcm_pagelist {
+ __u32 flags; /** VMMDEV_HGCM_F_PARM_*. */
+ __u16 offset_first_page; /** Data offset in the first page. */
+ __u16 page_count; /** Number of pages. */
+ __u64 pages[1]; /** Page addresses. */
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_pagelist, 4 + 2 + 2 + 8);
+
+#endif
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
new file mode 100644
index 000000000000..612f0c7d3558
--- /dev/null
+++ b/include/uapi/linux/vboxguest.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * VBoxGuest - VirtualBox Guest Additions Driver Interface.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#ifndef __UAPI_VBOXGUEST_H__
+#define __UAPI_VBOXGUEST_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/ioctl.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_vmmdev_types.h>
+
+/* Version of vbg_ioctl_hdr structure. */
+#define VBG_IOCTL_HDR_VERSION 0x10001
+/* Default request type. Use this for non-VMMDev requests. */
+#define VBG_IOCTL_HDR_TYPE_DEFAULT 0
+
+/**
+ * Common ioctl header.
+ *
+ * This is a mirror of vmmdev_request_header to prevent duplicating data and
+ * needing to verify things multiple times.
+ */
+struct vbg_ioctl_hdr {
+ /** IN: The request input size, and output size if size_out is zero. */
+ __u32 size_in;
+ /** IN: Structure version (VBG_IOCTL_HDR_VERSION) */
+ __u32 version;
+ /** IN: The VMMDev request type or VBG_IOCTL_HDR_TYPE_DEFAULT. */
+ __u32 type;
+ /**
+ * OUT: The VBox status code of the operation, out direction only.
+ * This is a VINF_ or VERR_ value as defined in vbox_err.h.
+ */
+ __s32 rc;
+ /** IN: Output size. Set to zero to use size_in as output size. */
+ __u32 size_out;
+ /** Reserved, MBZ. */
+ __u32 reserved;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_hdr, 24);
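A minimal sketch of how a caller is expected to fill this header (the helper name is invented for illustration; size_out stays zero so size_in doubles as the output size):

/*
 * Sketch only: initialize a vbg_ioctl_hdr for a request whose input and
 * output sizes are both 'size'. size_out is left at zero so size_in is
 * also used as the output size; rc is filled in by the driver.
 */
static inline void vbg_ioctl_hdr_init(struct vbg_ioctl_hdr *hdr, __u32 size)
{
	hdr->size_in  = size;
	hdr->version  = VBG_IOCTL_HDR_VERSION;
	hdr->type     = VBG_IOCTL_HDR_TYPE_DEFAULT;
	hdr->rc       = 0;
	hdr->size_out = 0;
	hdr->reserved = 0;
}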
+
+
+/*
+ * The VBoxGuest I/O control version.
+ *
+ * As usual, the high word contains the major version; changes to it
+ * signify incompatible changes.
+ *
+ * The lower word is the minor version number; it is increased when new
+ * functions are added or existing ones are changed in a backwards
+ * compatible manner.
+ */
+#define VBG_IOC_VERSION 0x00010000u
+
+/**
+ * VBG_IOCTL_DRIVER_VERSION_INFO data structure
+ *
+ * Note that VBG_IOCTL_DRIVER_VERSION_INFO may switch the session to a
+ * backwards compatible interface version if req_version indicates older
+ * client code.
+ */
+struct vbg_ioctl_driver_version_info {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ /** Requested interface version (VBG_IOC_VERSION). */
+ __u32 req_version;
+ /**
+ * Minimum interface version number (typically the
+ * major version part of VBG_IOC_VERSION).
+ */
+ __u32 min_version;
+ /** Reserved, MBZ. */
+ __u32 reserved1;
+ /** Reserved, MBZ. */
+ __u32 reserved2;
+ } in;
+ struct {
+ /** Version for this session (typ. VBG_IOC_VERSION). */
+ __u32 session_version;
+ /** Version of the IDC interface (VBG_IOC_VERSION). */
+ __u32 driver_version;
+ /** The SVN revision of the driver, or 0. */
+ __u32 driver_revision;
+ /** Reserved \#1 (zero until defined). */
+ __u32 reserved1;
+ /** Reserved \#2 (zero until defined). */
+ __u32 reserved2;
+ } out;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20);
+
+#define VBG_IOCTL_DRIVER_VERSION_INFO \
+ _IOWR('V', 0, struct vbg_ioctl_driver_version_info)
+
+
+/* IOCTL to perform a VMM Device request less than 1KB in size. */
+#define VBG_IOCTL_VMMDEV_REQUEST(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 2, s)
+
+
+/* IOCTL to perform a VMM Device request larger than 1KB. */
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+
+
+/** VBG_IOCTL_HGCM_CONNECT data structure. */
+struct vbg_ioctl_hgcm_connect {
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ struct vmmdev_hgcm_service_location loc;
+ } in;
+ struct {
+ __u32 client_id;
+ } out;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_connect, 24 + 132);
+
+#define VBG_IOCTL_HGCM_CONNECT \
+ _IOWR('V', 4, struct vbg_ioctl_hgcm_connect)
+
+
+/** VBG_IOCTL_HGCM_DISCONNECT data structure. */
+struct vbg_ioctl_hgcm_disconnect {
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ __u32 client_id;
+ } in;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_disconnect, 24 + 4);
+
+#define VBG_IOCTL_HGCM_DISCONNECT \
+ _IOWR('V', 5, struct vbg_ioctl_hgcm_disconnect)
+
+
+/** VBG_IOCTL_HGCM_CALL data structure. */
+struct vbg_ioctl_hgcm_call {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ /** Input: The id of the caller. */
+ __u32 client_id;
+ /** Input: Function number. */
+ __u32 function;
+ /**
+ * Input: How long to wait (milliseconds) for completion before
+ * cancelling the call. Set to -1 to wait indefinitely.
+ */
+ __u32 timeout_ms;
+	/** Interruptible flag, ignored for userspace calls. */
+ __u8 interruptible;
+ /** Explicit padding, MBZ. */
+ __u8 reserved;
+ /**
+	 * Input: How many parameters follow this structure.
+ *
+ * The parameters are either HGCMFunctionParameter64 or 32,
+ * depending on whether we're receiving a 64-bit or 32-bit request.
+ *
+ * The current maximum is 61 parameters (given a 1KB max request size,
+ * and a 64-bit parameter size of 16 bytes).
+ */
+ __u16 parm_count;
+ /*
+ * Parameters follow in form:
+ * struct hgcm_function_parameter<32|64> parms[parm_count]
+ */
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_call, 24 + 16);
+
+#define VBG_IOCTL_HGCM_CALL_32(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 6, s)
+#define VBG_IOCTL_HGCM_CALL_64(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 7, s)
+#if __BITS_PER_LONG == 64
+#define VBG_IOCTL_HGCM_CALL(s) VBG_IOCTL_HGCM_CALL_64(s)
+#else
+#define VBG_IOCTL_HGCM_CALL(s) VBG_IOCTL_HGCM_CALL_32(s)
+#endif
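A sketch of how the variable-sized call buffer is sized (the helper name is invented; it relies on the vmmdev_hgcm_function_parameter alias selected above via __BITS_PER_LONG):

#include <stddef.h>

/*
 * Sketch only: total buffer size for an HGCM call carrying parm_count
 * parameters. Both size_in and size_out of the embedded header would
 * normally be set to this value.
 */
static inline size_t vbg_hgcm_call_size(__u16 parm_count)
{
	return sizeof(struct vbg_ioctl_hgcm_call) +
	       (size_t)parm_count *
	       sizeof(struct vmmdev_hgcm_function_parameter);
}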
+
+
+/** VBG_IOCTL_LOG data structure. */
+struct vbg_ioctl_log {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ /**
+			 * The log message. This may be zero-terminated; if it
+			 * is not, the length is determined from the input size.
+ */
+ char msg[1];
+ } in;
+ } u;
+};
+
+#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+
+
+/** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
+struct vbg_ioctl_wait_for_events {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ /** Timeout in milliseconds. */
+ __u32 timeout_ms;
+ /** Events to wait for. */
+ __u32 events;
+ } in;
+ struct {
+ /** Events that occurred. */
+ __u32 events;
+ } out;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_wait_for_events, 24 + 8);
+
+#define VBG_IOCTL_WAIT_FOR_EVENTS \
+ _IOWR('V', 10, struct vbg_ioctl_wait_for_events)
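A hedged userspace sketch of this ioctl, assuming the VirtualBox guest device node has already been opened; error handling is trimmed:

#include <string.h>
#include <sys/ioctl.h>

/*
 * Sketch only: wait up to 5 seconds for any of the requested events.
 * The events that actually fired come back in u.out.events; the VBox
 * status code (VINF_/VERR_) is returned from hdr.rc.
 */
static int wait_for_guest_events(int fd, __u32 event_mask, __u32 *fired)
{
	struct vbg_ioctl_wait_for_events req;

	memset(&req, 0, sizeof(req));
	req.hdr.size_in = sizeof(req);
	req.hdr.version = VBG_IOCTL_HDR_VERSION;
	req.u.in.timeout_ms = 5000;
	req.u.in.events = event_mask;

	if (ioctl(fd, VBG_IOCTL_WAIT_FOR_EVENTS, &req) < 0)
		return -1;

	*fired = req.u.out.events;
	return req.hdr.rc;
}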
+
+
+/*
+ * IOCTL to VBoxGuest to interrupt (cancel) any pending
+ * VBG_IOCTL_WAIT_FOR_EVENTS and return.
+ *
+ * Handled inside the vboxguest driver and not seen by the host at all.
+ * After calling this, VBG_IOCTL_WAIT_FOR_EVENTS should no longer be called in
+ * the same session. Any VBG_IOCTL_WAIT_FOR_EVENTS calls made in the same
+ * session after calling this will return -EINTR immediately.
+ */
+#define VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS \
+ _IOWR('V', 11, struct vbg_ioctl_hdr)
+
+
+/** VBG_IOCTL_CHANGE_FILTER_MASK data structure. */
+struct vbg_ioctl_change_filter {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ /** Flags to set. */
+ __u32 or_mask;
+ /** Flags to remove. */
+ __u32 not_mask;
+ } in;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8);
+
+/* IOCTL to VBoxGuest to control the event filter mask. */
+#define VBG_IOCTL_CHANGE_FILTER_MASK \
+ _IOWR('V', 12, struct vbg_ioctl_change_filter)
+
+
+/** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */
+struct vbg_ioctl_set_guest_caps {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */
+ __u32 or_mask;
+ /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */
+ __u32 not_mask;
+ } in;
+ struct {
+ /** Capabilities held by the session after the call. */
+ __u32 session_caps;
+ /** Capabilities for all the sessions after the call. */
+ __u32 global_caps;
+ } out;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_set_guest_caps, 24 + 8);
+
+#define VBG_IOCTL_CHANGE_GUEST_CAPABILITIES \
+ _IOWR('V', 14, struct vbg_ioctl_set_guest_caps)
+
+
+/** VBG_IOCTL_CHECK_BALLOON data structure. */
+struct vbg_ioctl_check_balloon {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ /** The size of the balloon in chunks of 1MB. */
+ __u32 balloon_chunks;
+ /**
+ * false = handled in R0, no further action required.
+ * true = allocate balloon memory in R3.
+ */
+ __u8 handle_in_r3;
+ /** Explicit padding, MBZ. */
+ __u8 padding[3];
+ } out;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_check_balloon, 24 + 8);
+
+/*
+ * IOCTL to check memory ballooning.
+ *
+ * The guest kernel module will ask the host for the current size of the
+ * balloon and adjust the size. Or it will set handle_in_r3 = true and R3 is
+ * responsible for allocating memory and calling VBG_IOCTL_CHANGE_BALLOON.
+ */
+#define VBG_IOCTL_CHECK_BALLOON \
+ _IOWR('V', 17, struct vbg_ioctl_check_balloon)
+
+
+/** VBG_IOCTL_WRITE_CORE_DUMP data structure. */
+struct vbg_ioctl_write_coredump {
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ __u32 flags; /** Flags (reserved, MBZ). */
+ } in;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_write_coredump, 24 + 4);
+
+#define VBG_IOCTL_WRITE_CORE_DUMP \
+ _IOWR('V', 19, struct vbg_ioctl_write_coredump)
+
+#endif
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index e3301dbd27d4..1aa7b82e8169 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -301,6 +301,16 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
+/*
+ * The MSIX mappable capability informs userspace that the MSIX data of a BAR
+ * can be mmapped, which allows direct access to non-MSIX registers that happen
+ * to be within the same system page.
+ *
+ * Even though userspace gets direct access to the MSIX data, the existing
+ * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
+ */
+#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3
+
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
* struct vfio_irq_info)
@@ -503,6 +513,95 @@ struct vfio_pci_hot_reset {
#define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13)
+/**
+ * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
+ * struct vfio_device_query_gfx_plane)
+ *
+ * Set the drm_plane_type and flags, then retrieve the gfx plane info.
+ *
+ * flags supported:
+ * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
+ *   to ask whether the mdev supports dma-buf: 0 means dma-buf is
+ *   supported, -EINVAL means it is not.
+ * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
+ *   to ask whether the mdev supports a region: 0 means a region is
+ *   supported, -EINVAL means it is not.
+ * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
+ * with each call to query the plane info.
+ * - Others are invalid and return -EINVAL.
+ *
+ * Note:
+ * 1. The plane could be disabled by the guest. In that case, success is
+ *    returned with zero-initialized drm_format, size, width and height
+ *    fields.
+ * 2. x_hot/y_hot are set to 0xFFFFFFFF if no hotspot information is available.
+ *
+ * Return: 0 on success, -errno on other failure.
+ */
+struct vfio_device_gfx_plane_info {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
+#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
+#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
+ /* in */
+ __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
+ /* out */
+ __u32 drm_format; /* drm format of plane */
+ __u64 drm_format_mod; /* tiled mode */
+ __u32 width; /* width of plane */
+ __u32 height; /* height of plane */
+ __u32 stride; /* stride of plane */
+	__u32 size;	/* size of plane in bytes, aligned to page size */
+	__u32 x_pos;	/* horizontal position of cursor plane */
+	__u32 y_pos;	/* vertical position of cursor plane */
+ __u32 x_hot; /* horizontal position of cursor hotspot */
+ __u32 y_hot; /* vertical position of cursor hotspot */
+ union {
+ __u32 region_index; /* region index */
+ __u32 dmabuf_id; /* dma-buf id */
+ };
+};
+
+#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
+
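A minimal userspace sketch of the probe handshake described above, asking whether the mdev supports the dma-buf path (error handling omitted):

#include <string.h>
#include <sys/ioctl.h>

/*
 * Sketch only: returns non-zero if the mdev supports the dma-buf plane
 * path (ioctl succeeds), zero if it does not (ioctl fails with -EINVAL),
 * following the PROBE semantics documented above.
 */
static int vfio_gfx_supports_dmabuf(int device_fd)
{
	struct vfio_device_gfx_plane_info probe;

	memset(&probe, 0, sizeof(probe));
	probe.argsz = sizeof(probe);
	probe.flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF;

	return ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &probe) == 0;
}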
+/**
+ * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
+ *
+ * Return a new dma-buf file descriptor for an exposed guest framebuffer
+ * described by the provided dmabuf_id. The dmabuf_id is returned from
+ * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
+ */
+
+#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
+
+/**
+ * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
+ * struct vfio_device_ioeventfd)
+ *
+ * Perform a write to the device at the specified device fd offset, with
+ * the specified data and width when the provided eventfd is triggered.
+ * vfio bus drivers may not support this for all regions, for all widths,
+ * or at all. vfio-pci currently only enables support for BAR regions,
+ * excluding the MSI-X vector table.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_ioeventfd {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_DEVICE_IOEVENTFD_8 (1 << 0) /* 1-byte write */
+#define VFIO_DEVICE_IOEVENTFD_16 (1 << 1) /* 2-byte write */
+#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */
+#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */
+#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
+ __u64 offset; /* device fd offset of write */
+ __u64 data; /* data to be written */
+ __s32 fd; /* -1 for de-assignment */
+};
+
+#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
+
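A sketch of arming an ioeventfd for a 4-byte write, following the layout above (the offset is whatever doorbell-like register the caller has in mind):

#include <string.h>
#include <sys/ioctl.h>

/*
 * Sketch only: ask the vfio bus driver to write 'value' (4 bytes) at
 * 'offset' within the device fd whenever 'efd' is signalled. Per the
 * comment above, passing fd = -1 de-assigns the ioeventfd again.
 */
static int vfio_arm_ioeventfd(int device_fd, __u64 offset, __u32 value, int efd)
{
	struct vfio_device_ioeventfd ioeventfd;

	memset(&ioeventfd, 0, sizeof(ioeventfd));
	ioeventfd.argsz  = sizeof(ioeventfd);
	ioeventfd.flags  = VFIO_DEVICE_IOEVENTFD_32;
	ioeventfd.offset = offset;
	ioeventfd.data   = value;
	ioeventfd.fd     = efd;

	return ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioeventfd);
}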
/* -------- API for Type1 VFIO IOMMU -------- */
/**
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 1c095b5a99c5..600877be5c22 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -107,14 +107,14 @@ enum v4l2_field {
transmitted first */
};
#define V4L2_FIELD_HAS_TOP(field) \
- ((field) == V4L2_FIELD_TOP ||\
+ ((field) == V4L2_FIELD_TOP ||\
(field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT ||\
(field) == V4L2_FIELD_SEQ_TB ||\
(field) == V4L2_FIELD_SEQ_BT)
#define V4L2_FIELD_HAS_BOTTOM(field) \
- ((field) == V4L2_FIELD_BOTTOM ||\
+ ((field) == V4L2_FIELD_BOTTOM ||\
(field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT ||\
@@ -467,12 +467,12 @@ struct v4l2_capability {
* V I D E O I M A G E F O R M A T
*/
struct v4l2_pix_format {
- __u32 width;
+ __u32 width;
__u32 height;
__u32 pixelformat;
__u32 field; /* enum v4l2_field */
- __u32 bytesperline; /* for padding, zero if unused */
- __u32 sizeimage;
+ __u32 bytesperline; /* for padding, zero if unused */
+ __u32 sizeimage;
__u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
@@ -635,6 +635,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
+#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -669,6 +670,12 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
+/* 10-bit raw Bayer packed, 32 bytes for every 25 pixels, last 6 LSBs unused */
+#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
+#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */
+#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */
+#define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */
+
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
@@ -688,6 +695,7 @@ struct v4l2_pix_format {
/* Meta-data formats */
#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
+#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
/* priv field value to indicates that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -1166,7 +1174,7 @@ typedef __u64 v4l2_std_id;
V4L2_STD_NTSC_M_JP |\
V4L2_STD_NTSC_M_KR)
/* Secam macros */
-#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\
+#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\
V4L2_STD_SECAM_K |\
V4L2_STD_SECAM_K1)
/* All Secam Standards */
@@ -1247,7 +1255,7 @@ struct v4l2_standard {
};
/*
- * D V B T T I M I N G S
+ * D V B T T I M I N G S
*/
/** struct v4l2_bt_timings - BT.656/BT.1120 timing data
@@ -1588,7 +1596,7 @@ struct v4l2_ext_controls {
struct v4l2_ext_control *controls;
};
-#define V4L2_CTRL_ID_MASK (0x0fffffff)
+#define V4L2_CTRL_ID_MASK (0x0fffffff)
#ifndef __KERNEL__
#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL)
#endif
@@ -1660,11 +1668,11 @@ struct v4l2_querymenu {
/* Control flags */
#define V4L2_CTRL_FLAG_DISABLED 0x0001
#define V4L2_CTRL_FLAG_GRABBED 0x0002
-#define V4L2_CTRL_FLAG_READ_ONLY 0x0004
-#define V4L2_CTRL_FLAG_UPDATE 0x0008
-#define V4L2_CTRL_FLAG_INACTIVE 0x0010
-#define V4L2_CTRL_FLAG_SLIDER 0x0020
-#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
+#define V4L2_CTRL_FLAG_READ_ONLY 0x0004
+#define V4L2_CTRL_FLAG_UPDATE 0x0008
+#define V4L2_CTRL_FLAG_INACTIVE 0x0010
+#define V4L2_CTRL_FLAG_SLIDER 0x0020
+#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
@@ -1778,21 +1786,21 @@ struct v4l2_hw_freq_seek {
*/
struct v4l2_rds_data {
- __u8 lsb;
- __u8 msb;
- __u8 block;
+ __u8 lsb;
+ __u8 msb;
+ __u8 block;
} __attribute__ ((packed));
-#define V4L2_RDS_BLOCK_MSK 0x7
-#define V4L2_RDS_BLOCK_A 0
-#define V4L2_RDS_BLOCK_B 1
-#define V4L2_RDS_BLOCK_C 2
-#define V4L2_RDS_BLOCK_D 3
-#define V4L2_RDS_BLOCK_C_ALT 4
-#define V4L2_RDS_BLOCK_INVALID 7
+#define V4L2_RDS_BLOCK_MSK 0x7
+#define V4L2_RDS_BLOCK_A 0
+#define V4L2_RDS_BLOCK_B 1
+#define V4L2_RDS_BLOCK_C 2
+#define V4L2_RDS_BLOCK_D 3
+#define V4L2_RDS_BLOCK_C_ALT 4
+#define V4L2_RDS_BLOCK_INVALID 7
#define V4L2_RDS_BLOCK_CORRECTED 0x40
-#define V4L2_RDS_BLOCK_ERROR 0x80
+#define V4L2_RDS_BLOCK_ERROR 0x80
/*
* A U D I O
@@ -2348,8 +2356,8 @@ struct v4l2_create_buffers {
#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop)
#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression)
#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression)
-#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id)
-#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
+#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id)
+#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio)
#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout)
#define VIDIOC_G_PRIORITY _IOR('V', 67, __u32) /* enum v4l2_priority */
@@ -2370,8 +2378,8 @@ struct v4l2_create_buffers {
* Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
* You must be root to use these ioctls. Never use these in applications!
*/
-#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
-#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
+#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
+#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 343d7ddefe04..40297a3181ed 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -52,7 +52,10 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */
-#define VIRTIO_BALLOON_S_NR 7
+#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
+#define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */
+#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
+#define VIRTIO_BALLOON_S_NR 10
/*
* Memory statistics structure.
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index fc353b518288..5de6ed37695b 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -57,6 +57,8 @@
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */
+
#ifndef VIRTIO_NET_NO_LEGACY
#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
#endif /* VIRTIO_NET_NO_LEGACY */
@@ -76,6 +78,17 @@ struct virtio_net_config {
__u16 max_virtqueue_pairs;
/* Default maximum transmit unit advice */
__u16 mtu;
+ /*
+ * speed, in units of 1Mb. All values 0 to INT_MAX are legal.
+ * Any other value stands for unknown.
+ */
+ __u32 speed;
+ /*
+ * 0x00 - half duplex
+ * 0x01 - full duplex
+ * Any other value stands for unknown.
+ */
+ __u8 duplex;
} __attribute__((packed));
/*
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 49e8fd08855a..56376d3907d8 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -20,20 +20,22 @@ struct cxl_ioctl_start_work {
__u64 work_element_descriptor;
__u64 amr;
__s16 num_interrupts;
- __s16 reserved1;
- __s32 reserved2;
+ __u16 tid;
+ __s32 reserved1;
+ __u64 reserved2;
__u64 reserved3;
__u64 reserved4;
__u64 reserved5;
- __u64 reserved6;
};
#define CXL_START_WORK_AMR 0x0000000000000001ULL
#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
#define CXL_START_WORK_ERR_FF 0x0000000000000004ULL
+#define CXL_START_WORK_TID 0x0000000000000008ULL
#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
CXL_START_WORK_NUM_IRQS |\
- CXL_START_WORK_ERR_FF)
+ CXL_START_WORK_ERR_FF |\
+ CXL_START_WORK_TID)
/* Possible modes that an afu can be in */
diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h
new file mode 100644
index 000000000000..0af83d80fb3e
--- /dev/null
+++ b/include/uapi/misc/ocxl.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/* Copyright 2017 IBM Corp. */
+#ifndef _UAPI_MISC_OCXL_H
+#define _UAPI_MISC_OCXL_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+enum ocxl_event_type {
+ OCXL_AFU_EVENT_XSL_FAULT_ERROR = 0,
+};
+
+#define OCXL_KERNEL_EVENT_FLAG_LAST 0x0001 /* This is the last event pending */
+
+struct ocxl_kernel_event_header {
+ __u16 type;
+ __u16 flags;
+ __u32 reserved;
+};
+
+struct ocxl_kernel_event_xsl_fault_error {
+ __u64 addr;
+ __u64 dsisr;
+ __u64 count;
+ __u64 reserved;
+};
+
+struct ocxl_ioctl_attach {
+ __u64 amr;
+ __u64 reserved1;
+ __u64 reserved2;
+ __u64 reserved3;
+};
+
+struct ocxl_ioctl_metadata {
+ __u16 version; // struct version, always backwards compatible
+
+ // Version 0 fields
+ __u8 afu_version_major;
+ __u8 afu_version_minor;
+ __u32 pasid; // PASID assigned to the current context
+
+ __u64 pp_mmio_size; // Per PASID MMIO size
+ __u64 global_mmio_size;
+
+ // End version 0 fields
+
+ __u64 reserved[13]; // Total of 16*u64
+};
+
+struct ocxl_ioctl_irq_fd {
+ __u64 irq_offset;
+ __s32 eventfd;
+ __u32 reserved;
+};
+
+/* ioctl numbers */
+#define OCXL_MAGIC 0xCA
+/* AFU devices */
+#define OCXL_IOCTL_ATTACH _IOW(OCXL_MAGIC, 0x10, struct ocxl_ioctl_attach)
+#define OCXL_IOCTL_IRQ_ALLOC _IOR(OCXL_MAGIC, 0x11, __u64)
+#define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64)
+#define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd)
+#define OCXL_IOCTL_GET_METADATA _IOR(OCXL_MAGIC, 0x14, struct ocxl_ioctl_metadata)
+
+#endif /* _UAPI_MISC_OCXL_H */
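A brief usage sketch for OCXL_IOCTL_GET_METADATA, assuming an already-opened AFU device fd; only version-0 fields are read:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/*
 * Sketch only: query the AFU metadata and print the per-PASID MMIO size.
 * The struct is versioned, so newer kernels may fill in fields beyond the
 * version-0 ones read here.
 */
static int ocxl_print_metadata(int afu_fd)
{
	struct ocxl_ioctl_metadata md;

	memset(&md, 0, sizeof(md));
	if (ioctl(afu_fd, OCXL_IOCTL_GET_METADATA, &md) < 0)
		return -1;

	printf("AFU %d.%d, PASID %u, per-PASID MMIO %llu bytes\n",
	       md.afu_version_major, md.afu_version_minor, md.pasid,
	       (unsigned long long)md.pp_mmio_size);
	return 0;
}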
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 398a514ee446..a7a6111e50c7 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -53,15 +53,20 @@ struct bnxt_re_uctx_resp {
__u32 rsvd;
};
+/*
+ * This struct is placed after the ib_uverbs_alloc_pd_resp struct, which is
+ * not 8 byte aligned. To avoid undesired padding in various cases we have to
+ * mark this struct as packed.
+ */
struct bnxt_re_pd_resp {
__u32 pdid;
__u32 dpi;
__u64 dbr;
-};
+} __attribute__((packed, aligned(4)));
struct bnxt_re_cq_req {
- __u64 cq_va;
- __u64 cq_handle;
+ __aligned_u64 cq_va;
+ __aligned_u64 cq_handle;
};
struct bnxt_re_cq_resp {
@@ -72,9 +77,9 @@ struct bnxt_re_cq_resp {
};
struct bnxt_re_qp_req {
- __u64 qpsva;
- __u64 qprva;
- __u64 qp_handle;
+ __aligned_u64 qpsva;
+ __aligned_u64 qprva;
+ __aligned_u64 qp_handle;
};
struct bnxt_re_qp_resp {
@@ -82,6 +87,15 @@ struct bnxt_re_qp_resp {
__u32 rsvd;
};
+struct bnxt_re_srq_req {
+ __aligned_u64 srqva;
+ __aligned_u64 srq_handle;
+};
+
+struct bnxt_re_srq_resp {
+ __u32 srqid;
+};
+
enum bnxt_re_shpg_offt {
BNXT_RE_BEG_RESV_OFFT = 0x00,
BNXT_RE_AVID_OFFT = 0x10,
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index d5745e43ae85..9acb4b7a6246 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -41,21 +41,21 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct iwch_create_cq_req {
- __u64 user_rptr_addr;
+ __aligned_u64 user_rptr_addr;
};
struct iwch_create_cq_resp_v0 {
- __u64 key;
+ __aligned_u64 key;
__u32 cqid;
__u32 size_log2;
};
struct iwch_create_cq_resp {
- __u64 key;
+ __aligned_u64 key;
__u32 cqid;
__u32 size_log2;
__u32 memsize;
@@ -63,8 +63,8 @@ struct iwch_create_cq_resp {
};
struct iwch_create_qp_resp {
- __u64 key;
- __u64 db_key;
+ __aligned_u64 key;
+ __aligned_u64 db_key;
__u32 qpid;
__u32 size_log2;
__u32 sq_size_log2;
@@ -74,4 +74,9 @@ struct iwch_create_qp_resp {
struct iwch_reg_user_mr_resp {
__u32 pbl_addr;
};
+
+struct iwch_alloc_pd_resp {
+ __u32 pdid;
+};
+
#endif /* CXGB3_ABI_USER_H */
diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h
index 05f71f1bc119..1fefd0140c26 100644
--- a/include/uapi/rdma/cxgb4-abi.h
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -41,13 +41,13 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct c4iw_create_cq_resp {
- __u64 key;
- __u64 gts_key;
- __u64 memsize;
+ __aligned_u64 key;
+ __aligned_u64 gts_key;
+ __aligned_u64 memsize;
__u32 cqid;
__u32 size;
__u32 qid_mask;
@@ -59,13 +59,13 @@ enum {
};
struct c4iw_create_qp_resp {
- __u64 ma_sync_key;
- __u64 sq_key;
- __u64 rq_key;
- __u64 sq_db_gts_key;
- __u64 rq_db_gts_key;
- __u64 sq_memsize;
- __u64 rq_memsize;
+ __aligned_u64 ma_sync_key;
+ __aligned_u64 sq_key;
+ __aligned_u64 rq_key;
+ __aligned_u64 sq_db_gts_key;
+ __aligned_u64 rq_db_gts_key;
+ __aligned_u64 sq_memsize;
+ __aligned_u64 rq_memsize;
__u32 sqid;
__u32 rqid;
__u32 sq_size;
@@ -75,8 +75,13 @@ struct c4iw_create_qp_resp {
};
struct c4iw_alloc_ucontext_resp {
- __u64 status_page_key;
+ __aligned_u64 status_page_key;
__u32 status_page_size;
__u32 reserved; /* explicit padding (optional for i386) */
};
+
+struct c4iw_alloc_pd_resp {
+ __u32 pdid;
+};
+
#endif /* CXGB4_ABI_USER_H */
diff --git a/include/uapi/rdma/hfi/hfi1_ioctl.h b/include/uapi/rdma/hfi/hfi1_ioctl.h
index 9de78c5ee913..8f3d9fe7b141 100644
--- a/include/uapi/rdma/hfi/hfi1_ioctl.h
+++ b/include/uapi/rdma/hfi/hfi1_ioctl.h
@@ -79,7 +79,7 @@ struct hfi1_user_info {
};
struct hfi1_ctxt_info {
- __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
+ __aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
__u32 rcvegr_size; /* size of each eager buffer */
__u16 num_active; /* number of active units */
__u16 unit; /* unit (chip) assigned to caller */
@@ -98,9 +98,9 @@ struct hfi1_ctxt_info {
struct hfi1_tid_info {
/* virtual address of first page in transfer */
- __u64 vaddr;
+ __aligned_u64 vaddr;
/* pointer to tid array. this array is big enough */
- __u64 tidlist;
+ __aligned_u64 tidlist;
/* number of tids programmed by this request */
__u32 tidcnt;
/* length of transfer buffer programmed by this request */
@@ -131,23 +131,23 @@ struct hfi1_base_info {
*/
__u32 bthqp;
/* PIO credit return address, */
- __u64 sc_credits_addr;
+ __aligned_u64 sc_credits_addr;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
- __u64 pio_bufbase_sop;
+ __aligned_u64 pio_bufbase_sop;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
- __u64 pio_bufbase;
+ __aligned_u64 pio_bufbase;
/* address where receive buffer queue is mapped into */
- __u64 rcvhdr_bufbase;
+ __aligned_u64 rcvhdr_bufbase;
/* base address of Eager receive buffers. */
- __u64 rcvegr_bufbase;
+ __aligned_u64 rcvegr_bufbase;
/* base address of SDMA completion ring */
- __u64 sdma_comp_bufbase;
+ __aligned_u64 sdma_comp_bufbase;
/*
* User register base for init code, not to be used directly by
* protocol or applications. Always maps real chip register space.
@@ -155,20 +155,20 @@ struct hfi1_base_info {
* ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
* ur_rcvtidflow
*/
- __u64 user_regbase;
+ __aligned_u64 user_regbase;
/* notification events */
- __u64 events_bufbase;
+ __aligned_u64 events_bufbase;
/* status page */
- __u64 status_bufbase;
+ __aligned_u64 status_bufbase;
/* rcvhdrtail update */
- __u64 rcvhdrtail_base;
+ __aligned_u64 rcvhdrtail_base;
/*
* shared memory pages for subctxts if ctxt is shared; these cover
* all the processes in the group sharing a single context.
* all have enough space for the num_subcontexts value on this job.
*/
- __u64 subctxt_uregbase;
- __u64 subctxt_rcvegrbuf;
- __u64 subctxt_rcvhdrbuf;
+ __aligned_u64 subctxt_uregbase;
+ __aligned_u64 subctxt_rcvegrbuf;
+ __aligned_u64 subctxt_rcvhdrbuf;
};
#endif /* _LINIUX__HFI1_IOCTL_H */
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index 791bea2f8297..c6a984c0c881 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -177,8 +177,8 @@ struct hfi1_sdma_comp_entry {
* Device status and notifications from driver to user-space.
*/
struct hfi1_status {
- __u64 dev; /* device/hw status bits */
- __u64 port; /* port state and status bits */
+ __aligned_u64 dev; /* device/hw status bits */
+ __aligned_u64 port; /* port state and status bits */
char freezemsg[0];
};
@@ -219,7 +219,7 @@ struct sdma_req_info {
* in charge of managing its own ring.
*/
__u16 comp_idx;
-} __packed;
+} __attribute__((__packed__));
/*
* SW KDETH header.
@@ -230,7 +230,7 @@ struct hfi1_kdeth_header {
__le16 jkey;
__le16 hcrc;
__le32 swdata[7];
-} __packed;
+} __attribute__((__packed__));
/*
* Structure describing the headers that User space uses. The
@@ -241,7 +241,7 @@ struct hfi1_pkt_header {
__be16 lrh[4];
__be32 bth[3];
struct hfi1_kdeth_header kdeth;
-} __packed;
+} __attribute__((__packed__));
/*
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index a9c03b0eed57..7092c8de4bd8 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -37,19 +37,35 @@
#include <linux/types.h>
struct hns_roce_ib_create_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
+};
+
+struct hns_roce_ib_create_cq_resp {
+ __aligned_u64 cqn; /* Only 32 bits used, 64 for compat */
+ __aligned_u64 cap_flags;
};
struct hns_roce_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
__u8 reserved[5];
};
+struct hns_roce_ib_create_qp_resp {
+ __aligned_u64 cap_flags;
+};
+
struct hns_roce_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
+ __u32 reserved;
};
+
+struct hns_roce_ib_alloc_pd_resp {
+ __u32 pdn;
+};
+
#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h
new file mode 100644
index 000000000000..79890baa6fdb
--- /dev/null
+++ b/include/uapi/rdma/i40iw-abi.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef I40IW_ABI_H
+#define I40IW_ABI_H
+
+#include <linux/types.h>
+
+#define I40IW_ABI_VER 5
+
+struct i40iw_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct i40iw_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+ __u8 kernel_ver;
+ __u8 reserved[3];
+};
+
+struct i40iw_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 reserved[4];
+};
+
+struct i40iw_create_cq_req {
+ __aligned_u64 user_cq_buffer;
+ __aligned_u64 user_shadow_area;
+};
+
+struct i40iw_create_qp_req {
+ __aligned_u64 user_wqe_buffers;
+ __aligned_u64 user_compl_ctx;
+
+ /* UDA QP PHB */
+ __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
+ __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
+};
+
+enum i40iw_memreg_type {
+ IW_MEMREG_TYPE_MEM = 0x0000,
+ IW_MEMREG_TYPE_QP = 0x0001,
+ IW_MEMREG_TYPE_CQ = 0x0002,
+};
+
+struct i40iw_mem_reg_req {
+ __u16 reg_type; /* Memory, QP or CQ */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct i40iw_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct i40iw_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 i40iw_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd2;
+};
+
+#endif
diff --git a/include/uapi/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h
index f4041bdc4d08..4a8f9562f7cd 100644
--- a/include/uapi/rdma/ib_user_cm.h
+++ b/include/uapi/rdma/ib_user_cm.h
@@ -73,8 +73,8 @@ struct ib_ucm_cmd_hdr {
};
struct ib_ucm_create_id {
- __u64 uid;
- __u64 response;
+ __aligned_u64 uid;
+ __aligned_u64 response;
};
struct ib_ucm_create_id_resp {
@@ -82,7 +82,7 @@ struct ib_ucm_create_id_resp {
};
struct ib_ucm_destroy_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -92,7 +92,7 @@ struct ib_ucm_destroy_id_resp {
};
struct ib_ucm_attr_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -105,7 +105,7 @@ struct ib_ucm_attr_id_resp {
};
struct ib_ucm_init_qp_attr {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -123,7 +123,7 @@ struct ib_ucm_notify {
};
struct ib_ucm_private_data {
- __u64 data;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -135,9 +135,9 @@ struct ib_ucm_req {
__u32 qp_type;
__u32 psn;
__be64 sid;
- __u64 data;
- __u64 primary_path;
- __u64 alternate_path;
+ __aligned_u64 data;
+ __aligned_u64 primary_path;
+ __aligned_u64 alternate_path;
__u8 len;
__u8 peer_to_peer;
__u8 responder_resources;
@@ -153,8 +153,8 @@ struct ib_ucm_req {
};
struct ib_ucm_rep {
- __u64 uid;
- __u64 data;
+ __aligned_u64 uid;
+ __aligned_u64 data;
__u32 id;
__u32 qpn;
__u32 psn;
@@ -172,15 +172,15 @@ struct ib_ucm_rep {
struct ib_ucm_info {
__u32 id;
__u32 status;
- __u64 info;
- __u64 data;
+ __aligned_u64 info;
+ __aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
};
struct ib_ucm_mra {
- __u64 data;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 timeout;
@@ -188,8 +188,8 @@ struct ib_ucm_mra {
};
struct ib_ucm_lap {
- __u64 path;
- __u64 data;
+ __aligned_u64 path;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -199,8 +199,8 @@ struct ib_ucm_sidr_req {
__u32 id;
__u32 timeout;
__be64 sid;
- __u64 data;
- __u64 path;
+ __aligned_u64 data;
+ __aligned_u64 path;
__u16 reserved_pkey;
__u8 len;
__u8 max_cm_retries;
@@ -212,8 +212,8 @@ struct ib_ucm_sidr_rep {
__u32 qpn;
__u32 qkey;
__u32 status;
- __u64 info;
- __u64 data;
+ __aligned_u64 info;
+ __aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
@@ -222,9 +222,9 @@ struct ib_ucm_sidr_rep {
* event notification ABI structures.
*/
struct ib_ucm_event_get {
- __u64 response;
- __u64 data;
- __u64 info;
+ __aligned_u64 response;
+ __aligned_u64 data;
+ __aligned_u64 info;
__u8 data_len;
__u8 info_len;
__u8 reserved[6];
@@ -303,7 +303,7 @@ struct ib_ucm_sidr_rep_event_resp {
#define IB_UCM_PRES_ALTERNATE 0x08
struct ib_ucm_event_resp {
- __u64 uid;
+ __aligned_u64 uid;
__u32 id;
__u32 event;
__u32 present;
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
new file mode 100644
index 000000000000..83e3890eef20
--- /dev/null
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_USER_IOCTL_CMDS_H
+#define IB_USER_IOCTL_CMDS_H
+
+#define UVERBS_ID_NS_MASK 0xF000
+#define UVERBS_ID_NS_SHIFT 12
+
+#define UVERBS_UDATA_DRIVER_DATA_NS 1
+#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
+
+enum uverbs_default_objects {
+ UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
+ UVERBS_OBJECT_PD,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_OBJECT_CQ,
+ UVERBS_OBJECT_QP,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_OBJECT_AH,
+ UVERBS_OBJECT_MR,
+ UVERBS_OBJECT_MW,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_OBJECT_WQ,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_OBJECT_DM,
+};
+
+enum {
+ UVERBS_ATTR_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
+ UVERBS_ATTR_UHW_OUT,
+};
+
+enum uverbs_attrs_create_cq_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_CQ_CQE,
+ UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
+ UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
+ UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
+ UVERBS_ATTR_CREATE_CQ_FLAGS,
+ UVERBS_ATTR_CREATE_CQ_RESP_CQE,
+};
+
+enum uverbs_attrs_destroy_cq_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_CQ_HANDLE,
+ UVERBS_ATTR_DESTROY_CQ_RESP,
+};
+
+enum uverbs_attrs_create_flow_action_esp {
+ UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+ UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+};
+
+enum uverbs_attrs_destroy_flow_action_esp {
+ UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+};
+
+enum uverbs_methods_cq {
+ UVERBS_METHOD_CQ_CREATE,
+ UVERBS_METHOD_CQ_DESTROY,
+};
+
+enum uverbs_methods_actions_flow_action_ops {
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_METHOD_FLOW_ACTION_DESTROY,
+ UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
+};
+
+enum uverbs_attrs_alloc_dm_cmd_attr_ids {
+ UVERBS_ATTR_ALLOC_DM_HANDLE,
+ UVERBS_ATTR_ALLOC_DM_LENGTH,
+ UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
+};
+
+enum uverbs_attrs_free_dm_cmd_attr_ids {
+ UVERBS_ATTR_FREE_DM_HANDLE,
+};
+
+enum uverbs_methods_dm {
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_METHOD_DM_FREE,
+};
+
+enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
+ UVERBS_ATTR_REG_DM_MR_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_OFFSET,
+ UVERBS_ATTR_REG_DM_MR_LENGTH,
+ UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
+ UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
+};
+
+enum uverbs_methods_mr {
+ UVERBS_METHOD_DM_MR_REG,
+};
+
+#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 842792eae383..04e46ea517d3 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2017-2018, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,52 +34,69 @@
#ifndef IB_USER_IOCTL_VERBS_H
#define IB_USER_IOCTL_VERBS_H
-#include <rdma/rdma_user_ioctl.h>
-
-#define UVERBS_UDATA_DRIVER_DATA_NS 1
-#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
-
-enum uverbs_default_objects {
- UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
- UVERBS_OBJECT_PD,
- UVERBS_OBJECT_COMP_CHANNEL,
- UVERBS_OBJECT_CQ,
- UVERBS_OBJECT_QP,
- UVERBS_OBJECT_SRQ,
- UVERBS_OBJECT_AH,
- UVERBS_OBJECT_MR,
- UVERBS_OBJECT_MW,
- UVERBS_OBJECT_FLOW,
- UVERBS_OBJECT_XRCD,
- UVERBS_OBJECT_RWQ_IND_TBL,
- UVERBS_OBJECT_WQ,
- UVERBS_OBJECT_LAST,
+#include <linux/types.h>
+
+#ifndef RDMA_UAPI_PTR
+#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
+#endif
+
+enum ib_uverbs_flow_action_esp_keymat {
+ IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM,
};
-enum {
- UVERBS_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
- UVERBS_UHW_OUT,
+enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo {
+ IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ,
};
-enum uverbs_create_cq_cmd_attr_ids {
- CREATE_CQ_HANDLE,
- CREATE_CQ_CQE,
- CREATE_CQ_USER_HANDLE,
- CREATE_CQ_COMP_CHANNEL,
- CREATE_CQ_COMP_VECTOR,
- CREATE_CQ_FLAGS,
- CREATE_CQ_RESP_CQE,
+struct ib_uverbs_flow_action_esp_keymat_aes_gcm {
+ __aligned_u64 iv;
+ __u32 iv_algo; /* Use enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo */
+
+ __u32 salt;
+ __u32 icv_len;
+
+ __u32 key_len;
+ __u32 aes_key[256 / 32];
};
-enum uverbs_destroy_cq_cmd_attr_ids {
- DESTROY_CQ_HANDLE,
- DESTROY_CQ_RESP,
+enum ib_uverbs_flow_action_esp_replay {
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE,
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP,
};
-enum uverbs_actions_cq_ops {
- UVERBS_CQ_CREATE,
- UVERBS_CQ_DESTROY,
+struct ib_uverbs_flow_action_esp_replay_bmp {
+ __u32 size;
};
-#endif
+enum ib_uverbs_flow_action_esp_flags {
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO = 0UL << 0, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD = 1UL << 0,
+
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL = 0UL << 1, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT = 1UL << 1,
+
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT = 0UL << 2, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT = 1UL << 2,
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW = 1UL << 3,
+};
+
+struct ib_uverbs_flow_action_esp_encap {
+ /* This struct represents a list of pointers to flow_xxxx_filter that
+ * encapsulates the payload in ESP tunnel mode.
+ */
+ RDMA_UAPI_PTR(void *, val_ptr); /* pointer to a flow_xxxx_filter */
+ RDMA_UAPI_PTR(struct ib_uverbs_flow_action_esp_encap *, next_ptr);
+ __u16 len; /* Len of the filter struct val_ptr points to */
+ __u16 type; /* Use flow_spec_type enum */
+};
+
+struct ib_uverbs_flow_action_esp {
+ __u32 spi;
+ __u32 seq;
+ __u32 tfc_pad;
+ __u32 flags;
+ __aligned_u64 hard_limit_pkts;
+};
+
+#endif
diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h
index 330a3c5f1aa8..ef92118dad97 100644
--- a/include/uapi/rdma/ib_user_mad.h
+++ b/include/uapi/rdma/ib_user_mad.h
@@ -143,7 +143,7 @@ struct ib_user_mad_hdr {
*/
struct ib_user_mad {
struct ib_user_mad_hdr hdr;
- __u64 data[0];
+ __aligned_u64 data[0];
};
/*
@@ -225,7 +225,7 @@ struct ib_user_mad_reg_req2 {
__u8 mgmt_class_version;
__u16 res;
__u32 flags;
- __u64 method_mask[2];
+ __aligned_u64 method_mask[2];
__u32 oui;
__u8 rmpp_version;
__u8 reserved[3];
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 7e11bb8651b6..9be07394fdbe 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -117,13 +117,13 @@ enum {
*/
struct ib_uverbs_async_event_desc {
- __u64 element;
+ __aligned_u64 element;
__u32 event_type; /* enum ib_event_type */
__u32 reserved;
};
struct ib_uverbs_comp_event_desc {
- __u64 cq_handle;
+ __aligned_u64 cq_handle;
};
struct ib_uverbs_cq_moderation_caps {
@@ -141,10 +141,7 @@ struct ib_uverbs_cq_moderation_caps {
*/
#define IB_USER_VERBS_CMD_COMMAND_MASK 0xff
-#define IB_USER_VERBS_CMD_FLAGS_MASK 0xff000000u
-#define IB_USER_VERBS_CMD_FLAGS_SHIFT 24
-
-#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80
+#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80000000u
struct ib_uverbs_cmd_hdr {
__u32 command;
@@ -153,15 +150,15 @@ struct ib_uverbs_cmd_hdr {
};
struct ib_uverbs_ex_cmd_hdr {
- __u64 response;
+ __aligned_u64 response;
__u16 provider_in_words;
__u16 provider_out_words;
__u32 cmd_hdr_reserved;
};
struct ib_uverbs_get_context {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_get_context_resp {
@@ -170,16 +167,16 @@ struct ib_uverbs_get_context_resp {
};
struct ib_uverbs_query_device {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_device_resp {
- __u64 fw_ver;
+ __aligned_u64 fw_ver;
__be64 node_guid;
__be64 sys_image_guid;
- __u64 max_mr_size;
- __u64 page_size_cap;
+ __aligned_u64 max_mr_size;
+ __aligned_u64 page_size_cap;
__u32 vendor_id;
__u32 vendor_part_id;
__u32 hw_ver;
@@ -224,7 +221,7 @@ struct ib_uverbs_ex_query_device {
};
struct ib_uverbs_odp_caps {
- __u64 general_caps;
+ __aligned_u64 general_caps;
struct {
__u32 rc_odp_caps;
__u32 uc_odp_caps;
@@ -263,21 +260,22 @@ struct ib_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
- __u64 timestamp_mask;
- __u64 hca_core_clock; /* in KHZ */
- __u64 device_cap_flags_ex;
+ __aligned_u64 timestamp_mask;
+ __aligned_u64 hca_core_clock; /* in KHZ */
+ __aligned_u64 device_cap_flags_ex;
struct ib_uverbs_rss_caps rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
struct ib_uverbs_tm_caps tm_caps;
struct ib_uverbs_cq_moderation_caps cq_moderation_caps;
+ __aligned_u64 max_dm_size;
};
struct ib_uverbs_query_port {
- __u64 response;
+ __aligned_u64 response;
__u8 port_num;
__u8 reserved[7];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_port_resp {
@@ -305,8 +303,8 @@ struct ib_uverbs_query_port_resp {
};
struct ib_uverbs_alloc_pd {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_alloc_pd_resp {
@@ -318,10 +316,10 @@ struct ib_uverbs_dealloc_pd {
};
struct ib_uverbs_open_xrcd {
- __u64 response;
+ __aligned_u64 response;
__u32 fd;
__u32 oflags;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_open_xrcd_resp {
@@ -333,13 +331,13 @@ struct ib_uverbs_close_xrcd {
};
struct ib_uverbs_reg_mr {
- __u64 response;
- __u64 start;
- __u64 length;
- __u64 hca_va;
+ __aligned_u64 response;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_reg_mr_resp {
@@ -349,12 +347,12 @@ struct ib_uverbs_reg_mr_resp {
};
struct ib_uverbs_rereg_mr {
- __u64 response;
+ __aligned_u64 response;
__u32 mr_handle;
__u32 flags;
- __u64 start;
- __u64 length;
- __u64 hca_va;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
};
@@ -369,7 +367,7 @@ struct ib_uverbs_dereg_mr {
};
struct ib_uverbs_alloc_mw {
- __u64 response;
+ __aligned_u64 response;
__u32 pd_handle;
__u8 mw_type;
__u8 reserved[3];
@@ -385,7 +383,7 @@ struct ib_uverbs_dealloc_mw {
};
struct ib_uverbs_create_comp_channel {
- __u64 response;
+ __aligned_u64 response;
};
struct ib_uverbs_create_comp_channel_resp {
@@ -393,22 +391,27 @@ struct ib_uverbs_create_comp_channel_resp {
};
struct ib_uverbs_create_cq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
+};
+
+enum ib_uverbs_ex_create_cq_flags {
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
};
struct ib_uverbs_ex_create_cq {
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 comp_mask;
- __u32 flags;
+ __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */
__u32 reserved;
};
@@ -424,32 +427,32 @@ struct ib_uverbs_ex_create_cq_resp {
};
struct ib_uverbs_resize_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 cqe;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_resize_cq_resp {
__u32 cqe;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_poll_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 ne;
};
struct ib_uverbs_wc {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 status;
__u32 opcode;
__u32 vendor_err;
__u32 byte_len;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
__u32 qp_num;
@@ -475,7 +478,7 @@ struct ib_uverbs_req_notify_cq {
};
struct ib_uverbs_destroy_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 reserved;
};
@@ -544,8 +547,8 @@ struct ib_uverbs_qp_attr {
};
struct ib_uverbs_create_qp {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -559,7 +562,7 @@ struct ib_uverbs_create_qp {
__u8 qp_type;
__u8 is_srq;
__u8 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
enum ib_uverbs_create_qp_mask {
@@ -585,7 +588,7 @@ enum {
};
struct ib_uverbs_ex_create_qp {
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -606,13 +609,13 @@ struct ib_uverbs_ex_create_qp {
};
struct ib_uverbs_open_qp {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 qpn;
__u8 qp_type;
__u8 reserved[7];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
/* also used for open response */
@@ -653,10 +656,10 @@ struct ib_uverbs_qp_dest {
};
struct ib_uverbs_query_qp {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 attr_mask;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_qp_resp {
@@ -690,7 +693,7 @@ struct ib_uverbs_query_qp_resp {
__u8 alt_timeout;
__u8 sq_sig_all;
__u8 reserved[5];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_modify_qp {
@@ -720,7 +723,7 @@ struct ib_uverbs_modify_qp {
__u8 alt_port_num;
__u8 alt_timeout;
__u8 reserved[2];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_ex_modify_qp {
@@ -738,7 +741,7 @@ struct ib_uverbs_ex_modify_qp_resp {
};
struct ib_uverbs_destroy_qp {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 reserved;
};
@@ -754,30 +757,30 @@ struct ib_uverbs_destroy_qp_resp {
* document the ABI.
*/
struct ib_uverbs_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct ib_uverbs_send_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
__u32 reserved;
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
@@ -791,7 +794,7 @@ struct ib_uverbs_send_wr {
};
struct ib_uverbs_post_send {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -804,13 +807,13 @@ struct ib_uverbs_post_send_resp {
};
struct ib_uverbs_recv_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 reserved;
};
struct ib_uverbs_post_recv {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -823,7 +826,7 @@ struct ib_uverbs_post_recv_resp {
};
struct ib_uverbs_post_srq_recv {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 wr_count;
__u32 sge_count;
@@ -836,8 +839,8 @@ struct ib_uverbs_post_srq_recv_resp {
};
struct ib_uverbs_create_ah {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 reserved;
struct ib_uverbs_ah_attr attr;
@@ -856,7 +859,7 @@ struct ib_uverbs_attach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_detach_mcast {
@@ -864,7 +867,7 @@ struct ib_uverbs_detach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_flow_spec_hdr {
@@ -872,7 +875,7 @@ struct ib_uverbs_flow_spec_hdr {
__u16 size;
__u16 reserved;
/* followed by flow_spec */
- __u64 flow_spec_data[0];
+ __aligned_u64 flow_spec_data[0];
};
struct ib_uverbs_flow_eth_filter {
@@ -982,6 +985,19 @@ struct ib_uverbs_flow_spec_action_drop {
};
};
+struct ib_uverbs_flow_spec_action_handle {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 handle;
+ __u32 reserved1;
+};
+
struct ib_uverbs_flow_tunnel_filter {
__be32 tunnel_id;
};
@@ -999,6 +1015,24 @@ struct ib_uverbs_flow_spec_tunnel {
struct ib_uverbs_flow_tunnel_filter mask;
};
+struct ib_uverbs_flow_spec_esp_filter {
+ __u32 spi;
+ __u32 seq;
+};
+
+struct ib_uverbs_flow_spec_esp {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_spec_esp_filter val;
+ struct ib_uverbs_flow_spec_esp_filter mask;
+};
+
struct ib_uverbs_flow_attr {
__u32 type;
__u16 size;
@@ -1031,18 +1065,18 @@ struct ib_uverbs_destroy_flow {
};
struct ib_uverbs_create_srq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_xsrq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 srq_type;
__u32 pd_handle;
__u32 max_wr;
@@ -1051,7 +1085,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_srq_resp {
@@ -1066,14 +1100,14 @@ struct ib_uverbs_modify_srq {
__u32 attr_mask;
__u32 max_wr;
__u32 srq_limit;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq_resp {
@@ -1084,7 +1118,7 @@ struct ib_uverbs_query_srq_resp {
};
struct ib_uverbs_destroy_srq {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
};
@@ -1096,7 +1130,7 @@ struct ib_uverbs_destroy_srq_resp {
struct ib_uverbs_ex_create_wq {
__u32 comp_mask;
__u32 wq_type;
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 cq_handle;
__u32 max_wr;
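The conversions above replace __u64 with __aligned_u64 so that the write()-path command layout is identical for 32-bit and 64-bit user space (plain __u64 is only 4-byte aligned on i386). A minimal sketch of how such a layout can be pinned down at compile time; the struct and offsets below are illustrative only, not taken from ib_user_verbs.h:

/*
 * Hedged sketch: compile-time layout checks for a 64-bit-clean UAPI
 * command struct (illustrative fields, not the real ABI).
 */
#include <stddef.h>
#include <linux/types.h>

struct example_cmd {
	__aligned_u64 response;		/* 8-byte aligned even on i386 */
	__u32 handle;
	__u32 reserved;			/* explicit padding keeps sizeof stable */
};

_Static_assert(offsetof(struct example_cmd, handle) == 8,
	       "handle follows the 64-bit field on every architecture");
_Static_assert(sizeof(struct example_cmd) == 16,
	       "no architecture-dependent tail padding");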
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 224b52b6279c..04f64bc4045f 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -59,6 +59,10 @@ struct mlx4_ib_alloc_ucontext_resp_v3 {
__u16 bf_regs_per_page;
};
+enum {
+ MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0,
+};
+
struct mlx4_ib_alloc_ucontext_resp {
__u32 dev_caps;
__u32 qp_tab_size;
@@ -73,8 +77,8 @@ struct mlx4_ib_alloc_pd_resp {
};
struct mlx4_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
};
struct mlx4_ib_create_cq_resp {
@@ -83,12 +87,12 @@ struct mlx4_ib_create_cq_resp {
};
struct mlx4_ib_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
};
struct mlx4_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
};
struct mlx4_ib_create_srq_resp {
@@ -97,8 +101,8 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
- __u64 rx_hash_fields_mask;
- __u8 rx_hash_function;
+ __aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
__u32 comp_mask;
@@ -106,8 +110,8 @@ struct mlx4_ib_create_qp_rss {
};
struct mlx4_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
@@ -116,8 +120,8 @@ struct mlx4_ib_create_qp {
};
struct mlx4_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_range_size;
__u8 reserved[3];
__u32 comp_mask;
@@ -152,7 +156,36 @@ enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
- MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
+ MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX4_IB_RX_HASH_INNER = 1ULL << 31,
+};
+
+struct mlx4_ib_rss_caps {
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
+enum query_device_resp_mask {
+ MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
+};
+
+struct mlx4_ib_tso_caps {
+ __u32 max_tso; /* Maximum tso payload size in bytes */
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported.
+ */
+ __u32 supported_qpts;
+};
+
+struct mlx4_uverbs_ex_query_device_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __aligned_u64 hca_core_clock_offset;
+ __u32 max_inl_recv_sz;
+ __u32 reserved;
+ struct mlx4_ib_rss_caps rss_caps;
+ struct mlx4_ib_tso_caps tso_caps;
};
#endif /* MLX4_ABI_USER_H */
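The new comments tie mlx4_ib_create_qp_rss to the enums above. A rough, hedged sketch of filling the RSS fields for inner UDP hashing follows; MLX4_IB_RX_HASH_FUNC_TOEPLITZ is assumed from the companion enum in this header (not visible in the hunk), and how the command is actually submitted to the kernel is out of scope:

/* Hedged sketch: populate the RSS part of a create-QP request. */
#include <string.h>
#include <rdma/mlx4-abi.h>

static void fill_rss_cmd(struct mlx4_ib_create_qp_rss *cmd,
			 const __u8 toeplitz_key[40])
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->rx_hash_function = MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
	cmd->rx_hash_fields_mask = MLX4_IB_RX_HASH_SRC_PORT_UDP |
				   MLX4_IB_RX_HASH_DST_PORT_UDP |
				   MLX4_IB_RX_HASH_INNER;
	memcpy(cmd->rx_hash_key, toeplitz_key, sizeof(cmd->rx_hash_key));
}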
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a33e0517d3fd..cb4a02c4a1ce 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -41,6 +41,9 @@ enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
+ MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
+ MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
+ MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
};
enum {
@@ -81,7 +84,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
__u8 reserved0;
__u16 reserved1;
__u32 reserved2;
- __u64 lib_caps;
+ __aligned_u64 lib_caps;
};
enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -104,6 +107,14 @@ enum mlx5_user_inline_mode {
MLX5_USER_INLINE_MODE_TCP_UDP,
};
+enum {
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -115,16 +126,18 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
- __u16 reserved1;
+ __u16 flow_action_flags;
__u32 comp_mask;
__u32 response_length;
__u8 cqe_version;
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
- __u8 reserved2;
- __u64 hca_core_clock_offset;
+ __u8 clock_info_versions;
+ __aligned_u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
+ __u32 num_dyn_bfregs;
+ __u32 reserved3;
};
struct mlx5_ib_alloc_pd_resp {
@@ -142,7 +155,7 @@ struct mlx5_ib_tso_caps {
};
struct mlx5_ib_rss_caps {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 reserved[7];
};
@@ -158,6 +171,10 @@ struct mlx5_ib_cqe_comp_caps {
__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};
+enum mlx5_ib_packet_pacing_cap_flags {
+ MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
+};
+
struct mlx5_packet_pacing_caps {
__u32 qp_rate_limit_min;
__u32 qp_rate_limit_max; /* In kbps */
@@ -167,7 +184,8 @@ struct mlx5_packet_pacing_caps {
* supported_qpts |= 1 << IB_QPT_RAW_PACKET
*/
__u32 supported_qpts;
- __u32 reserved;
+ __u8 cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
+ __u8 reserved[3];
};
enum mlx5_ib_mpw_caps {
@@ -238,8 +256,8 @@ enum mlx5_ib_create_cq_flags {
};
struct mlx5_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 cqe_size;
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
@@ -252,15 +270,15 @@ struct mlx5_ib_create_cq_resp {
};
struct mlx5_ib_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u16 cqe_size;
__u16 reserved0;
__u32 reserved1;
};
struct mlx5_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 flags;
__u32 reserved0; /* explicit padding (optional on i386) */
__u32 uidx;
@@ -273,15 +291,18 @@ struct mlx5_ib_create_srq_resp {
};
struct mlx5_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 flags;
__u32 uidx;
- __u32 reserved0;
- __u64 sq_buf_addr;
+ __u32 bfreg_index;
+ union {
+ __aligned_u64 sq_buf_addr;
+ __aligned_u64 access_key;
+ };
};
/* RX Hash function flags */
@@ -306,12 +327,13 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
/* Save bits for future fields */
- MLX5_RX_HASH_INNER = 1 << 31
+ MLX5_RX_HASH_INNER = (1UL << 31),
};
struct mlx5_ib_create_qp_rss {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 rx_key_len; /* valid only for Toeplitz */
__u8 reserved[6];
@@ -322,6 +344,7 @@ struct mlx5_ib_create_qp_rss {
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
+ __u32 reserved;
};
struct mlx5_ib_alloc_mw {
@@ -336,8 +359,8 @@ enum mlx5_ib_create_wq_mask {
};
struct mlx5_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 user_index;
@@ -354,6 +377,23 @@ struct mlx5_ib_create_ah_resp {
__u8 reserved[6];
};
+struct mlx5_ib_burst_info {
+ __u32 max_burst_sz;
+ __u16 typical_pkt_sz;
+ __u16 reserved;
+};
+
+struct mlx5_ib_modify_qp {
+ __u32 comp_mask;
+ struct mlx5_ib_burst_info burst_info;
+ __u32 reserved;
+};
+
+struct mlx5_ib_modify_qp_resp {
+ __u32 response_length;
+ __u32 dctn;
+};
+
struct mlx5_ib_create_wq_resp {
__u32 response_length;
__u32 reserved;
@@ -368,4 +408,37 @@ struct mlx5_ib_modify_wq {
__u32 comp_mask;
__u32 reserved;
};
+
+struct mlx5_ib_clock_info {
+ __u32 sign;
+ __u32 resv;
+ __aligned_u64 nsec;
+ __aligned_u64 cycles;
+ __aligned_u64 frac;
+ __u32 mult;
+ __u32 shift;
+ __aligned_u64 mask;
+ __aligned_u64 overflow_period;
+};
+
+enum mlx5_ib_mmap_cmd {
+ MLX5_IB_MMAP_REGULAR_PAGE = 0,
+ MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+ MLX5_IB_MMAP_WC_PAGE = 2,
+ MLX5_IB_MMAP_NC_PAGE = 3,
+ /* 5 is chosen in order to be compatible with old versions of libmlx5 */
+ MLX5_IB_MMAP_CORE_CLOCK = 5,
+ MLX5_IB_MMAP_ALLOC_WC = 6,
+ MLX5_IB_MMAP_CLOCK_INFO = 7,
+ MLX5_IB_MMAP_DEVICE_MEM = 8,
+};
+
+enum {
+ MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
+};
+
+/* Bit indexes for the mlx5_ib_alloc_ucontext_resp.clock_info_versions bitmap */
+enum {
+ MLX5_IB_CLOCK_INFO_V1 = 0,
+};
#endif /* MLX5_ABI_USER_H */
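struct mlx5_ib_clock_info, together with MLX5_IB_MMAP_CLOCK_INFO and MLX5_IB_CLOCK_INFO_KERNEL_UPDATING, describes a read-only page that user space can mmap and sample without a syscall. A hedged sketch of a reader, assuming 'sign' acts as a seqcount-style generation counter; the cycles-to-time conversion itself is the driver's business and is not shown:

/* Hedged sketch: take a consistent snapshot of the mmap'ed clock page. */
#include <rdma/mlx5-abi.h>

static struct mlx5_ib_clock_info
clock_info_snapshot(const volatile struct mlx5_ib_clock_info *page)
{
	struct mlx5_ib_clock_info snap = {0};
	__u32 start;

	do {
		do {
			start = page->sign;
		} while (start & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

		snap.nsec   = page->nsec;
		snap.cycles = page->cycles;
		snap.frac   = page->frac;
		snap.mult   = page->mult;
		snap.shift  = page->shift;
		snap.mask   = page->mask;
		/* retry if the kernel updated the page underneath us */
	} while (page->sign != start);

	snap.sign = start;
	return snap;
}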
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
new file mode 100644
index 000000000000..f7d685ef2d1f
--- /dev/null
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_USER_IOCTL_CMDS_H
+#define MLX5_USER_IOCTL_CMDS_H
+
+#include <rdma/ib_user_ioctl_cmds.h>
+
+enum mlx5_ib_create_flow_action_attrs {
+ /* This attribute belongs to the driver namespace */
+ MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_alloc_dm_attrs {
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+};
+
+#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
new file mode 100644
index 000000000000..8a2fb33f3ed4
--- /dev/null
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_USER_IOCTL_VERBS_H
+#define MLX5_USER_IOCTL_VERBS_H
+
+#include <linux/types.h>
+
+enum mlx5_ib_uapi_flow_action_flags {
+ MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0,
+};
+
+#endif
+
diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h
index 3020d8a907a7..ac756cd9e807 100644
--- a/include/uapi/rdma/mthca-abi.h
+++ b/include/uapi/rdma/mthca-abi.h
@@ -74,8 +74,8 @@ struct mthca_reg_mr {
struct mthca_create_cq {
__u32 lkey;
__u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
+ __aligned_u64 arm_db_page;
+ __aligned_u64 set_db_page;
__u32 arm_db_index;
__u32 set_db_index;
};
@@ -93,7 +93,7 @@ struct mthca_resize_cq {
struct mthca_create_srq {
__u32 lkey;
__u32 db_index;
- __u64 db_page;
+ __aligned_u64 db_page;
};
struct mthca_create_srq_resp {
@@ -104,8 +104,8 @@ struct mthca_create_srq_resp {
struct mthca_create_qp {
__u32 lkey;
__u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
+ __aligned_u64 sq_db_page;
+ __aligned_u64 rq_db_page;
__u32 sq_db_index;
__u32 rq_db_index;
};
diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
index f5b2437aab28..35bfd4015d07 100644
--- a/include/uapi/rdma/nes-abi.h
+++ b/include/uapi/rdma/nes-abi.h
@@ -72,14 +72,14 @@ struct nes_alloc_pd_resp {
};
struct nes_create_cq_req {
- __u64 user_cq_buffer;
+ __aligned_u64 user_cq_buffer;
__u32 mcrqf;
__u8 reserved[4];
};
struct nes_create_qp_req {
- __u64 user_wqe_buffers;
- __u64 user_qp_buffer;
+ __aligned_u64 user_wqe_buffers;
+ __aligned_u64 user_qp_buffer;
};
enum iwnes_memreg_type {
diff --git a/include/uapi/rdma/ocrdma-abi.h b/include/uapi/rdma/ocrdma-abi.h
index ad64a3cea1cd..284d47b41f6e 100644
--- a/include/uapi/rdma/ocrdma-abi.h
+++ b/include/uapi/rdma/ocrdma-abi.h
@@ -55,17 +55,17 @@ struct ocrdma_alloc_ucontext_resp {
__u32 wqe_size;
__u32 max_inline_data;
__u32 dpp_wqe_size;
- __u64 ah_tbl_page;
+ __aligned_u64 ah_tbl_page;
__u32 ah_tbl_len;
__u32 rqe_size;
__u8 fw_ver[32];
/* for future use/new features in progress */
- __u64 rsvd1;
- __u64 rsvd2;
+ __aligned_u64 rsvd1;
+ __aligned_u64 rsvd2;
};
struct ocrdma_alloc_pd_ureq {
- __u64 rsvd1;
+ __u32 rsvd[2];
};
struct ocrdma_alloc_pd_uresp {
@@ -73,7 +73,7 @@ struct ocrdma_alloc_pd_uresp {
__u32 dpp_enabled;
__u32 dpp_page_addr_hi;
__u32 dpp_page_addr_lo;
- __u64 rsvd1;
+ __u32 rsvd[2];
};
struct ocrdma_create_cq_ureq {
@@ -87,13 +87,13 @@ struct ocrdma_create_cq_uresp {
__u32 page_size;
__u32 num_pages;
__u32 max_hw_cqe;
- __u64 page_addr[MAX_CQ_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 page_addr[MAX_CQ_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 phase_change;
/* for future use/new features in progress */
- __u64 rsvd1;
- __u64 rsvd2;
+ __aligned_u64 rsvd1;
+ __aligned_u64 rsvd2;
};
#define MAX_QP_PAGES 8
@@ -115,9 +115,9 @@ struct ocrdma_create_qp_uresp {
__u32 rq_page_size;
__u32 num_sq_pages;
__u32 num_rq_pages;
- __u64 sq_page_addr[MAX_QP_PAGES];
- __u64 rq_page_addr[MAX_QP_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 sq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 rq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 dpp_credit;
__u32 dpp_offset;
@@ -126,8 +126,8 @@ struct ocrdma_create_qp_uresp {
__u32 db_sq_offset;
__u32 db_rq_offset;
__u32 db_shift;
- __u64 rsvd[11];
-} __packed;
+ __aligned_u64 rsvd[11];
+};
struct ocrdma_create_srq_uresp {
__u16 rq_dbid;
@@ -137,16 +137,16 @@ struct ocrdma_create_srq_uresp {
__u32 rq_page_size;
__u32 num_rq_pages;
- __u64 rq_page_addr[MAX_QP_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 rq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 num_rqe_allocated;
__u32 db_rq_offset;
__u32 db_shift;
- __u64 rsvd2;
- __u64 rsvd3;
+ __aligned_u64 rsvd2;
+ __aligned_u64 rsvd3;
};
#endif /* OCRDMA_ABI_USER_H */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 261c6db4623e..8ba098900e9a 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -40,7 +40,7 @@
/* user kernel communication data structures. */
struct qedr_alloc_ucontext_resp {
- __u64 db_pa;
+ __aligned_u64 db_pa;
__u32 db_size;
__u32 max_send_wr;
@@ -53,24 +53,27 @@ struct qedr_alloc_ucontext_resp {
__u8 dpm_enabled;
__u8 wids_enabled;
__u16 wid_count;
+ __u32 reserved;
};
struct qedr_alloc_pd_ureq {
- __u64 rsvd1;
+ __aligned_u64 rsvd1;
};
struct qedr_alloc_pd_uresp {
__u32 pd_id;
+ __u32 reserved;
};
struct qedr_create_cq_ureq {
- __u64 addr;
- __u64 len;
+ __aligned_u64 addr;
+ __aligned_u64 len;
};
struct qedr_create_cq_uresp {
__u32 db_offset;
__u16 icid;
+ __u16 reserved;
};
struct qedr_create_qp_ureq {
@@ -79,17 +82,17 @@ struct qedr_create_qp_ureq {
/* SQ */
/* user space virtual address of SQ buffer */
- __u64 sq_addr;
+ __aligned_u64 sq_addr;
/* length of SQ buffer */
- __u64 sq_len;
+ __aligned_u64 sq_len;
/* RQ */
/* user space virtual address of RQ buffer */
- __u64 rq_addr;
+ __aligned_u64 rq_addr;
/* length of RQ buffer */
- __u64 rq_len;
+ __aligned_u64 rq_len;
};
struct qedr_create_qp_uresp {
@@ -105,6 +108,7 @@ struct qedr_create_qp_uresp {
__u16 rq_icid;
__u32 rq_db2_offset;
+ __u32 reserved;
};
#endif /* __QEDR_USER_H__ */
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index cc002e316d09..0ce0943fc808 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -227,14 +227,24 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_UNSPEC,
RDMA_NLDEV_CMD_GET, /* can dump */
- RDMA_NLDEV_CMD_SET,
- RDMA_NLDEV_CMD_NEW,
- RDMA_NLDEV_CMD_DEL,
- RDMA_NLDEV_CMD_PORT_GET, /* can dump */
- RDMA_NLDEV_CMD_PORT_SET,
- RDMA_NLDEV_CMD_PORT_NEW,
- RDMA_NLDEV_CMD_PORT_DEL,
+ /* 2 - 4 are free to use */
+
+ RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */
+
+ /* 6 - 8 are free to use */
+
+ RDMA_NLDEV_CMD_RES_GET = 9, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_CM_ID_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_CQ_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_MR_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_PD_GET, /* can dump */
RDMA_NLDEV_NUM_OPS
};
@@ -303,6 +313,94 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_SUMMARY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, /* string */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_QP, /* nested table */
+ RDMA_NLDEV_ATTR_RES_QP_ENTRY, /* nested table */
+ /*
+ * Local QPN
+ */
+ RDMA_NLDEV_ATTR_RES_LQPN, /* u32 */
+ /*
+ * Remote QPN,
+ * applicable to RC and UC only (IBTA 11.2.5.3 QUERY QUEUE PAIR)
+ */
+ RDMA_NLDEV_ATTR_RES_RQPN, /* u32 */
+ /*
+ * Receive Queue PSN,
+ * applicable to RC and UC only (IBTA 11.2.5.3 QUERY QUEUE PAIR)
+ */
+ RDMA_NLDEV_ATTR_RES_RQ_PSN, /* u32 */
+ /*
+ * Send Queue PSN
+ */
+ RDMA_NLDEV_ATTR_RES_SQ_PSN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, /* u8 */
+ /*
+ * QP types as visible to RDMA/core, the reserved QPT
+ * are not exported through this interface.
+ */
+ RDMA_NLDEV_ATTR_RES_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_STATE, /* u8 */
+ /*
+ * Process ID of the process that created the object;
+ * for objects of kernel origin, no PID exists.
+ */
+ RDMA_NLDEV_ATTR_RES_PID, /* u32 */
+ /*
+ * The name of the task that created this resource.
+ * It is exported only for kernel objects;
+ * for user-created objects, the consumer is expected
+ * to read the /proc/PID/comm file instead.
+ */
+ RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */
+
+ RDMA_NLDEV_ATTR_RES_CM_ID, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, /* nested table */
+ /*
+ * rdma_cm_id port space.
+ */
+ RDMA_NLDEV_ATTR_RES_PS, /* u32 */
+ /*
+ * Source and destination socket addresses
+ */
+ RDMA_NLDEV_ATTR_RES_SRC_ADDR, /* __kernel_sockaddr_storage */
+ RDMA_NLDEV_ATTR_RES_DST_ADDR, /* __kernel_sockaddr_storage */
+
+ RDMA_NLDEV_ATTR_RES_CQ, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CQ_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CQE, /* u32 */
+ RDMA_NLDEV_ATTR_RES_USECNT, /* u64 */
+ RDMA_NLDEV_ATTR_RES_POLL_CTX, /* u8 */
+
+ RDMA_NLDEV_ATTR_RES_MR, /* nested table */
+ RDMA_NLDEV_ATTR_RES_MR_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_RKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_LKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_IOVA, /* u64 */
+ RDMA_NLDEV_ATTR_RES_MRLEN, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_PD, /* nested table */
+ RDMA_NLDEV_ATTR_RES_PD_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, /* u32 */
+
+ /*
+ * Provides the logical name and index of the netdevice
+ * connected to the physical port. This information is
+ * relevant for RoCE and iWARP.
+ *
+ * Netdevices associated with containers are expected to be
+ * exported together with the GID table once it is exposed
+ * through netlink, because the associated netdevices are
+ * properties of GIDs.
+ */
+ RDMA_NLDEV_ATTR_NDEV_INDEX, /* u32 */
+ RDMA_NLDEV_ATTR_NDEV_NAME, /* string */
+
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index c83ef0026079..e1269024af47 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -70,6 +70,14 @@ enum {
RDMA_USER_CM_CMD_JOIN_MCAST
};
+/* See IBTA Annex A11, service ID bytes 4 & 5 */
+enum rdma_ucm_port_space {
+ RDMA_PS_IPOIB = 0x0002,
+ RDMA_PS_IB = 0x013F,
+ RDMA_PS_TCP = 0x0106,
+ RDMA_PS_UDP = 0x0111,
+};
+
/*
* command ABI structures.
*/
@@ -80,9 +88,9 @@ struct rdma_ucm_cmd_hdr {
};
struct rdma_ucm_create_id {
- __u64 uid;
- __u64 response;
- __u16 ps;
+ __aligned_u64 uid;
+ __aligned_u64 response;
+ __u16 ps; /* use enum rdma_ucm_port_space */
__u8 qp_type;
__u8 reserved[5];
};
@@ -92,7 +100,7 @@ struct rdma_ucm_create_id_resp {
};
struct rdma_ucm_destroy_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -102,7 +110,7 @@ struct rdma_ucm_destroy_id_resp {
};
struct rdma_ucm_bind_ip {
- __u64 response;
+ __aligned_u64 response;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -143,13 +151,13 @@ enum {
};
struct rdma_ucm_query {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 option;
};
struct rdma_ucm_query_route_resp {
- __u64 node_guid;
+ __aligned_u64 node_guid;
struct ib_user_path_rec ib_route[2];
struct sockaddr_in6 src_addr;
struct sockaddr_in6 dst_addr;
@@ -159,7 +167,7 @@ struct rdma_ucm_query_route_resp {
};
struct rdma_ucm_query_addr_resp {
- __u64 node_guid;
+ __aligned_u64 node_guid;
__u8 port_num;
__u8 reserved;
__u16 pkey;
@@ -210,7 +218,7 @@ struct rdma_ucm_listen {
};
struct rdma_ucm_accept {
- __u64 uid;
+ __aligned_u64 uid;
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
@@ -228,7 +236,7 @@ struct rdma_ucm_disconnect {
};
struct rdma_ucm_init_qp_attr {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -239,8 +247,8 @@ struct rdma_ucm_notify {
};
struct rdma_ucm_join_ip_mcast {
- __u64 response; /* rdma_ucm_create_id_resp */
- __u64 uid;
+ __aligned_u64 response; /* rdma_ucm_create_id_resp */
+ __aligned_u64 uid;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -253,8 +261,8 @@ enum {
};
struct rdma_ucm_join_mcast {
- __u64 response; /* rdma_ucma_create_id_resp */
- __u64 uid;
+ __aligned_u64 response; /* rdma_ucma_create_id_resp */
+ __aligned_u64 uid;
__u32 id;
__u16 addr_size;
__u16 join_flags;
@@ -262,18 +270,23 @@ struct rdma_ucm_join_mcast {
};
struct rdma_ucm_get_event {
- __u64 response;
+ __aligned_u64 response;
};
struct rdma_ucm_event_resp {
- __u64 uid;
+ __aligned_u64 uid;
__u32 id;
__u32 event;
__u32 status;
+ /*
+ * NOTE: This union is not aligned to 8 bytes so none of the union
+ * members may contain a u64 or anything with higher alignment than 4.
+ */
union {
struct rdma_ucm_conn_param conn;
struct rdma_ucm_ud_param ud;
} param;
+ __u32 reserved;
};
/* Option levels */
@@ -291,7 +304,7 @@ enum {
};
struct rdma_ucm_set_option {
- __u64 optval;
+ __aligned_u64 optval;
__u32 id;
__u32 level;
__u32 optname;
@@ -299,7 +312,7 @@ struct rdma_ucm_set_option {
};
struct rdma_ucm_migrate_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 fd;
};
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index 03557b5f9aa6..d223f4164a0f 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -34,49 +34,13 @@
#ifndef RDMA_USER_IOCTL_H
#define RDMA_USER_IOCTL_H
-#include <linux/types.h>
-#include <linux/ioctl.h>
#include <rdma/ib_user_mad.h>
#include <rdma/hfi/hfi1_ioctl.h>
+#include <rdma/rdma_user_ioctl_cmds.h>
-/* Documentation/ioctl/ioctl-number.txt */
-#define RDMA_IOCTL_MAGIC 0x1b
/* Legacy name, for user space applications which already use it */
#define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC
-#define RDMA_VERBS_IOCTL \
- _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
-
-#define UVERBS_ID_NS_MASK 0xF000
-#define UVERBS_ID_NS_SHIFT 12
-
-enum {
- /* User input */
- UVERBS_ATTR_F_MANDATORY = 1U << 0,
- /*
- * Valid output bit should be ignored and considered set in
- * mandatory fields. This bit is kernel output.
- */
- UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
-};
-
-struct ib_uverbs_attr {
- __u16 attr_id; /* command specific type attribute */
- __u16 len; /* only for pointers */
- __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
- __u16 reserved;
- __u64 data; /* ptr to command, inline data or idr/fd */
-};
-
-struct ib_uverbs_ioctl_hdr {
- __u16 length;
- __u16 object_id;
- __u16 method_id;
- __u16 num_attrs;
- __u64 reserved;
- struct ib_uverbs_attr attrs[0];
-};
-
/*
* General blocks assignments
* It is closed on purpose; do not expose it to user space
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
new file mode 100644
index 000000000000..1da5a1e1f3a8
--- /dev/null
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_IOCTL_CMDS_H
+#define RDMA_USER_IOCTL_CMDS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Documentation/ioctl/ioctl-number.txt */
+#define RDMA_IOCTL_MAGIC 0x1b
+#define RDMA_VERBS_IOCTL \
+ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
+
+enum {
+ /* User input */
+ UVERBS_ATTR_F_MANDATORY = 1U << 0,
+ /*
+ * Valid output bit should be ignored and considered set in
+ * mandatory fields. This bit is kernel output.
+ */
+ UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
+};
+
+struct ib_uverbs_attr {
+ __u16 attr_id; /* command specific type attribute */
+ __u16 len; /* only for pointers */
+ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
+ union {
+ struct {
+ __u8 elem_id;
+ __u8 reserved;
+ } enum_data;
+ __u16 reserved;
+ } attr_data;
+ __aligned_u64 data; /* ptr to command, inline data or idr/fd */
+};
+
+struct ib_uverbs_ioctl_hdr {
+ __u16 length;
+ __u16 object_id;
+ __u16 method_id;
+ __u16 num_attrs;
+ __aligned_u64 reserved1;
+ __u32 driver_id;
+ __u32 reserved2;
+ struct ib_uverbs_attr attrs[0];
+};
+
+enum rdma_driver_id {
+ RDMA_DRIVER_UNKNOWN,
+ RDMA_DRIVER_MLX5,
+ RDMA_DRIVER_MLX4,
+ RDMA_DRIVER_CXGB3,
+ RDMA_DRIVER_CXGB4,
+ RDMA_DRIVER_MTHCA,
+ RDMA_DRIVER_BNXT_RE,
+ RDMA_DRIVER_OCRDMA,
+ RDMA_DRIVER_NES,
+ RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_VMW_PVRDMA,
+ RDMA_DRIVER_QEDR,
+ RDMA_DRIVER_HNS,
+ RDMA_DRIVER_USNIC,
+ RDMA_DRIVER_RXE,
+ RDMA_DRIVER_HFI1,
+ RDMA_DRIVER_QIB,
+};
+
+#endif
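The extensible ioctl carries a fixed header followed by an array of typed attributes. A hedged sketch of assembling one call; the object, method and attribute IDs are placeholders, and real applications normally go through rdma-core rather than raw ioctl():

/* Hedged sketch: issue RDMA_VERBS_IOCTL with a caller-built attribute list. */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <rdma/rdma_user_ioctl_cmds.h>

static int verbs_ioctl(int cmd_fd, __u16 object_id, __u16 method_id,
		       const struct ib_uverbs_attr *attrs, __u16 num_attrs)
{
	size_t len = sizeof(struct ib_uverbs_ioctl_hdr) +
		     num_attrs * sizeof(struct ib_uverbs_attr);
	struct ib_uverbs_ioctl_hdr *hdr = calloc(1, len);
	int ret;

	if (!hdr)
		return -1;
	hdr->length = len;
	hdr->object_id = object_id;
	hdr->method_id = method_id;
	hdr->num_attrs = num_attrs;
	hdr->driver_id = RDMA_DRIVER_UNKNOWN;	/* or the matching enum rdma_driver_id */
	memcpy(hdr->attrs, attrs, num_attrs * sizeof(*attrs));

	ret = ioctl(cmd_fd, RDMA_VERBS_IOCTL, hdr);
	free(hdr);
	return ret;
}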
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index bdeea948b2f3..1f8a9e7daea4 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -35,6 +35,9 @@
#define RDMA_USER_RXE_H
#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
union rxe_gid {
__u8 raw[16];
@@ -55,16 +58,17 @@ struct rxe_global_route {
struct rxe_av {
__u8 port_num;
__u8 network_type;
+ __u16 reserved1;
+ __u32 reserved2;
struct rxe_global_route grh;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
};
struct rxe_send_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -74,36 +78,42 @@ struct rxe_send_wr {
} ex;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
+ __u32 reserved;
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
+ __u32 reserved;
} atomic;
struct {
__u32 remote_qpn;
__u32 remote_qkey;
__u16 pkey_index;
} ud;
+ /* reg is only used by the kernel and is not part of the uapi */
struct {
- struct ib_mr *mr;
+ union {
+ struct ib_mr *mr;
+ __aligned_u64 reserved;
+ };
__u32 key;
- int access;
+ __u32 access;
} reg;
} wr;
};
struct rxe_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct mminfo {
- __u64 offset;
+ __aligned_u64 offset;
__u32 size;
__u32 pad;
};
@@ -114,6 +124,7 @@ struct rxe_dma_info {
__u32 cur_sge;
__u32 num_sge;
__u32 sge_offset;
+ __u32 reserved;
union {
__u8 inline_data[0];
struct rxe_sge sge[0];
@@ -125,7 +136,7 @@ struct rxe_send_wqe {
struct rxe_av av;
__u32 status;
__u32 state;
- __u64 iova;
+ __aligned_u64 iova;
__u32 mask;
__u32 first_psn;
__u32 last_psn;
@@ -136,10 +147,33 @@ struct rxe_send_wqe {
};
struct rxe_recv_wqe {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 padding;
struct rxe_dma_info dma;
};
+struct rxe_create_cq_resp {
+ struct mminfo mi;
+};
+
+struct rxe_resize_cq_resp {
+ struct mminfo mi;
+};
+
+struct rxe_create_qp_resp {
+ struct mminfo rq_mi;
+ struct mminfo sq_mi;
+};
+
+struct rxe_create_srq_resp {
+ struct mminfo mi;
+ __u32 srq_num;
+ __u32 reserved;
+};
+
+struct rxe_modify_srq_cmd {
+ __aligned_u64 mmap_info_addr;
+};
+
#endif /* RDMA_USER_RXE_H */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index aaa352f2f110..d13fd490b66d 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -52,12 +52,14 @@
#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
-#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */
-#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */
+#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
+#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
-#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */
-#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */
-#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. */
+#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
+#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
+#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
+#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
+#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */
enum pvrdma_wr_opcode {
PVRDMA_WR_RDMA_WRITE,
@@ -141,7 +143,7 @@ struct pvrdma_alloc_pd_resp {
};
struct pvrdma_create_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -152,13 +154,13 @@ struct pvrdma_create_cq_resp {
};
struct pvrdma_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
struct pvrdma_create_srq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -169,25 +171,25 @@ struct pvrdma_create_srq_resp {
};
struct pvrdma_create_qp {
- __u64 rbuf_addr;
- __u64 sbuf_addr;
+ __aligned_u64 rbuf_addr;
+ __aligned_u64 sbuf_addr;
__u32 rbuf_size;
__u32 sbuf_size;
- __u64 qp_addr;
+ __aligned_u64 qp_addr;
};
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
- __u64 swap_val;
- __u64 compare_val;
- __u64 swap_mask;
- __u64 compare_mask;
+ __aligned_u64 swap_val;
+ __aligned_u64 compare_val;
+ __aligned_u64 swap_mask;
+ __aligned_u64 compare_mask;
};
/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
- __u64 add_val;
- __u64 field_boundary;
+ __aligned_u64 add_val;
+ __aligned_u64 field_boundary;
};
/* PVRDMA address vector. */
@@ -205,14 +207,14 @@ struct pvrdma_av {
/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
- __u64 wr_id; /* wr id */
+ __aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
};
@@ -220,7 +222,7 @@ struct pvrdma_rq_wqe_hdr {
/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
- __u64 wr_id; /* wr id */
+ __aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
__u32 opcode; /* operation type */
@@ -232,19 +234,19 @@ struct pvrdma_sq_wqe_hdr {
__u32 reserved;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
__u8 reserved[4];
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 log_arg_sz;
__u32 rkey;
union {
@@ -253,13 +255,14 @@ struct pvrdma_sq_wqe_hdr {
} wr_data;
} masked_atomics;
struct {
- __u64 iova_start;
- __u64 pl_pdir_dma;
+ __aligned_u64 iova_start;
+ __aligned_u64 pl_pdir_dma;
__u32 page_shift;
__u32 page_list_len;
__u32 length;
__u32 access_flags;
__u32 rkey;
+ __u32 reserved;
} fast_reg;
struct {
__u32 remote_qpn;
@@ -272,8 +275,8 @@ struct pvrdma_sq_wqe_hdr {
/* Completion queue element. */
struct pvrdma_cqe {
- __u64 wr_id;
- __u64 qp;
+ __aligned_u64 wr_id;
+ __aligned_u64 qp;
__u32 opcode;
__u32 status;
__u32 byte_len;
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index c227ccba60ae..ed0a120d4f08 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -214,6 +214,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_IMA_ADPCM ((__force snd_pcm_format_t) 22)
#define SNDRV_PCM_FORMAT_MPEG ((__force snd_pcm_format_t) 23)
#define SNDRV_PCM_FORMAT_GSM ((__force snd_pcm_format_t) 24)
+#define SNDRV_PCM_FORMAT_S20_LE ((__force snd_pcm_format_t) 25) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_S20_BE ((__force snd_pcm_format_t) 26) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_LE ((__force snd_pcm_format_t) 27) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_BE ((__force snd_pcm_format_t) 28) /* in four bytes, LSB justified */
+/* gap in the numbering for a future standard linear format */
#define SNDRV_PCM_FORMAT_SPECIAL ((__force snd_pcm_format_t) 31)
#define SNDRV_PCM_FORMAT_S24_3LE ((__force snd_pcm_format_t) 32) /* in three bytes */
#define SNDRV_PCM_FORMAT_S24_3BE ((__force snd_pcm_format_t) 33) /* in three bytes */
@@ -237,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE
+#define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
@@ -248,6 +254,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_LE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_LE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_LE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_BE
@@ -259,6 +267,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_BE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_BE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_BE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_BE
#endif
typedef int __bitwise snd_pcm_subformat_t;
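SNDRV_PCM_FORMAT_FIRST gives format-iteration loops a symbolic lower bound to pair with SNDRV_PCM_FORMAT_LAST. A small hedged sketch against the installed uapi header; the predicate callback is hypothetical, and note that the format numbering contains gaps no driver implements:

/* Hedged sketch: count PCM format codes accepted by some predicate. */
#include <sound/asound.h>

typedef int (*format_pred_t)(snd_pcm_format_t fmt);

static int count_formats(format_pred_t pred)
{
	snd_pcm_format_t fmt;
	int n = 0;

	for (fmt = SNDRV_PCM_FORMAT_FIRST; fmt <= SNDRV_PCM_FORMAT_LAST; fmt++)
		if (pred(fmt))
			n++;
	return n;
}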
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 326054a72bc7..8ba0112e5336 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -222,6 +222,17 @@
* %SKL_TKN_MM_U32_NUM_IN_FMT: Number of input formats
* %SKL_TKN_MM_U32_NUM_OUT_FMT: Number of output formats
*
+ * %SKL_TKN_U32_ASTATE_IDX: Table Index for the A-State entry to be filled
+ * with kcps and clock source
+ *
+ * %SKL_TKN_U32_ASTATE_COUNT: Number of valid entries in A-State table
+ *
+ * %SKL_TKN_U32_ASTATE_KCPS: Specifies the core load threshold (in kilo
+ * cycles per second) below which the DSP is clocked
+ * from the source specified by the clock source.
+ *
+ * %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry
+ *
* module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*
@@ -309,7 +320,11 @@ enum SKL_TKNS {
SKL_TKN_MM_U32_NUM_IN_FMT,
SKL_TKN_MM_U32_NUM_OUT_FMT,
- SKL_TKN_MAX = SKL_TKN_MM_U32_NUM_OUT_FMT,
+ SKL_TKN_U32_ASTATE_IDX,
+ SKL_TKN_U32_ASTATE_COUNT,
+ SKL_TKN_U32_ASTATE_KCPS,
+ SKL_TKN_U32_ASTATE_CLK_SRC,
+ SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC,
};
#endif
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
deleted file mode 100644
index 78957c9626f5..000000000000
--- a/include/video/exynos5433_decon.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2014 Samsung Electronics Co.Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundationr
- */
-
-#ifndef EXYNOS_REGS_DECON_H
-#define EXYNOS_REGS_DECON_H
-
-/* Exynos543X DECON */
-#define DECON_VIDCON0 0x0000
-#define DECON_VIDOUTCON0 0x0010
-#define DECON_WINCONx(n) (0x0020 + ((n) * 4))
-#define DECON_VIDOSDxH(n) (0x0080 + ((n) * 4))
-#define DECON_SHADOWCON 0x00A0
-#define DECON_VIDOSDxA(n) (0x00B0 + ((n) * 0x20))
-#define DECON_VIDOSDxB(n) (0x00B4 + ((n) * 0x20))
-#define DECON_VIDOSDxC(n) (0x00B8 + ((n) * 0x20))
-#define DECON_VIDOSDxD(n) (0x00BC + ((n) * 0x20))
-#define DECON_VIDOSDxE(n) (0x00C0 + ((n) * 0x20))
-#define DECON_VIDW0xADD0B0(n) (0x0150 + ((n) * 0x10))
-#define DECON_VIDW0xADD0B1(n) (0x0154 + ((n) * 0x10))
-#define DECON_VIDW0xADD0B2(n) (0x0158 + ((n) * 0x10))
-#define DECON_VIDW0xADD1B0(n) (0x01A0 + ((n) * 0x10))
-#define DECON_VIDW0xADD1B1(n) (0x01A4 + ((n) * 0x10))
-#define DECON_VIDW0xADD1B2(n) (0x01A8 + ((n) * 0x10))
-#define DECON_VIDW0xADD2(n) (0x0200 + ((n) * 4))
-#define DECON_LOCALxSIZE(n) (0x0214 + ((n) * 4))
-#define DECON_VIDINTCON0 0x0220
-#define DECON_VIDINTCON1 0x0224
-#define DECON_WxKEYCON0(n) (0x0230 + ((n - 1) * 8))
-#define DECON_WxKEYCON1(n) (0x0234 + ((n - 1) * 8))
-#define DECON_WxKEYALPHA(n) (0x0250 + ((n - 1) * 4))
-#define DECON_WINxMAP(n) (0x0270 + ((n) * 4))
-#define DECON_QOSLUT07_00 0x02C0
-#define DECON_QOSLUT15_08 0x02C4
-#define DECON_QOSCTRL 0x02C8
-#define DECON_BLENDERQx(n) (0x0300 + ((n - 1) * 4))
-#define DECON_BLENDCON 0x0310
-#define DECON_OPE_VIDW0xADD0(n) (0x0400 + ((n) * 4))
-#define DECON_OPE_VIDW0xADD1(n) (0x0414 + ((n) * 4))
-#define DECON_FRAMEFIFO_REG7 0x051C
-#define DECON_FRAMEFIFO_REG8 0x0520
-#define DECON_FRAMEFIFO_STATUS 0x0524
-#define DECON_CMU 0x1404
-#define DECON_UPDATE 0x1410
-#define DECON_CRFMID 0x1414
-#define DECON_UPDATE_SCHEME 0x1438
-#define DECON_VIDCON1 0x2000
-#define DECON_VIDCON2 0x2004
-#define DECON_VIDCON3 0x2008
-#define DECON_VIDCON4 0x200C
-#define DECON_VIDTCON2 0x2028
-#define DECON_FRAME_SIZE 0x2038
-#define DECON_LINECNT_OP_THRESHOLD 0x203C
-#define DECON_TRIGCON 0x2040
-#define DECON_TRIGSKIP 0x2050
-#define DECON_CRCRDATA 0x20B0
-#define DECON_CRCCTRL 0x20B4
-
-/* Exynos5430 DECON */
-#define DECON_VIDTCON0 0x2020
-#define DECON_VIDTCON1 0x2024
-
-/* Exynos5433 DECON */
-#define DECON_VIDTCON00 0x2010
-#define DECON_VIDTCON01 0x2014
-#define DECON_VIDTCON10 0x2018
-#define DECON_VIDTCON11 0x201C
-
-/* Exynos543X DECON Internal */
-#define DECON_W013DSTREOCON 0x0320
-#define DECON_W233DSTREOCON 0x0324
-#define DECON_FRAMEFIFO_REG0 0x0500
-#define DECON_ENHANCER_CTRL 0x2100
-
-/* Exynos543X DECON TV */
-#define DECON_VCLKCON0 0x0014
-#define DECON_VIDINTCON2 0x0228
-#define DECON_VIDINTCON3 0x022C
-
-/* VIDCON0 */
-#define VIDCON0_SWRESET (1 << 28)
-#define VIDCON0_CLKVALUP (1 << 14)
-#define VIDCON0_VLCKFREE (1 << 5)
-#define VIDCON0_STOP_STATUS (1 << 2)
-#define VIDCON0_ENVID (1 << 1)
-#define VIDCON0_ENVID_F (1 << 0)
-
-/* VIDOUTCON0 */
-#define VIDOUT_INTERLACE_FIELD_F (1 << 29)
-#define VIDOUT_INTERLACE_EN_F (1 << 28)
-#define VIDOUT_LCD_ON (1 << 24)
-#define VIDOUT_IF_F_MASK (0x3 << 20)
-#define VIDOUT_RGB_IF (0x0 << 20)
-#define VIDOUT_COMMAND_IF (0x2 << 20)
-
-/* WINCONx */
-#define WINCONx_HAWSWP_F (1 << 16)
-#define WINCONx_WSWP_F (1 << 15)
-#define WINCONx_BURSTLEN_MASK (0x3 << 10)
-#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
-#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
-#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
-#define WINCONx_BLD_PIX_F (1 << 6)
-#define WINCONx_BPPMODE_MASK (0xf << 2)
-#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
-#define WINCONx_BPPMODE_16BPP_A1555 (0x6 << 2)
-#define WINCONx_BPPMODE_16BPP_I1555 (0x7 << 2)
-#define WINCONx_BPPMODE_24BPP_888 (0xb << 2)
-#define WINCONx_BPPMODE_24BPP_A1887 (0xc << 2)
-#define WINCONx_BPPMODE_25BPP_A1888 (0xd << 2)
-#define WINCONx_BPPMODE_32BPP_A8888 (0xd << 2)
-#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
-#define WINCONx_ALPHA_SEL_F (1 << 1)
-#define WINCONx_ENWIN_F (1 << 0)
-
-/* SHADOWCON */
-#define SHADOWCON_PROTECT_MASK GENMASK(14, 10)
-#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
-
-/* VIDOSDxD */
-#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
-#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
-#define VIDOSD_Wx_ALPHA_B_F(n) (((n) & 0xff) << 0)
-
-/* VIDINTCON0 */
-#define VIDINTCON0_FRAMEDONE (1 << 17)
-#define VIDINTCON0_FRAMESEL_BP (0 << 15)
-#define VIDINTCON0_FRAMESEL_VS (1 << 15)
-#define VIDINTCON0_FRAMESEL_AC (2 << 15)
-#define VIDINTCON0_FRAMESEL_FP (3 << 15)
-#define VIDINTCON0_INTFRMEN (1 << 12)
-#define VIDINTCON0_INTEN (1 << 0)
-
-/* VIDINTCON1 */
-#define VIDINTCON1_INTFRMDONEPEND (1 << 2)
-#define VIDINTCON1_INTFRMPEND (1 << 1)
-#define VIDINTCON1_INTFIFOPEND (1 << 0)
-
-/* DECON_CMU */
-#define CMU_CLKGAGE_MODE_SFR_F (1 << 1)
-#define CMU_CLKGAGE_MODE_MEM_F (1 << 0)
-
-/* DECON_UPDATE */
-#define STANDALONE_UPDATE_F (1 << 0)
-
-/* DECON_VIDCON1 */
-#define VIDCON1_LINECNT_MASK (0x0fff << 16)
-#define VIDCON1_I80_ACTIVE (1 << 15)
-#define VIDCON1_VSTATUS_MASK (0x3 << 13)
-#define VIDCON1_VSTATUS_VS (0 << 13)
-#define VIDCON1_VSTATUS_BP (1 << 13)
-#define VIDCON1_VSTATUS_AC (2 << 13)
-#define VIDCON1_VSTATUS_FP (3 << 13)
-#define VIDCON1_VCLK_MASK (0x3 << 9)
-#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
-#define VIDCON1_VCLK_HOLD (0x0 << 9)
-#define VIDCON1_VCLK_RUN (0x1 << 9)
-
-
-/* DECON_VIDTCON00 */
-#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16)
-#define VIDTCON00_VFPD_F(x) ((x) & 0xfff)
-
-/* DECON_VIDTCON01 */
-#define VIDTCON01_VSPW_F(x) (((x) & 0xfff) << 16)
-
-/* DECON_VIDTCON10 */
-#define VIDTCON10_HBPD_F(x) (((x) & 0xfff) << 16)
-#define VIDTCON10_HFPD_F(x) ((x) & 0xfff)
-
-/* DECON_VIDTCON11 */
-#define VIDTCON11_HSPW_F(x) (((x) & 0xfff) << 16)
-
-/* DECON_VIDTCON2 */
-#define VIDTCON2_LINEVAL(x) (((x) & 0xfff) << 16)
-#define VIDTCON2_HOZVAL(x) ((x) & 0xfff)
-
-/* TRIGCON */
-#define TRIGCON_TRIGEN_PER_F (1 << 31)
-#define TRIGCON_TRIGEN_F (1 << 30)
-#define TRIGCON_TE_AUTO_MASK (1 << 29)
-#define TRIGCON_WB_SWTRIGCMD (1 << 28)
-#define TRIGCON_SWTRIGCMD_W4BUF (1 << 26)
-#define TRIGCON_TRIGMODE_W4BUF (1 << 25)
-#define TRIGCON_SWTRIGCMD_W3BUF (1 << 21)
-#define TRIGCON_TRIGMODE_W3BUF (1 << 20)
-#define TRIGCON_SWTRIGCMD_W2BUF (1 << 16)
-#define TRIGCON_TRIGMODE_W2BUF (1 << 15)
-#define TRIGCON_SWTRIGCMD_W1BUF (1 << 11)
-#define TRIGCON_TRIGMODE_W1BUF (1 << 10)
-#define TRIGCON_SWTRIGCMD_W0BUF (1 << 6)
-#define TRIGCON_TRIGMODE_W0BUF (1 << 5)
-#define TRIGCON_HWTRIGMASK (1 << 4)
-#define TRIGCON_HWTRIGEN (1 << 3)
-#define TRIGCON_HWTRIG_INV (1 << 2)
-#define TRIGCON_SWTRIGCMD (1 << 1)
-#define TRIGCON_SWTRIGEN (1 << 0)
-
-/* DECON_CRCCTRL */
-#define CRCCTRL_CRCCLKEN (0x1 << 2)
-#define CRCCTRL_CRCSTART_F (0x1 << 1)
-#define CRCCTRL_CRCEN (0x1 << 0)
-#define CRCCTRL_MASK (0x7)
-
-#endif /* EXYNOS_REGS_DECON_H */
diff --git a/include/video/exynos7_decon.h b/include/video/exynos7_decon.h
deleted file mode 100644
index a62b11b613f6..000000000000
--- a/include/video/exynos7_decon.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/* include/video/exynos7_decon.h
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Ajay Kumar <ajaykumar.rs@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/* VIDCON0 */
-#define VIDCON0 0x00
-
-#define VIDCON0_SWRESET (1 << 28)
-#define VIDCON0_DECON_STOP_STATUS (1 << 2)
-#define VIDCON0_ENVID (1 << 1)
-#define VIDCON0_ENVID_F (1 << 0)
-
-/* VIDOUTCON0 */
-#define VIDOUTCON0 0x4
-
-#define VIDOUTCON0_DUAL_MASK (0x3 << 24)
-#define VIDOUTCON0_DUAL_ON (0x3 << 24)
-#define VIDOUTCON0_DISP_IF_1_ON (0x2 << 24)
-#define VIDOUTCON0_DISP_IF_0_ON (0x1 << 24)
-#define VIDOUTCON0_DUAL_OFF (0x0 << 24)
-#define VIDOUTCON0_IF_SHIFT 23
-#define VIDOUTCON0_IF_MASK (0x1 << 23)
-#define VIDOUTCON0_RGBIF (0x0 << 23)
-#define VIDOUTCON0_I80IF (0x1 << 23)
-
-/* VIDCON3 */
-#define VIDCON3 0x8
-
-/* VIDCON4 */
-#define VIDCON4 0xC
-#define VIDCON4_FIFOCNT_START_EN (1 << 0)
-
-/* VCLKCON0 */
-#define VCLKCON0 0x10
-#define VCLKCON0_CLKVALUP (1 << 8)
-#define VCLKCON0_VCLKFREE (1 << 0)
-
-/* VCLKCON */
-#define VCLKCON1 0x14
-#define VCLKCON1_CLKVAL_NUM_VCLK(val) (((val) & 0xff) << 0)
-#define VCLKCON2 0x18
-
-/* SHADOWCON */
-#define SHADOWCON 0x30
-
-#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win)))
-
-/* WINCONx */
-#define WINCON(_win) (0x50 + ((_win) * 4))
-
-#define WINCONx_BUFSTATUS (0x3 << 30)
-#define WINCONx_BUFSEL_MASK (0x3 << 28)
-#define WINCONx_BUFSEL_SHIFT 28
-#define WINCONx_TRIPLE_BUF_MODE (0x1 << 18)
-#define WINCONx_DOUBLE_BUF_MODE (0x0 << 18)
-#define WINCONx_BURSTLEN_16WORD (0x0 << 11)
-#define WINCONx_BURSTLEN_8WORD (0x1 << 11)
-#define WINCONx_BURSTLEN_MASK (0x1 << 11)
-#define WINCONx_BURSTLEN_SHIFT 11
-#define WINCONx_BLD_PLANE (0 << 8)
-#define WINCONx_BLD_PIX (1 << 8)
-#define WINCONx_ALPHA_MUL (1 << 7)
-
-#define WINCONx_BPPMODE_MASK (0xf << 2)
-#define WINCONx_BPPMODE_SHIFT 2
-#define WINCONx_BPPMODE_16BPP_565 (0x8 << 2)
-#define WINCONx_BPPMODE_24BPP_BGRx (0x7 << 2)
-#define WINCONx_BPPMODE_24BPP_RGBx (0x6 << 2)
-#define WINCONx_BPPMODE_24BPP_xBGR (0x5 << 2)
-#define WINCONx_BPPMODE_24BPP_xRGB (0x4 << 2)
-#define WINCONx_BPPMODE_32BPP_BGRA (0x3 << 2)
-#define WINCONx_BPPMODE_32BPP_RGBA (0x2 << 2)
-#define WINCONx_BPPMODE_32BPP_ABGR (0x1 << 2)
-#define WINCONx_BPPMODE_32BPP_ARGB (0x0 << 2)
-#define WINCONx_ALPHA_SEL (1 << 1)
-#define WINCONx_ENWIN (1 << 0)
-
-#define WINCON1_ALPHA_MUL_F (1 << 7)
-#define WINCON2_ALPHA_MUL_F (1 << 7)
-#define WINCON3_ALPHA_MUL_F (1 << 7)
-#define WINCON4_ALPHA_MUL_F (1 << 7)
-
-/* VIDOSDxH: The height for the OSD image(READ ONLY)*/
-#define VIDOSD_H(_x) (0x80 + ((_x) * 4))
-
-/* Frame buffer start addresses: VIDWxxADD0n */
-#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10))
-#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10))
-#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10))
-
-#define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8))
-#define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8))
-#define VIDW_OFFSET_X(_win) (0x0170 + ((_win) * 8))
-#define VIDW_OFFSET_Y(_win) (0x0174 + ((_win) * 8))
-#define VIDW_BLKOFFSET(_win) (0x01B0 + ((_win) * 4))
-#define VIDW_BLKSIZE(win) (0x0200 + ((_win) * 4))
-
-/* Interrupt controls register */
-#define VIDINTCON2 0x228
-
-#define VIDINTCON1_INTEXTRA1_EN (1 << 1)
-#define VIDINTCON1_INTEXTRA0_EN (1 << 0)
-
-/* Interrupt controls and status register */
-#define VIDINTCON3 0x22C
-
-#define VIDINTCON1_INTEXTRA1_PEND (1 << 1)
-#define VIDINTCON1_INTEXTRA0_PEND (1 << 0)
-
-/* VIDOSDxA ~ VIDOSDxE */
-#define VIDOSD_BASE 0x230
-
-#define OSD_STRIDE 0x20
-
-#define VIDOSD_A(_win) (VIDOSD_BASE + \
- ((_win) * OSD_STRIDE) + 0x00)
-#define VIDOSD_B(_win) (VIDOSD_BASE + \
- ((_win) * OSD_STRIDE) + 0x04)
-#define VIDOSD_C(_win) (VIDOSD_BASE + \
- ((_win) * OSD_STRIDE) + 0x08)
-#define VIDOSD_D(_win) (VIDOSD_BASE + \
- ((_win) * OSD_STRIDE) + 0x0C)
-#define VIDOSD_E(_win) (VIDOSD_BASE + \
- ((_win) * OSD_STRIDE) + 0x10)
-
-#define VIDOSDxA_TOPLEFT_X_MASK (0x1fff << 13)
-#define VIDOSDxA_TOPLEFT_X_SHIFT 13
-#define VIDOSDxA_TOPLEFT_X_LIMIT 0x1fff
-#define VIDOSDxA_TOPLEFT_X(_x) (((_x) & 0x1fff) << 13)
-
-#define VIDOSDxA_TOPLEFT_Y_MASK (0x1fff << 0)
-#define VIDOSDxA_TOPLEFT_Y_SHIFT 0
-#define VIDOSDxA_TOPLEFT_Y_LIMIT 0x1fff
-#define VIDOSDxA_TOPLEFT_Y(_x) (((_x) & 0x1fff) << 0)
-
-#define VIDOSDxB_BOTRIGHT_X_MASK (0x1fff << 13)
-#define VIDOSDxB_BOTRIGHT_X_SHIFT 13
-#define VIDOSDxB_BOTRIGHT_X_LIMIT 0x1fff
-#define VIDOSDxB_BOTRIGHT_X(_x) (((_x) & 0x1fff) << 13)
-
-#define VIDOSDxB_BOTRIGHT_Y_MASK (0x1fff << 0)
-#define VIDOSDxB_BOTRIGHT_Y_SHIFT 0
-#define VIDOSDxB_BOTRIGHT_Y_LIMIT 0x1fff
-#define VIDOSDxB_BOTRIGHT_Y(_x) (((_x) & 0x1fff) << 0)
-
-#define VIDOSDxC_ALPHA0_R_F(_x) (((_x) & 0xFF) << 16)
-#define VIDOSDxC_ALPHA0_G_F(_x) (((_x) & 0xFF) << 8)
-#define VIDOSDxC_ALPHA0_B_F(_x) (((_x) & 0xFF) << 0)
-
-#define VIDOSDxD_ALPHA1_R_F(_x) (((_x) & 0xFF) << 16)
-#define VIDOSDxD_ALPHA1_G_F(_x) (((_x) & 0xFF) << 8)
-#define VIDOSDxD_ALPHA1_B_F(_x) (((_x) & 0xFF) >> 0)
-
-/* Window MAP (Color map) */
-#define WINxMAP(_win) (0x340 + ((_win) * 4))
-
-#define WINxMAP_MAP (1 << 24)
-#define WINxMAP_MAP_COLOUR_MASK (0xffffff << 0)
-#define WINxMAP_MAP_COLOUR_SHIFT 0
-#define WINxMAP_MAP_COLOUR_LIMIT 0xffffff
-#define WINxMAP_MAP_COLOUR(_x) ((_x) << 0)
-
-/* Window colour-key control registers */
-#define WKEYCON 0x370
-
-#define WKEYCON0 0x00
-#define WKEYCON1 0x04
-#define WxKEYCON0_KEYBL_EN (1 << 26)
-#define WxKEYCON0_KEYEN_F (1 << 25)
-#define WxKEYCON0_DIRCON (1 << 24)
-#define WxKEYCON0_COMPKEY_MASK (0xffffff << 0)
-#define WxKEYCON0_COMPKEY_SHIFT 0
-#define WxKEYCON0_COMPKEY_LIMIT 0xffffff
-#define WxKEYCON0_COMPKEY(_x) ((_x) << 0)
-#define WxKEYCON1_COLVAL_MASK (0xffffff << 0)
-#define WxKEYCON1_COLVAL_SHIFT 0
-#define WxKEYCON1_COLVAL_LIMIT 0xffffff
-#define WxKEYCON1_COLVAL(_x) ((_x) << 0)
-
-/* color key control register for hardware window 1 ~ 4. */
-#define WKEYCON0_BASE(x) ((WKEYCON + WKEYCON0) + ((x - 1) * 8))
-/* color key value register for hardware window 1 ~ 4. */
-#define WKEYCON1_BASE(x) ((WKEYCON + WKEYCON1) + ((x - 1) * 8))
-
-/* Window KEY Alpha value */
-#define WxKEYALPHA(_win) (0x3A0 + (((_win) - 1) * 0x4))
-
-#define Wx_KEYALPHA_R_F_SHIFT 16
-#define Wx_KEYALPHA_G_F_SHIFT 8
-#define Wx_KEYALPHA_B_F_SHIFT 0
-
-/* Blending equation */
-#define BLENDE(_win) (0x03C0 + ((_win) * 4))
-#define BLENDE_COEF_ZERO 0x0
-#define BLENDE_COEF_ONE 0x1
-#define BLENDE_COEF_ALPHA_A 0x2
-#define BLENDE_COEF_ONE_MINUS_ALPHA_A 0x3
-#define BLENDE_COEF_ALPHA_B 0x4
-#define BLENDE_COEF_ONE_MINUS_ALPHA_B 0x5
-#define BLENDE_COEF_ALPHA0 0x6
-#define BLENDE_COEF_A 0xA
-#define BLENDE_COEF_ONE_MINUS_A 0xB
-#define BLENDE_COEF_B 0xC
-#define BLENDE_COEF_ONE_MINUS_B 0xD
-#define BLENDE_Q_FUNC(_v) ((_v) << 18)
-#define BLENDE_P_FUNC(_v) ((_v) << 12)
-#define BLENDE_B_FUNC(_v) ((_v) << 6)
-#define BLENDE_A_FUNC(_v) ((_v) << 0)
-
-/* Blending equation control */
-#define BLENDCON 0x3D8
-#define BLENDCON_NEW_MASK (1 << 0)
-#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
-#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
-
-/* Interrupt control register */
-#define VIDINTCON0 0x500
-
-#define VIDINTCON0_WAKEUP_MASK (0x3f << 26)
-#define VIDINTCON0_INTEXTRAEN (1 << 21)
-
-#define VIDINTCON0_FRAMESEL0_SHIFT 15
-#define VIDINTCON0_FRAMESEL0_MASK (0x3 << 15)
-#define VIDINTCON0_FRAMESEL0_BACKPORCH (0x0 << 15)
-#define VIDINTCON0_FRAMESEL0_VSYNC (0x1 << 15)
-#define VIDINTCON0_FRAMESEL0_ACTIVE (0x2 << 15)
-#define VIDINTCON0_FRAMESEL0_FRONTPORCH (0x3 << 15)
-
-#define VIDINTCON0_INT_FRAME (1 << 11)
-
-#define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 3)
-#define VIDINTCON0_FIFOLEVEL_SHIFT 3
-#define VIDINTCON0_FIFOLEVEL_EMPTY (0x0 << 3)
-#define VIDINTCON0_FIFOLEVEL_TO25PC (0x1 << 3)
-#define VIDINTCON0_FIFOLEVEL_TO50PC (0x2 << 3)
-#define VIDINTCON0_FIFOLEVEL_FULL (0x4 << 3)
-
-#define VIDINTCON0_FIFOSEL_MAIN_EN (1 << 1)
-#define VIDINTCON0_INT_FIFO (1 << 1)
-
-#define VIDINTCON0_INT_ENABLE (1 << 0)
-
-/* Interrupt controls and status register */
-#define VIDINTCON1 0x504
-
-#define VIDINTCON1_INT_EXTRA (1 << 3)
-#define VIDINTCON1_INT_I80 (1 << 2)
-#define VIDINTCON1_INT_FRAME (1 << 1)
-#define VIDINTCON1_INT_FIFO (1 << 0)
-
-/* VIDCON1 */
-#define VIDCON1(_x) (0x0600 + ((_x) * 0x50))
-#define VIDCON1_LINECNT_GET(_v) (((_v) >> 17) & 0x1fff)
-#define VIDCON1_VCLK_MASK (0x3 << 9)
-#define VIDCON1_VCLK_HOLD (0x0 << 9)
-#define VIDCON1_VCLK_RUN (0x1 << 9)
-#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
-#define VIDCON1_RGB_ORDER_O_MASK (0x7 << 4)
-#define VIDCON1_RGB_ORDER_O_RGB (0x0 << 4)
-#define VIDCON1_RGB_ORDER_O_GBR (0x1 << 4)
-#define VIDCON1_RGB_ORDER_O_BRG (0x2 << 4)
-#define VIDCON1_RGB_ORDER_O_BGR (0x4 << 4)
-#define VIDCON1_RGB_ORDER_O_RBG (0x5 << 4)
-#define VIDCON1_RGB_ORDER_O_GRB (0x6 << 4)
-
-/* VIDTCON0 */
-#define VIDTCON0 0x610
-
-#define VIDTCON0_VBPD_MASK (0xffff << 16)
-#define VIDTCON0_VBPD_SHIFT 16
-#define VIDTCON0_VBPD_LIMIT 0xffff
-#define VIDTCON0_VBPD(_x) ((_x) << 16)
-
-#define VIDTCON0_VFPD_MASK (0xffff << 0)
-#define VIDTCON0_VFPD_SHIFT 0
-#define VIDTCON0_VFPD_LIMIT 0xffff
-#define VIDTCON0_VFPD(_x) ((_x) << 0)
-
-/* VIDTCON1 */
-#define VIDTCON1 0x614
-
-#define VIDTCON1_VSPW_MASK (0xffff << 16)
-#define VIDTCON1_VSPW_SHIFT 16
-#define VIDTCON1_VSPW_LIMIT 0xffff
-#define VIDTCON1_VSPW(_x) ((_x) << 16)
-
-/* VIDTCON2 */
-#define VIDTCON2 0x618
-
-#define VIDTCON2_HBPD_MASK (0xffff << 16)
-#define VIDTCON2_HBPD_SHIFT 16
-#define VIDTCON2_HBPD_LIMIT 0xffff
-#define VIDTCON2_HBPD(_x) ((_x) << 16)
-
-#define VIDTCON2_HFPD_MASK (0xffff << 0)
-#define VIDTCON2_HFPD_SHIFT 0
-#define VIDTCON2_HFPD_LIMIT 0xffff
-#define VIDTCON2_HFPD(_x) ((_x) << 0)
-
-/* VIDTCON3 */
-#define VIDTCON3 0x61C
-
-#define VIDTCON3_HSPW_MASK (0xffff << 16)
-#define VIDTCON3_HSPW_SHIFT 16
-#define VIDTCON3_HSPW_LIMIT 0xffff
-#define VIDTCON3_HSPW(_x) ((_x) << 16)
-
-/* VIDTCON4 */
-#define VIDTCON4 0x620
-
-#define VIDTCON4_LINEVAL_MASK (0xfff << 16)
-#define VIDTCON4_LINEVAL_SHIFT 16
-#define VIDTCON4_LINEVAL_LIMIT 0xfff
-#define VIDTCON4_LINEVAL(_x) (((_x) & 0xfff) << 16)
-
-#define VIDTCON4_HOZVAL_MASK (0xfff << 0)
-#define VIDTCON4_HOZVAL_SHIFT 0
-#define VIDTCON4_HOZVAL_LIMIT 0xfff
-#define VIDTCON4_HOZVAL(_x) (((_x) & 0xfff) << 0)
-
-/* LINECNT OP THRESHOLD */
-#define LINECNT_OP_THRESHOLD 0x630
-
-/* CRCCTRL */
-#define CRCCTRL 0x6C8
-#define CRCCTRL_CRCCLKEN (0x1 << 2)
-#define CRCCTRL_CRCSTART_F (0x1 << 1)
-#define CRCCTRL_CRCEN (0x1 << 0)
-
-/* DECON_CMU */
-#define DECON_CMU 0x704
-
-#define DECON_CMU_ALL_CLKGATE_ENABLE 0x3
-#define DECON_CMU_SE_CLKGATE_ENABLE (0x1 << 2)
-#define DECON_CMU_SFR_CLKGATE_ENABLE (0x1 << 1)
-#define DECON_CMU_MEM_CLKGATE_ENABLE (0x1 << 0)
-
-/* DECON_UPDATE */
-#define DECON_UPDATE 0x710
-
-#define DECON_UPDATE_SLAVE_SYNC (1 << 4)
-#define DECON_UPDATE_STANDALONE_F (1 << 0)
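
For orientation, the sketch below shows how a handful of the macros removed above would typically be combined into register writes: a vertical-timing value, a per-window blend equation, and the standalone shadow-register update. It is only a minimal, hypothetical sketch assuming an MMIO base `regs` mapped by the caller and the usual <linux/io.h> accessors; it is not code taken from the deleted driver.

/*
 * Minimal sketch (not from the deleted driver).  Assumes <linux/io.h>,
 * <linux/types.h> and the register header removed above; `regs`, `vbpd`
 * and `vfpd` are hypothetical inputs supplied by the caller.
 */
static void decon_sketch_commit(void __iomem *regs, u32 vbpd, u32 vfpd)
{
	u32 val;

	/* VIDTCON0: vertical back porch in [31:16], front porch in [15:0] */
	val = VIDTCON0_VBPD(vbpd & VIDTCON0_VBPD_LIMIT) |
	      VIDTCON0_VFPD(vfpd & VIDTCON0_VFPD_LIMIT);
	writel(val, regs + VIDTCON0);

	/* BLENDE(1): "source over" style A/B coefficients, P/Q forced to zero */
	val = BLENDE_A_FUNC(BLENDE_COEF_ALPHA_A) |
	      BLENDE_B_FUNC(BLENDE_COEF_ONE_MINUS_ALPHA_A) |
	      BLENDE_P_FUNC(BLENDE_COEF_ZERO) |
	      BLENDE_Q_FUNC(BLENDE_COEF_ZERO);
	writel(val, regs + BLENDE(1));

	/* Transfer the shadowed configuration to the active registers. */
	writel(DECON_UPDATE_STANDALONE_F, regs + DECON_UPDATE);
}
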
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index ce4c07688b13..abbad94e14a1 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -344,7 +344,7 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan);
int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
unsigned int axi_id, unsigned int width,
unsigned int height, unsigned int stride,
- u32 format, unsigned long *eba);
+ u32 format, uint64_t modifier, unsigned long *eba);
/*
* IPU CMOS Sensor Interface (csi) functions
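
The hunk above threads a DRM format modifier through ipu_prg_channel_configure(). A hypothetical call site (a sketch only; the helper name and the use of struct drm_framebuffer fields are assumptions, not taken from this patch) would simply forward fb->modifier alongside the existing arguments:

/* Sketch: assumes <video/imx-ipu-v3.h> and <drm/drm_framebuffer.h>. */
static int sketch_prg_configure(struct ipuv3_channel *ipu_chan,
				unsigned int axi_id,
				struct drm_framebuffer *fb,
				unsigned long eba)
{
	return ipu_prg_channel_configure(ipu_chan, axi_id,
					 fb->width, fb->height,
					 fb->pitches[0],
					 fb->format->format,
					 fb->modifier,	/* new argument */
					 &eba);
}
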
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
index 956455fc9f9a..bb29e5954000 100644
--- a/include/video/of_display_timing.h
+++ b/include/video/of_display_timing.h
@@ -19,7 +19,6 @@ struct display_timings;
int of_get_display_timing(const struct device_node *np, const char *name,
struct display_timing *dt);
struct display_timings *of_get_display_timings(const struct device_node *np);
-int of_display_timings_exist(const struct device_node *np);
#else
static inline int of_get_display_timing(const struct device_node *np,
const char *name, struct display_timing *dt)
@@ -31,10 +30,6 @@ of_get_display_timings(const struct device_node *np)
{
return NULL;
}
-static inline int of_display_timings_exist(const struct device_node *np)
-{
- return -ENOSYS;
-}
#endif
#endif
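
With of_display_timings_exist() gone, a caller that only needs to know whether timings are described in the device tree has to open-code the check. One plausible replacement (an assumption, not something this patch introduces) is to probe for the "display-timings" child node directly:

/* Sketch: assumes <linux/of.h>; not part of this patch. */
static bool sketch_has_display_timings(const struct device_node *np)
{
	struct device_node *timings;
	bool present;

	timings = of_get_child_by_name(np, "display-timings");
	present = timings != NULL;
	of_node_put(timings);

	return present;
}
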
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 1252a7a89bc0..0cabe6b09095 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -19,7 +19,7 @@ struct dloarea {
struct urb_node {
struct list_head entry;
- struct dlfb_data *dev;
+ struct dlfb_data *dlfb;
struct delayed_work release_urb_work;
struct urb *urb;
};
@@ -35,7 +35,6 @@ struct urb_list {
struct dlfb_data {
struct usb_device *udev;
- struct device *gdev; /* &udev->dev */
struct fb_info *info;
struct urb_list urbs;
struct kref kref;