Diffstat (limited to 'include')
-rw-r--r--include/acpi/acpi_bus.h45
-rw-r--r--include/acpi/acpi_io.h4
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h14
-rw-r--r--include/acpi/actbl.h17
-rw-r--r--include/acpi/actbl1.h176
-rw-r--r--include/acpi/actbl2.h212
-rw-r--r--include/acpi/actbl3.h88
-rw-r--r--include/acpi/actypes.h57
-rw-r--r--include/acpi/acuuid.h89
-rw-r--r--include/acpi/platform/acenv.h4
-rw-r--r--include/acpi/platform/acenvex.h3
-rw-r--r--include/acpi/processor.h6
-rw-r--r--include/acpi/video.h21
-rw-r--r--include/asm-generic/asm-offsets.h1
-rw-r--r--include/asm-generic/barrier.h4
-rw-r--r--include/asm-generic/cmpxchg.h3
-rw-r--r--include/asm-generic/dma-mapping-common.h5
-rw-r--r--include/asm-generic/futex.h7
-rw-r--r--include/asm-generic/gpio.h53
-rw-r--r--include/asm-generic/io.h17
-rw-r--r--include/asm-generic/iomap.h4
-rw-r--r--include/asm-generic/pci.h13
-rw-r--r--include/asm-generic/pgtable.h68
-rw-r--r--include/asm-generic/preempt.h7
-rw-r--r--include/asm-generic/qspinlock.h139
-rw-r--r--include/asm-generic/qspinlock_types.h79
-rw-r--r--include/asm-generic/scatterlist.h34
-rw-r--r--include/asm-generic/seccomp.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h18
-rw-r--r--include/crypto/aead.h533
-rw-r--r--include/crypto/akcipher.h340
-rw-r--r--include/crypto/algapi.h37
-rw-r--r--include/crypto/compress.h8
-rw-r--r--include/crypto/cryptd.h1
-rw-r--r--include/crypto/drbg.h59
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/if_alg.h4
-rw-r--r--include/crypto/internal/aead.h102
-rw-r--r--include/crypto/internal/akcipher.h60
-rw-r--r--include/crypto/internal/geniv.h24
-rw-r--r--include/crypto/internal/rng.h21
-rw-r--r--include/crypto/internal/rsa.h27
-rw-r--r--include/crypto/md5.h5
-rw-r--r--include/crypto/null.h3
-rw-r--r--include/crypto/rng.h103
-rw-r--r--include/crypto/scatterwalk.h4
-rw-r--r--include/crypto/sha.h15
-rw-r--r--include/crypto/sha1_base.h106
-rw-r--r--include/crypto/sha256_base.h128
-rw-r--r--include/crypto/sha512_base.h131
-rw-r--r--include/drm/bridge/dw_hdmi.h5
-rw-r--r--include/drm/drmP.h20
-rw-r--r--include/drm/drm_atomic.h24
-rw-r--r--include/drm/drm_atomic_helper.h20
-rw-r--r--include/drm/drm_crtc.h21
-rw-r--r--include/drm/drm_crtc_helper.h9
-rw-r--r--include/drm/drm_dp_helper.h176
-rw-r--r--include/drm/drm_dp_mst_helper.h2
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_fb_helper.h19
-rw-r--r--include/drm/drm_gem.h14
-rw-r--r--include/drm/drm_modes.h2
-rw-r--r--include/drm/drm_panel.h5
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/drm/drm_plane_helper.h12
-rw-r--r--include/drm/i915_pciids.h77
-rw-r--r--include/dt-bindings/clock/exynos3250.h61
-rw-r--r--include/dt-bindings/clock/exynos5433.h1403
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h5
-rw-r--r--include/dt-bindings/clock/pistachio-clk.h183
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h3
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8916.h156
-rw-r--r--include/dt-bindings/clock/r8a73a4-clock.h62
-rw-r--r--include/dt-bindings/clock/r8a7778-clock.h71
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h3
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h3
-rw-r--r--include/dt-bindings/clock/sh73a0-clock.h3
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h2
-rw-r--r--include/dt-bindings/dma/jz4780-dma.h49
-rw-r--r--include/dt-bindings/gpio/meson8b-gpio.h32
-rw-r--r--include/dt-bindings/leds/common.h21
-rw-r--r--include/dt-bindings/media/omap3-isp.h22
-rw-r--r--include/dt-bindings/media/xilinx-vip.h39
-rw-r--r--include/dt-bindings/mfd/arizona.h (renamed from include/linux/mfd/arizona/gpio.h)81
-rw-r--r--include/dt-bindings/mfd/qcom-rpm.h6
-rw-r--r--include/dt-bindings/mfd/st-lpc.h15
-rw-r--r--include/dt-bindings/net/ti-dp83867.h45
-rw-r--r--include/dt-bindings/pinctrl/mt6397-pinfunc.h256
-rw-r--r--include/dt-bindings/pinctrl/mt65xx.h40
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-gpio.h15
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h4
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq806x.h43
-rw-r--r--include/dt-bindings/reset/qcom,gcc-msm8916.h108
-rw-r--r--include/linux/a.out.h67
-rw-r--r--include/linux/acpi.h99
-rw-r--r--include/linux/acpi_irq.h10
-rw-r--r--include/linux/aio.h70
-rw-r--r--include/linux/alarmtimer.h4
-rw-r--r--include/linux/arm-cci.h9
-rw-r--r--include/linux/async_tx.h3
-rw-r--r--include/linux/backing-dev-defs.h255
-rw-r--r--include/linux/backing-dev.h558
-rw-r--r--include/linux/backlight.h8
-rw-r--r--include/linux/basic_mmio_gpio.h1
-rw-r--r--include/linux/bcm47xx_nvram.h34
-rw-r--r--include/linux/bcma/bcma.h30
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h11
-rw-r--r--include/linux/bcma/bcma_driver_gmac_cmn.h6
-rw-r--r--include/linux/bcma/bcma_driver_mips.h15
-rw-r--r--include/linux/bcma/bcma_driver_pci.h23
-rw-r--r--include/linux/bcma/bcma_driver_pcie2.h4
-rw-r--r--include/linux/bio.h20
-rw-r--r--include/linux/bitmap.h16
-rw-r--r--include/linux/bitops.h4
-rw-r--r--include/linux/blk-cgroup.h655
-rw-r--r--include/linux/blk-mq.h13
-rw-r--r--include/linux/blk_types.h27
-rw-r--r--include/linux/blkdev.h71
-rw-r--r--include/linux/bootmem.h8
-rw-r--r--include/linux/bottom_half.h1
-rw-r--r--include/linux/bpf.h97
-rw-r--r--include/linux/brcmphy.h11
-rw-r--r--include/linux/can/dev.h2
-rw-r--r--include/linux/can/led.h6
-rw-r--r--include/linux/can/skb.h7
-rw-r--r--include/linux/capability.h29
-rw-r--r--include/linux/ceph/ceph_features.h16
-rw-r--r--include/linux/ceph/ceph_fs.h1
-rw-r--r--include/linux/ceph/debugfs.h8
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/osdmap.h5
-rw-r--r--include/linux/cgroup.h25
-rw-r--r--include/linux/cleancache.h13
-rw-r--r--include/linux/clk-provider.h2
-rw-r--r--include/linux/clk.h27
-rw-r--r--include/linux/clk/at91_pmc.h4
-rw-r--r--include/linux/clk/shmobile.h1
-rw-r--r--include/linux/clk/ti.h6
-rw-r--r--include/linux/clkdev.h6
-rw-r--r--include/linux/clockchips.h37
-rw-r--r--include/linux/clocksource.h7
-rw-r--r--include/linux/cma.h12
-rw-r--r--include/linux/compaction.h1
-rw-r--r--include/linux/compiler-gcc.h16
-rw-r--r--include/linux/compiler-intel.h3
-rw-r--r--include/linux/compiler.h24
-rw-r--r--include/linux/configfs.h1
-rw-r--r--include/linux/console.h3
-rw-r--r--include/linux/context_tracking.h25
-rw-r--r--include/linux/context_tracking_state.h10
-rw-r--r--include/linux/cpu.h20
-rw-r--r--include/linux/cpufreq.h5
-rw-r--r--include/linux/cpuidle.h20
-rw-r--r--include/linux/cpumask.h182
-rw-r--r--include/linux/crc-itu-t.h2
-rw-r--r--include/linux/cred.h23
-rw-r--r--include/linux/crush/crush.h12
-rw-r--r--include/linux/crypto.h507
-rw-r--r--include/linux/dcache.h59
-rw-r--r--include/linux/dccp.h4
-rw-r--r--include/linux/debugfs.h1
-rw-r--r--include/linux/devfreq-event.h2
-rw-r--r--include/linux/device-mapper.h5
-rw-r--r--include/linux/device.h16
-rw-r--r--include/linux/dma-buf.h34
-rw-r--r--include/linux/dma-mapping.h4
-rw-r--r--include/linux/dma/hsu.h48
-rw-r--r--include/linux/dma/xilinx_dma.h (renamed from include/linux/amba/xilinx_dma.h)0
-rw-r--r--include/linux/dmaengine.h36
-rw-r--r--include/linux/dmapool.h2
-rw-r--r--include/linux/dmar.h85
-rw-r--r--include/linux/efi.h15
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/elf-randomize.h22
-rw-r--r--include/linux/etherdevice.h43
-rw-r--r--include/linux/f2fs_fs.h11
-rw-r--r--include/linux/falloc.h6
-rw-r--r--include/linux/filter.h37
-rw-r--r--include/linux/fixp-arith.h145
-rw-r--r--include/linux/frontswap.h14
-rw-r--r--include/linux/fs.h162
-rw-r--r--include/linux/fs_pin.h2
-rw-r--r--include/linux/fsnotify_backend.h2
-rw-r--r--include/linux/ftrace_event.h20
-rw-r--r--include/linux/fwnode.h27
-rw-r--r--include/linux/gfp.h23
-rw-r--r--include/linux/gpio.h7
-rw-r--r--include/linux/gpio/consumer.h120
-rw-r--r--include/linux/gpio/driver.h61
-rw-r--r--include/linux/hardirq.h2
-rw-r--r--include/linux/hid-sensor-hub.h55
-rw-r--r--include/linux/hid-sensor-ids.h2
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/highmem.h2
-rw-r--r--include/linux/host1x.h1
-rw-r--r--include/linux/hrtimer.h167
-rw-r--r--include/linux/hsi/hsi.h6
-rw-r--r--include/linux/htirq.h22
-rw-r--r--include/linux/hugetlb.h20
-rw-r--r--include/linux/hw_random.h4
-rw-r--r--include/linux/hyperv.h37
-rw-r--r--include/linux/i2c.h59
-rw-r--r--include/linux/i2c/twl.h1
-rw-r--r--include/linux/ide.h27
-rw-r--r--include/linux/ieee802154.h30
-rw-r--r--include/linux/if_bridge.h1
-rw-r--r--include/linux/if_link.h10
-rw-r--r--include/linux/if_macvlan.h2
-rw-r--r--include/linux/if_pppox.h4
-rw-r--r--include/linux/if_vlan.h95
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/inet_diag.h44
-rw-r--r--include/linux/inetdevice.h3
-rw-r--r--include/linux/init_task.h5
-rw-r--r--include/linux/intel-iommu.h29
-rw-r--r--include/linux/interrupt.h9
-rw-r--r--include/linux/io-mapping.h2
-rw-r--r--include/linux/io.h18
-rw-r--r--include/linux/iommu-common.h51
-rw-r--r--include/linux/iommu.h77
-rw-r--r--include/linux/ioport.h8
-rw-r--r--include/linux/ipv6.h4
-rw-r--r--include/linux/irq.h88
-rw-r--r--include/linux/irqchip/arm-gic-acpi.h31
-rw-r--r--include/linux/irqchip/arm-gic.h2
-rw-r--r--include/linux/irqchip/mips-gic.h7
-rw-r--r--include/linux/irqdesc.h12
-rw-r--r--include/linux/irqdomain.h8
-rw-r--r--include/linux/jbd2.h4
-rw-r--r--include/linux/jhash.h17
-rw-r--r--include/linux/jiffies.h130
-rw-r--r--include/linux/jz4780-nemc.h43
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/kconfig.h24
-rw-r--r--include/linux/kernel.h15
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/kmemleak.h6
-rw-r--r--include/linux/ksm.h17
-rw-r--r--include/linux/ktime.h27
-rw-r--r--include/linux/kvm_host.h99
-rw-r--r--include/linux/kvm_types.h1
-rw-r--r--include/linux/led-class-flash.h19
-rw-r--r--include/linux/leds.h5
-rw-r--r--include/linux/lglock.h5
-rw-r--r--include/linux/lguest.h4
-rw-r--r--include/linux/libata.h12
-rw-r--r--include/linux/livepatch.h16
-rw-r--r--include/linux/lockdep.h21
-rw-r--r--include/linux/mbus.h5
-rw-r--r--include/linux/mdio-gpio.h3
-rw-r--r--include/linux/memblock.h57
-rw-r--r--include/linux/memcontrol.h33
-rw-r--r--include/linux/memory_hotplug.h6
-rw-r--r--include/linux/mempool.h5
-rw-r--r--include/linux/mfd/arizona/core.h12
-rw-r--r--include/linux/mfd/arizona/pdata.h29
-rw-r--r--include/linux/mfd/arizona/registers.h27
-rw-r--r--include/linux/mfd/axp20x.h93
-rw-r--r--include/linux/mfd/cros_ec.h93
-rw-r--r--include/linux/mfd/cros_ec_commands.h277
-rw-r--r--include/linux/mfd/da9055/core.h2
-rw-r--r--include/linux/mfd/da9063/pdata.h1
-rw-r--r--include/linux/mfd/max77686.h5
-rw-r--r--include/linux/mfd/max77693-private.h5
-rw-r--r--include/linux/mfd/max77693.h13
-rw-r--r--include/linux/mfd/max77843-private.h454
-rw-r--r--include/linux/mfd/menelaus.h7
-rw-r--r--include/linux/mfd/mt6397/core.h64
-rw-r--r--include/linux/mfd/mt6397/registers.h362
-rw-r--r--include/linux/mfd/rk808.h3
-rw-r--r--include/linux/mfd/rtsx_pci.h1116
-rw-r--r--include/linux/mfd/samsung/core.h9
-rw-r--r--include/linux/mfd/samsung/irq.h2
-rw-r--r--include/linux/mfd/samsung/rtc.h2
-rw-r--r--include/linux/mfd/sky81452.h31
-rw-r--r--include/linux/mfd/stmpe.h44
-rw-r--r--include/linux/mfd/syscon/atmel-st.h49
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h1
-rw-r--r--include/linux/mfd/tc3589x.h23
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h1
-rw-r--r--include/linux/mfd/tmio.h2
-rw-r--r--include/linux/migrate.h5
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/cmd.h32
-rw-r--r--include/linux/mlx4/device.h74
-rw-r--r--include/linux/mlx4/qp.h18
-rw-r--r--include/linux/mlx5/cmd.h2
-rw-r--r--include/linux/mlx5/cq.h10
-rw-r--r--include/linux/mlx5/device.h217
-rw-r--r--include/linux/mlx5/doorbell.h2
-rw-r--r--include/linux/mlx5/driver.h189
-rw-r--r--include/linux/mlx5/flow_table.h54
-rw-r--r--include/linux/mlx5/mlx5_ifc.h6584
-rw-r--r--include/linux/mlx5/qp.h27
-rw-r--r--include/linux/mlx5/srq.h2
-rw-r--r--include/linux/mlx5/vport.h55
-rw-r--r--include/linux/mm-arch-hooks.h25
-rw-r--r--include/linux/mm.h145
-rw-r--r--include/linux/mm_types.h22
-rw-r--r--include/linux/mmc/card.h16
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mmc/dw_mmc.h6
-rw-r--r--include/linux/mmc/host.h28
-rw-r--r--include/linux/mmc/mmc.h4
-rw-r--r--include/linux/mmc/sdhci-pci-data.h2
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h10
-rw-r--r--include/linux/mmu_notifier.h12
-rw-r--r--include/linux/mmzone.h8
-rw-r--r--include/linux/mod_devicetable.h8
-rw-r--r--include/linux/module.h14
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/mpi.h15
-rw-r--r--include/linux/mtd/cfi.h188
-rw-r--r--include/linux/mtd/map.h54
-rw-r--r--include/linux/mtd/nand.h6
-rw-r--r--include/linux/mtd/spi-nor.h5
-rw-r--r--include/linux/namei.h41
-rw-r--r--include/linux/nbd.h46
-rw-r--r--include/linux/net.h15
-rw-r--r--include/linux/netdev_features.h5
-rw-r--r--include/linux/netdevice.h190
-rw-r--r--include/linux/netfilter.h144
-rw-r--r--include/linux/netfilter/ipset/ip_set.h66
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h38
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h27
-rw-r--r--include/linux/netfilter/x_tables.h60
-rw-r--r--include/linux/netfilter_arp/arp_tables.h3
-rw-r--r--include/linux/netfilter_bridge.h111
-rw-r--r--include/linux/netfilter_bridge/ebtables.h5
-rw-r--r--include/linux/netfilter_defs.h9
-rw-r--r--include/linux/netfilter_ingress.h41
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h3
-rw-r--r--include/linux/netfilter_ipv6.h3
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h3
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nfs4.h7
-rw-r--r--include/linux/nfs_fs.h6
-rw-r--r--include/linux/nfs_idmap.h79
-rw-r--r--include/linux/nfs_xdr.h6
-rw-r--r--include/linux/nilfs2_fs.h2
-rw-r--r--include/linux/nmi.h24
-rw-r--r--include/linux/nvme.h36
-rw-r--r--include/linux/nx842.h11
-rw-r--r--include/linux/of.h50
-rw-r--r--include/linux/of_fdt.h4
-rw-r--r--include/linux/of_graph.h20
-rw-r--r--include/linux/of_irq.h9
-rw-r--r--include/linux/of_mdio.h7
-rw-r--r--include/linux/of_net.h8
-rw-r--r--include/linux/omap-gpmc.h3
-rw-r--r--include/linux/oom.h15
-rw-r--r--include/linux/osq_lock.h5
-rw-r--r--include/linux/page-flags.h105
-rw-r--r--include/linux/pagemap.h3
-rw-r--r--include/linux/pci.h59
-rw-r--r--include/linux/pci_ids.h7
-rw-r--r--include/linux/percpu_counter.h13
-rw-r--r--include/linux/perf_event.h169
-rw-r--r--include/linux/personality.h40
-rw-r--r--include/linux/phy.h14
-rw-r--r--include/linux/phy_fixed.h9
-rw-r--r--include/linux/pinctrl/consumer.h2
-rw-r--r--include/linux/pinctrl/pinctrl.h2
-rw-r--r--include/linux/pinctrl/pinmux.h6
-rw-r--r--include/linux/platform_data/dma-hsu.h25
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h3
-rw-r--r--include/linux/platform_data/gpio-omap.h12
-rw-r--r--include/linux/platform_data/i2c-davinci.h1
-rw-r--r--include/linux/platform_data/irq-renesas-irqc.h27
-rw-r--r--include/linux/platform_data/keyboard-spear.h2
-rw-r--r--include/linux/platform_data/mmc-msm_sdcc.h27
-rw-r--r--include/linux/platform_data/msm_serial_hs.h49
-rw-r--r--include/linux/platform_data/nfcmrvl.h40
-rw-r--r--include/linux/platform_data/ntc_thermistor.h1
-rw-r--r--include/linux/platform_data/nxp-nci.h27
-rw-r--r--include/linux/platform_data/serial-imx.h5
-rw-r--r--include/linux/platform_data/si5351.h4
-rw-r--r--include/linux/platform_data/sky81452-backlight.h46
-rw-r--r--include/linux/platform_data/st-nci.h (renamed from include/linux/platform_data/st21nfcb.h)14
-rw-r--r--include/linux/platform_data/st33zp24.h (renamed from include/linux/platform_data/tpm_stm_st33.h)21
-rw-r--r--include/linux/platform_data/st_nci.h29
-rw-r--r--include/linux/platform_data/video-msm_fb.h146
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/pm-trace.h (renamed from include/linux/resume-trace.h)9
-rw-r--r--include/linux/pm.h22
-rw-r--r--include/linux/pm_clock.h10
-rw-r--r--include/linux/pm_domain.h6
-rw-r--r--include/linux/pm_wakeirq.h51
-rw-r--r--include/linux/pm_wakeup.h9
-rw-r--r--include/linux/pnp.h12
-rw-r--r--include/linux/power/max17042_battery.h4
-rw-r--r--include/linux/power_supply.h11
-rw-r--r--include/linux/preempt.h159
-rw-r--r--include/linux/preempt_mask.h117
-rw-r--r--include/linux/printk.h5
-rw-r--r--include/linux/property.h46
-rw-r--r--include/linux/pstore.h1
-rw-r--r--include/linux/ptp_clock_kernel.h12
-rw-r--r--include/linux/pwm.h12
-rw-r--r--include/linux/pxa2xx_ssp.h3
-rw-r--r--include/linux/qcom_scm.h28
-rw-r--r--include/linux/quota.h90
-rw-r--r--include/linux/quotaops.h14
-rw-r--r--include/linux/raid/pq.h1
-rw-r--r--include/linux/random.h9
-rw-r--r--include/linux/rculist.h10
-rw-r--r--include/linux/rcupdate.h116
-rw-r--r--include/linux/rcutiny.h16
-rw-r--r--include/linux/rcutree.h9
-rw-r--r--include/linux/reboot.h3
-rw-r--r--include/linux/regmap.h14
-rw-r--r--include/linux/regulator/driver.h11
-rw-r--r--include/linux/regulator/machine.h9
-rw-r--r--include/linux/regulator/max8973-regulator.h4
-rw-r--r--include/linux/remoteproc.h2
-rw-r--r--include/linux/rhashtable.h547
-rw-r--r--include/linux/rio.h2
-rw-r--r--include/linux/rmap.h8
-rw-r--r--include/linux/rtnetlink.h13
-rw-r--r--include/linux/scatterlist.h40
-rw-r--r--include/linux/sched.h159
-rw-r--r--include/linux/sched/rt.h7
-rw-r--r--include/linux/sched/sysctl.h12
-rw-r--r--include/linux/security.h28
-rw-r--r--include/linux/seqlock.h47
-rw-r--r--include/linux/serial_8250.h20
-rw-r--r--include/linux/serial_core.h23
-rw-r--r--include/linux/serial_mfd.h47
-rw-r--r--include/linux/shdma-base.h1
-rw-r--r--include/linux/skbuff.h98
-rw-r--r--include/linux/slab.h28
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/smpboot.h6
-rw-r--r--include/linux/sock_diag.h46
-rw-r--r--include/linux/socket.h8
-rw-r--r--include/linux/spi/at86rf230.h1
-rw-r--r--include/linux/spinlock.h2
-rw-r--r--include/linux/srcu.h2
-rw-r--r--include/linux/stacktrace.h2
-rw-r--r--include/linux/stmmac.h3
-rw-r--r--include/linux/string_helpers.h10
-rw-r--r--include/linux/sunrpc/msg_prot.h8
-rw-r--r--include/linux/sunrpc/xprtrdma.h5
-rw-r--r--include/linux/sw842.h12
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/tcp.h28
-rw-r--r--include/linux/tick.h19
-rw-r--r--include/linux/time64.h2
-rw-r--r--include/linux/timekeeper_internal.h19
-rw-r--r--include/linux/timekeeping.h2
-rw-r--r--include/linux/timer.h63
-rw-r--r--include/linux/timerqueue.h8
-rw-r--r--include/linux/topology.h6
-rw-r--r--include/linux/tracefs.h45
-rw-r--r--include/linux/tracepoint.h8
-rw-r--r--include/linux/tty.h3
-rw-r--r--include/linux/types.h18
-rw-r--r--include/linux/u64_stats_sync.h7
-rw-r--r--include/linux/uaccess.h48
-rw-r--r--include/linux/udp.h2
-rw-r--r--include/linux/uidgid.h16
-rw-r--r--include/linux/uio.h23
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/util_macros.h40
-rw-r--r--include/linux/vfio.h25
-rw-r--r--include/linux/vgaarb.h5
-rw-r--r--include/linux/virtio.h2
-rw-r--r--include/linux/virtio_config.h16
-rw-r--r--include/linux/virtio_ring.h23
-rw-r--r--include/linux/wait.h17
-rw-r--r--include/linux/watchdog.h8
-rw-r--r--include/linux/wl12xx.h49
-rw-r--r--include/linux/writeback.h221
-rw-r--r--include/linux/zsmalloc.h1
-rw-r--r--include/media/adv7604.h83
-rw-r--r--include/media/davinci/vpfe_capture.h2
-rw-r--r--include/media/media-entity.h21
-rw-r--r--include/media/mt9p031.h2
-rw-r--r--include/media/omap3isp.h38
-rw-r--r--include/media/ov2659.h34
-rw-r--r--include/media/saa7146_vv.h4
-rw-r--r--include/media/v4l2-clk.h10
-rw-r--r--include/media/v4l2-dev.h1
-rw-r--r--include/media/v4l2-device.h2
-rw-r--r--include/media/v4l2-ioctl.h6
-rw-r--r--include/media/v4l2-of.h30
-rw-r--r--include/media/v4l2-subdev.h55
-rw-r--r--include/media/videobuf2-core.h20
-rw-r--r--include/misc/cxl-base.h48
-rw-r--r--include/misc/cxl.h207
-rw-r--r--include/net/9p/client.h8
-rw-r--r--include/net/9p/transport.h2
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/af_unix.h1
-rw-r--r--include/net/af_vsock.h6
-rw-r--r--include/net/arp.h20
-rw-r--r--include/net/ax25.h5
-rw-r--r--include/net/bluetooth/bluetooth.h56
-rw-r--r--include/net/bluetooth/hci.h69
-rw-r--r--include/net/bluetooth/hci_core.h266
-rw-r--r--include/net/bluetooth/mgmt.h101
-rw-r--r--include/net/bond_3ad.h29
-rw-r--r--include/net/bond_options.h3
-rw-r--r--include/net/bonding.h10
-rw-r--r--include/net/cfg80211.h135
-rw-r--r--include/net/cfg802154.h72
-rw-r--r--include/net/checksum.h4
-rw-r--r--include/net/codel.h22
-rw-r--r--include/net/compat.h2
-rw-r--r--include/net/dcbnl.h3
-rw-r--r--include/net/dn_neigh.h5
-rw-r--r--include/net/dsa.h27
-rw-r--r--include/net/dst.h18
-rw-r--r--include/net/dst_ops.h1
-rw-r--r--include/net/fib_rules.h14
-rw-r--r--include/net/flow_dissector.h220
-rw-r--r--include/net/flow_keys.h45
-rw-r--r--include/net/genetlink.h4
-rw-r--r--include/net/geneve.h5
-rw-r--r--include/net/ieee802154_netdev.h34
-rw-r--r--include/net/if_inet6.h4
-rw-r--r--include/net/inet6_connection_sock.h3
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_common.h9
-rw-r--r--include/net/inet_connection_sock.h44
-rw-r--r--include/net/inet_frag.h2
-rw-r--r--include/net/inet_hashtables.h85
-rw-r--r--include/net/inet_sock.h31
-rw-r--r--include/net/inet_timewait_sock.h108
-rw-r--r--include/net/inetpeer.h3
-rw-r--r--include/net/ip.h54
-rw-r--r--include/net/ip6_fib.h45
-rw-r--r--include/net/ip6_route.h24
-rw-r--r--include/net/ip6_tunnel.h6
-rw-r--r--include/net/ip_fib.h82
-rw-r--r--include/net/ip_tunnels.h1
-rw-r--r--include/net/ip_vs.h69
-rw-r--r--include/net/ipv6.h53
-rw-r--r--include/net/iw_handler.h22
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/mac80211.h511
-rw-r--r--include/net/mac802154.h263
-rw-r--r--include/net/ndisc.h19
-rw-r--r--include/net/neighbour.h67
-rw-r--r--include/net/net_namespace.h63
-rw-r--r--include/net/netfilter/br_netfilter.h60
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h8
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h13
-rw-r--r--include/net/netfilter/nf_conntrack.h5
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h48
-rw-r--r--include/net/netfilter/nf_queue.h8
-rw-r--r--include/net/netfilter/nf_tables.h636
-rw-r--r--include/net/netfilter/nf_tables_core.h3
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h5
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h5
-rw-r--r--include/net/netfilter/nft_meta.h4
-rw-r--r--include/net/netlink.h50
-rw-r--r--include/net/netns/generic.h2
-rw-r--r--include/net/netns/hash.h4
-rw-r--r--include/net/netns/ipv4.h16
-rw-r--r--include/net/netns/ipv6.h4
-rw-r--r--include/net/netns/mpls.h17
-rw-r--r--include/net/netns/netfilter.h4
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/netns/sctp.h1
-rw-r--r--include/net/netns/x_tables.h3
-rw-r--r--include/net/nfc/hci.h11
-rw-r--r--include/net/nfc/nci.h1
-rw-r--r--include/net/nfc/nci_core.h76
-rw-r--r--include/net/nfc/nfc.h24
-rw-r--r--include/net/nl802154.h85
-rw-r--r--include/net/ping.h7
-rw-r--r--include/net/request_sock.h132
-rw-r--r--include/net/rtnetlink.h2
-rw-r--r--include/net/sch_generic.h27
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sctp/structs.h4
-rw-r--r--include/net/sock.h94
-rw-r--r--include/net/switchdev.h246
-rw-r--r--include/net/tc_act/tc_bpf.h6
-rw-r--r--include/net/tcp.h224
-rw-r--r--include/net/tcp_states.h4
-rw-r--r--include/net/udp.h24
-rw-r--r--include/net/udp_tunnel.h5
-rw-r--r--include/net/vxlan.h4
-rw-r--r--include/net/xfrm.h25
-rw-r--r--include/ras/ras_event.h85
-rw-r--r--include/rdma/ib_addr.h7
-rw-r--r--include/rdma/ib_cache.h8
-rw-r--r--include/rdma/ib_cm.h7
-rw-r--r--include/rdma/ib_mad.h41
-rw-r--r--include/rdma/ib_verbs.h394
-rw-r--r--include/rdma/iw_cm.h1
-rw-r--r--include/rdma/iw_portmap.h25
-rw-r--r--include/rdma/opa_smi.h106
-rw-r--r--include/rdma/rdma_cm.h2
-rw-r--r--include/rxrpc/packet.h3
-rw-r--r--include/scsi/scsi.h291
-rw-r--r--include/scsi/scsi_common.h64
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_devinfo.h1
-rw-r--r--include/scsi/scsi_eh.h31
-rw-r--r--include/scsi/scsi_proto.h281
-rw-r--r--include/scsi/scsi_transport_fc.h1
-rw-r--r--include/scsi/srp.h7
-rw-r--r--include/sound/ac97_codec.h4
-rw-r--r--include/sound/compress_driver.h4
-rw-r--r--include/sound/control.h2
-rw-r--r--include/sound/core.h3
-rw-r--r--include/sound/designware_i2s.h2
-rw-r--r--include/sound/emu10k1.h14
-rw-r--r--include/sound/hda_regmap.h219
-rw-r--r--include/sound/hdaudio.h247
-rw-r--r--include/sound/pcm.h66
-rw-r--r--include/sound/pcm_params.h7
-rw-r--r--include/sound/rt5670.h1
-rw-r--r--include/sound/seq_device.h46
-rw-r--r--include/sound/seq_kernel.h6
-rw-r--r--include/sound/simple_card.h1
-rw-r--r--include/sound/soc-dapm.h8
-rw-r--r--include/sound/soc-dpcm.h2
-rw-r--r--include/sound/soc.h44
-rw-r--r--include/sound/spear_dma.h2
-rw-r--r--include/target/iscsi/iscsi_target_core.h16
-rw-r--r--include/target/target_core_backend.h8
-rw-r--r--include/target/target_core_base.h25
-rw-r--r--include/target/target_core_configfs.h8
-rw-r--r--include/target/target_core_fabric.h37
-rw-r--r--include/target/target_core_fabric_configfs.h5
-rw-r--r--include/trace/events/9p.h157
-rw-r--r--include/trace/events/btrfs.h8
-rw-r--r--include/trace/events/clk.h198
-rw-r--r--include/trace/events/cma.h66
-rw-r--r--include/trace/events/ext3.h18
-rw-r--r--include/trace/events/ext4.h57
-rw-r--r--include/trace/events/f2fs.h225
-rw-r--r--include/trace/events/filemap.h8
-rw-r--r--include/trace/events/intel-sst.h7
-rw-r--r--include/trace/events/irq.h39
-rw-r--r--include/trace/events/kmem.h96
-rw-r--r--include/trace/events/migrate.h42
-rw-r--r--include/trace/events/module.h4
-rw-r--r--include/trace/events/power.h25
-rw-r--r--include/trace/events/random.h10
-rw-r--r--include/trace/events/sched.h3
-rw-r--r--include/trace/events/sunrpc.h62
-rw-r--r--include/trace/events/target.h2
-rw-r--r--include/trace/events/timer.h12
-rw-r--r--include/trace/events/tlb.h30
-rw-r--r--include/trace/events/v4l2.h75
-rw-r--r--include/trace/events/vmscan.h8
-rw-r--r--include/trace/events/writeback.h49
-rw-r--r--include/trace/events/xen.h2
-rw-r--r--include/trace/ftrace.h41
-rw-r--r--include/uapi/asm-generic/errno.h11
-rw-r--r--include/uapi/drm/drm.h1
-rw-r--r--include/uapi/drm/drm_fourcc.h78
-rw-r--r--include/uapi/drm/drm_mode.h9
-rw-r--r--include/uapi/drm/i915_drm.h5
-rw-r--r--include/uapi/drm/nouveau_drm.h1
-rw-r--r--include/uapi/drm/radeon_drm.h5
-rw-r--r--include/uapi/drm/tegra_drm.h3
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/am437x-vpfe.h2
-rw-r--r--include/uapi/linux/bpf.h106
-rw-r--r--include/uapi/linux/can.h6
-rw-r--r--include/uapi/linux/can/gw.h5
-rw-r--r--include/uapi/linux/can/raw.h1
-rw-r--r--include/uapi/linux/cryptouser.h (renamed from include/linux/cryptouser.h)6
-rw-r--r--include/uapi/linux/dcbnl.h76
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/ethtool.h37
-rw-r--r--include/uapi/linux/falloc.h17
-rw-r--r--include/uapi/linux/filter.h10
-rw-r--r--include/uapi/linux/fou.h1
-rw-r--r--include/uapi/linux/hsi/cs-protocol.h16
-rw-r--r--include/uapi/linux/if_addr.h2
-rw-r--r--include/uapi/linux/if_link.h43
-rw-r--r--include/uapi/linux/if_packet.h8
-rw-r--r--include/uapi/linux/in.h3
-rw-r--r--include/uapi/linux/inet_diag.h8
-rw-r--r--include/uapi/linux/input.h7
-rw-r--r--include/uapi/linux/ip.h1
-rw-r--r--include/uapi/linux/ip_vs.h7
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/ipv6_route.h1
-rw-r--r--include/uapi/linux/kvm.h10
-rw-r--r--include/uapi/linux/magic.h2
-rw-r--r--include/uapi/linux/media-bus-format.h28
-rw-r--r--include/uapi/linux/media.h52
-rw-r--r--include/uapi/linux/mpls.h10
-rw-r--r--include/uapi/linux/nbd.h2
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/netfilter.h9
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_tcp.h3
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h82
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h4
-rw-r--r--include/uapi/linux/netfilter/xt_socket.h8
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h4
-rw-r--r--include/uapi/linux/netlink.h16
-rw-r--r--include/uapi/linux/nfc.h10
-rw-r--r--include/uapi/linux/nfs4.h7
-rw-r--r--include/uapi/linux/nfs_idmap.h2
-rw-r--r--include/uapi/linux/nfsd/debug.h8
-rw-r--r--include/uapi/linux/nfsd/export.h3
-rw-r--r--include/uapi/linux/nl80211.h62
-rw-r--r--include/uapi/linux/nvme.h5
-rw-r--r--include/uapi/linux/openvswitch.h4
-rw-r--r--include/uapi/linux/perf_event.h133
-rw-r--r--include/uapi/linux/pkt_cls.h59
-rw-r--r--include/uapi/linux/pkt_sched.h7
-rw-r--r--include/uapi/linux/quota.h6
-rw-r--r--include/uapi/linux/raid/md_p.h7
-rw-r--r--include/uapi/linux/raid/md_u.h1
-rw-r--r--include/uapi/linux/rds.h10
-rw-r--r--include/uapi/linux/rtnetlink.h19
-rw-r--r--include/uapi/linux/serial_reg.h19
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/sock_diag.h10
-rw-r--r--include/uapi/linux/target_core_user.h44
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h2
-rw-r--r--include/uapi/linux/tcp.h7
-rw-r--r--include/uapi/linux/tipc_netlink.h9
-rw-r--r--include/uapi/linux/tty.h1
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h64
-rw-r--r--include/uapi/linux/v4l2-subdev.h12
-rw-r--r--include/uapi/linux/vfio.h104
-rw-r--r--include/uapi/linux/videodev2.h18
-rw-r--r--include/uapi/linux/virtio_balloon.h33
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/linux/virtio_input.h76
-rw-r--r--include/uapi/linux/virtio_ring.h2
-rw-r--r--include/uapi/linux/xfrm.h2
-rw-r--r--include/uapi/linux/xilinx-v4l2-controls.h73
-rw-r--r--include/uapi/misc/cxl.h22
-rw-r--r--include/uapi/rdma/ib_user_verbs.h19
-rw-r--r--include/uapi/rdma/rdma_netlink.h1
-rw-r--r--include/uapi/sound/asequencer.h1
-rw-r--r--include/uapi/sound/asound.h41
-rw-r--r--include/uapi/sound/compress_offload.h2
-rw-r--r--include/uapi/sound/emu10k1.h3
-rw-r--r--include/uapi/sound/hdspm.h6
-rw-r--r--include/video/imx-ipu-v3.h2
-rw-r--r--include/video/neomagic.h5
-rw-r--r--include/video/omapdss.h7
-rw-r--r--include/video/samsung_fimd.h11
-rw-r--r--include/video/tdfx.h2
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/grant_table.h1
-rw-r--r--include/xen/interface/xen.h6
-rw-r--r--include/xen/xen-ops.h48
-rw-r--r--include/xen/xenbus.h20
757 files changed, 29831 insertions, 7644 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 61e32ec1fc4d..b43276f339ef 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -208,7 +208,10 @@ struct acpi_device_flags {
u32 visited:1;
u32 hotplug_notify:1;
u32 is_dock_station:1;
- u32 reserved:23;
+ u32 of_compatible_ok:1;
+ u32 coherent_dma:1;
+ u32 cca_seen:1;
+ u32 reserved:20;
};
/* File System */
@@ -252,6 +255,7 @@ struct acpi_device_pnp {
#define acpi_device_bid(d) ((d)->pnp.bus_id)
#define acpi_device_adr(d) ((d)->pnp.bus_address)
const char *acpi_device_hid(struct acpi_device *device);
+#define acpi_device_uid(d) ((d)->pnp.unique_id)
#define acpi_device_name(d) ((d)->pnp.device_name)
#define acpi_device_class(d) ((d)->pnp.device_class)
@@ -270,7 +274,6 @@ struct acpi_device_power_flags {
struct acpi_device_power_state {
struct {
u8 valid:1;
- u8 os_accessible:1;
u8 explicit_set:1; /* _PSx present? */
u8 reserved:6;
} flags;
@@ -379,6 +382,39 @@ struct acpi_device {
void (*remove)(struct acpi_device *);
};
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+ bool ret = false;
+
+ if (!adev)
+ return ret;
+
+ /**
+ * Currently, we only support _CCA=1 (i.e. coherent_dma=1)
+ * This should be equivalent to specifying dma-coherent for
+ * a device in OF.
+ *
+ * For the case when _CCA=0 (i.e. coherent_dma=0 && cca_seen=1),
+ * There are two cases:
+ * case 1. Do not support and disable DMA.
+ * case 2. Support but rely on arch-specific cache maintenance for
+ * non-coherent DMA operations.
+ * Currently, we implement case 1 above.
+ *
+ * For the case when _CCA is missing (i.e. cca_seen=0) and
+ * platform specifies ACPI_CCA_REQUIRED, we do not support DMA,
+ * and fall back to arch-specific default handling.
+ *
+ * See acpi_init_coherency() for more info.
+ */
+ if (adev->flags.coherent_dma) {
+ ret = true;
+ if (coherent)
+ *coherent = adev->flags.coherent_dma;
+ }
+ return ret;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_ACPI;
@@ -386,7 +422,8 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode)
static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
{
- return fwnode ? container_of(fwnode, struct acpi_device, fwnode) : NULL;
+ return is_acpi_node(fwnode) ?
+ container_of(fwnode, struct acpi_device, fwnode) : NULL;
}
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
@@ -599,7 +636,7 @@ static inline bool acpi_device_can_wakeup(struct acpi_device *adev)
static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
{
- return adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible;
+ return adev->power.states[ACPI_STATE_D3_COLD].flags.valid;
}
#else /* CONFIG_ACPI */
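
The acpi_check_dma() helper introduced above reports DMA support only when _CCA is present and set to 1, in which case the DMA is also coherent. A minimal sketch of how a caller might consume it; the function name is illustrative and normal kernel context is assumed:

#include <linux/acpi.h>

/* Illustrative only: true when ACPI allows DMA for the device, which under
 * the current policy also means the DMA is cache-coherent (_CCA = 1). */
static bool example_acpi_dma_is_coherent(struct acpi_device *adev)
{
        bool coherent = false;

        if (!acpi_check_dma(adev, &coherent))
                return false;   /* _CCA absent or 0: DMA not supported */

        return coherent;
}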
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index 444671e9c65d..dd86c5fc102d 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -3,11 +3,15 @@
#include <linux/io.h>
+#include <asm/acpi.h>
+
+#ifndef acpi_os_ioremap
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
return ioremap_cache(phys, size);
}
+#endif
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
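
The new #ifndef guard around acpi_os_ioremap(), together with the added <asm/acpi.h> include, lets an architecture supply its own mapping helper and suppress the generic ioremap_cache() fallback. A hypothetical arch-side override is sketched below; it only illustrates the override pattern, not any particular architecture's requirements:

/* Hypothetical fragment of an architecture's <asm/acpi.h>, assuming
 * <linux/io.h> and the ACPI type definitions are already visible. */
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
                                            acpi_size size)
{
        return ioremap(phys, size);     /* map as device memory, not cacheable */
}
#define acpi_os_ioremap acpi_os_ioremap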
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 0bc78df66d4b..d02df0a49d98 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -95,7 +95,7 @@ acpi_physical_address acpi_os_get_root_pointer(void);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
- acpi_string * new_val);
+ char **new_val);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index d56f5d722138..d68f1cd39c49 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20150204
+#define ACPI_CA_VERSION 0x20150515
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -431,13 +431,13 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
- acpi_find_root_pointer(acpi_size * rsdp_address))
-
+ acpi_find_root_pointer(acpi_physical_address *
+ rsdp_address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_table_header(acpi_string signature,
- u32 instance,
- struct acpi_table_header
- *out_table_header))
+ acpi_get_table_header(acpi_string signature,
+ u32 instance,
+ struct acpi_table_header
+ *out_table_header))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table(acpi_string signature, u32 instance,
struct acpi_table_header
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index d4081fef1095..cb8a6b97ceda 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -284,6 +284,7 @@ struct acpi_table_fadt {
struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */
struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */
+ u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */
};
/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */
@@ -341,7 +342,7 @@ enum acpi_preferred_pm_profiles {
PM_TABLET = 8
};
-/* Values for sleep_status and sleep_control registers (V5 FADT) */
+/* Values for sleep_status and sleep_control registers (V5+ FADT) */
#define ACPI_X_WAKE_STATUS 0x80
#define ACPI_X_SLEEP_TYPE_MASK 0x1C
@@ -398,15 +399,17 @@ struct acpi_table_desc {
* FADT is the bottom line as to what the version really is.
*
* For reference, the values below are as follows:
- * FADT V1 size: 0x074
- * FADT V2 size: 0x084
- * FADT V3 size: 0x0F4
- * FADT V4 size: 0x0F4
- * FADT V5 size: 0x10C
+ * FADT V1 size: 0x074
+ * FADT V2 size: 0x084
+ * FADT V3 size: 0x0F4
+ * FADT V4 size: 0x0F4
+ * FADT V5 size: 0x10C
+ * FADT V6 size: 0x114
*/
#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control))
-#define ACPI_FADT_V5_SIZE (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
+#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
#endif /* __ACTBL_H__ */
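
With hypervisor_id appended to the FADT for ACPI 6.0, ACPI_FADT_V5_SIZE now ends at that new field and ACPI_FADT_V6_SIZE covers the whole structure. The sketch below, with an invented function name, only illustrates what the size macros encode; ACPICA itself keys off acpi_gbl_FADT internally:

#include <linux/acpi.h>

static const char *example_fadt_revision(const struct acpi_table_fadt *fadt)
{
        u32 len = fadt->header.length;

        if (len >= ACPI_FADT_V6_SIZE)
                return "v6 (ACPI 6.0, includes hypervisor_id)";
        if (len >= ACPI_FADT_V5_SIZE)
                return "v5";
        if (len >= ACPI_FADT_V3_SIZE)
                return "v3/v4";
        if (len >= ACPI_FADT_V2_SIZE)
                return "v2";
        return "v1";
}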
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index b80b0e6dabc5..06b61f01ea59 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -71,6 +71,7 @@
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
+#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -673,7 +674,8 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
- ACPI_MADT_TYPE_RESERVED = 15 /* 15 and greater are reserved */
+ ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
+ ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */
};
/*
@@ -794,7 +796,7 @@ struct acpi_madt_local_x2apic_nmi {
u8 reserved[3]; /* reserved - must be zero */
};
-/* 11: Generic Interrupt (ACPI 5.0) */
+/* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */
struct acpi_madt_generic_interrupt {
struct acpi_subtable_header header;
@@ -811,6 +813,8 @@ struct acpi_madt_generic_interrupt {
u32 vgic_interrupt;
u64 gicr_base_address;
u64 arm_mpidr;
+ u8 efficiency_class;
+ u8 reserved2[3];
};
/* Masks for Flags field above */
@@ -819,7 +823,7 @@ struct acpi_madt_generic_interrupt {
#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
-/* 12: Generic Distributor (ACPI 5.0) */
+/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
struct acpi_madt_generic_distributor {
struct acpi_subtable_header header;
@@ -827,7 +831,8 @@ struct acpi_madt_generic_distributor {
u32 gic_id;
u64 base_address;
u32 global_irq_base;
- u32 reserved2; /* reserved - must be zero */
+ u8 version;
+ u8 reserved2[3]; /* reserved - must be zero */
};
/* 13: Generic MSI Frame (ACPI 5.1) */
@@ -855,6 +860,16 @@ struct acpi_madt_generic_redistributor {
u32 length;
};
+/* 15: Generic Translator (ACPI 6.0) */
+
+struct acpi_madt_generic_translator {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 translation_id;
+ u64 base_address;
+ u32 reserved2;
+};
+
/*
* Common flags fields for MADT subtables
*/
@@ -908,6 +923,159 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
+ * NFIT - NVDIMM Firmware Interface Table (ACPI 6.0)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_nfit {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved; /* Reserved, must be zero */
+};
+
+/* Subtable header for NFIT */
+
+struct acpi_nfit_header {
+ u16 type;
+ u16 length;
+};
+
+/* Values for subtable type in struct acpi_nfit_header */
+
+enum acpi_nfit_type {
+ ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0,
+ ACPI_NFIT_TYPE_MEMORY_MAP = 1,
+ ACPI_NFIT_TYPE_INTERLEAVE = 2,
+ ACPI_NFIT_TYPE_SMBIOS = 3,
+ ACPI_NFIT_TYPE_CONTROL_REGION = 4,
+ ACPI_NFIT_TYPE_DATA_REGION = 5,
+ ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
+ ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
+};
+
+/*
+ * NFIT Subtables
+ */
+
+/* 0: System Physical Address Range Structure */
+
+struct acpi_nfit_system_address {
+ struct acpi_nfit_header header;
+ u16 range_index;
+ u16 flags;
+ u32 reserved; /* Reserved, must be zero */
+ u32 proximity_domain;
+ u8 range_guid[16];
+ u64 address;
+ u64 length;
+ u64 memory_mapping;
+};
+
+/* Flags */
+
+#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */
+#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */
+
+/* Range Type GUIDs appear in the include/acuuid.h file */
+
+/* 1: Memory Device to System Address Range Map Structure */
+
+struct acpi_nfit_memory_map {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 physical_id;
+ u16 region_id;
+ u16 range_index;
+ u16 region_index;
+ u64 region_size;
+ u64 region_offset;
+ u64 address;
+ u16 interleave_index;
+ u16 interleave_ways;
+ u16 flags;
+ u16 reserved; /* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */
+#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */
+#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */
+#define ACPI_NFIT_MEM_ARMED (1<<3) /* 03: Memory Device observed to be not armed */
+#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */
+#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */
+
+/* 2: Interleave Structure */
+
+struct acpi_nfit_interleave {
+ struct acpi_nfit_header header;
+ u16 interleave_index;
+ u16 reserved; /* Reserved, must be zero */
+ u32 line_count;
+ u32 line_size;
+ u32 line_offset[1]; /* Variable length */
+};
+
+/* 3: SMBIOS Management Information Structure */
+
+struct acpi_nfit_smbios {
+ struct acpi_nfit_header header;
+ u32 reserved; /* Reserved, must be zero */
+ u8 data[1]; /* Variable length */
+};
+
+/* 4: NVDIMM Control Region Structure */
+
+struct acpi_nfit_control_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_revision_id;
+ u8 reserved[6]; /* Reserved, must be zero */
+ u32 serial_number;
+ u16 code;
+ u16 windows;
+ u64 window_size;
+ u64 command_offset;
+ u64 command_size;
+ u64 status_offset;
+ u64 status_size;
+ u16 flags;
+ u8 reserved1[6]; /* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
+
+/* 5: NVDIMM Block Data Window Region Structure */
+
+struct acpi_nfit_data_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 windows;
+ u64 offset;
+ u64 size;
+ u64 capacity;
+ u64 start_address;
+};
+
+/* 6: Flush Hint Address Structure */
+
+struct acpi_nfit_flush_address {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 hint_count;
+ u8 reserved[6]; /* Reserved, must be zero */
+ u64 hint_address[1]; /* Variable length */
+};
+
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
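
The NFIT added above is a fixed header followed by variable-length subtables that all start with struct acpi_nfit_header. A sketch of walking them, with an invented function name and only minimal bounds checking:

#include <linux/acpi.h>

static unsigned int example_count_nfit_spa_ranges(struct acpi_table_nfit *nfit)
{
        u8 *p = (u8 *)nfit + sizeof(*nfit);
        u8 *end = (u8 *)nfit + nfit->header.length;
        unsigned int spa_ranges = 0;

        while (p + sizeof(struct acpi_nfit_header) <= end) {
                struct acpi_nfit_header *hdr = (struct acpi_nfit_header *)p;

                if (!hdr->length || p + hdr->length > end)
                        break;          /* malformed subtable */
                if (hdr->type == ACPI_NFIT_TYPE_SYSTEM_ADDRESS)
                        spa_ranges++;   /* struct acpi_nfit_system_address */
                p += hdr->length;
        }
        return spa_ranges;
}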
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index f06d75e5fa54..370d69d871a0 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -69,10 +69,12 @@
#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
+#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
+#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */
#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
@@ -649,6 +651,131 @@ struct acpi_ibft_target {
/*******************************************************************************
*
+ * IORT - IO Remapping Table
+ *
+ * Conforms to "IO Remapping Table System Software on ARM Platforms",
+ * Document number: ARM DEN 0049A, 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_iort {
+ struct acpi_table_header header;
+ u32 node_count;
+ u32 node_offset;
+ u32 reserved;
+};
+
+/*
+ * IORT subtables
+ */
+struct acpi_iort_node {
+ u8 type;
+ u16 length;
+ u8 revision;
+ u32 reserved;
+ u32 mapping_count;
+ u32 mapping_offset;
+ char node_data[1];
+};
+
+/* Values for subtable Type above */
+
+enum acpi_iort_node_type {
+ ACPI_IORT_NODE_ITS_GROUP = 0x00,
+ ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
+ ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
+ ACPI_IORT_NODE_SMMU = 0x03
+};
+
+struct acpi_iort_id_mapping {
+ u32 input_base; /* Lowest value in input range */
+ u32 id_count; /* Number of IDs */
+ u32 output_base; /* Lowest value in output range */
+ u32 output_reference; /* A reference to the output node */
+ u32 flags;
+};
+
+/* Masks for Flags field above for IORT subtable */
+
+#define ACPI_IORT_ID_SINGLE_MAPPING (1)
+
+struct acpi_iort_memory_access {
+ u32 cache_coherency;
+ u8 hints;
+ u16 reserved;
+ u8 memory_flags;
+};
+
+/* Values for cache_coherency field above */
+
+#define ACPI_IORT_NODE_COHERENT 0x00000001 /* The device node is fully coherent */
+#define ACPI_IORT_NODE_NOT_COHERENT 0x00000000 /* The device node is not coherent */
+
+/* Masks for Hints field above */
+
+#define ACPI_IORT_HT_TRANSIENT (1)
+#define ACPI_IORT_HT_WRITE (1<<1)
+#define ACPI_IORT_HT_READ (1<<2)
+#define ACPI_IORT_HT_OVERRIDE (1<<3)
+
+/* Masks for memory_flags field above */
+
+#define ACPI_IORT_MF_COHERENCY (1)
+#define ACPI_IORT_MF_ATTRIBUTES (1<<1)
+
+/*
+ * IORT node specific subtables
+ */
+struct acpi_iort_its_group {
+ u32 its_count;
+ u32 identifiers[1]; /* GIC ITS identifier array */
+};
+
+struct acpi_iort_named_component {
+ u32 node_flags;
+ u64 memory_properties; /* Memory access properties */
+ u8 memory_address_limit; /* Memory address size limit */
+ char device_name[1]; /* Path of namespace object */
+};
+
+struct acpi_iort_root_complex {
+ u64 memory_properties; /* Memory access properties */
+ u32 ats_attribute;
+ u32 pci_segment_number;
+};
+
+/* Values for ats_attribute field above */
+
+#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */
+#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */
+
+struct acpi_iort_smmu {
+ u64 base_address; /* SMMU base address */
+ u64 span; /* Length of memory range */
+ u32 model;
+ u32 flags;
+ u32 global_interrupt_offset;
+ u32 context_interrupt_count;
+ u32 context_interrupt_offset;
+ u32 pmu_interrupt_count;
+ u32 pmu_interrupt_offset;
+ u64 interrupts[1]; /* Interrupt array */
+};
+
+/* Values for Model field above */
+
+#define ACPI_IORT_SMMU_V1 0x00000000 /* Generic SMMUv1 */
+#define ACPI_IORT_SMMU_V2 0x00000001 /* Generic SMMUv2 */
+#define ACPI_IORT_SMMU_CORELINK_MMU400 0x00000002 /* ARM Corelink MMU-400 */
+#define ACPI_IORT_SMMU_CORELINK_MMU500 0x00000003 /* ARM Corelink MMU-500 */
+
+/* Masks for Flags field above */
+
+#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
+#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
+
+/*******************************************************************************
+ *
* IVRS - I/O Virtualization Reporting Structure
* Version 1
*
@@ -823,7 +950,7 @@ struct acpi_ivrs_memory {
*
* LPIT - Low Power Idle Table
*
- * Conforms to "ACPI Low Power Idle Table (LPIT) and _LPD Proposal (DRAFT)"
+ * Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014.
*
******************************************************************************/
@@ -845,7 +972,7 @@ struct acpi_lpit_header {
enum acpi_lpit_type {
ACPI_LPIT_TYPE_NATIVE_CSTATE = 0x00,
- ACPI_LPIT_TYPE_SIMPLE_IO = 0x01
+ ACPI_LPIT_TYPE_RESERVED = 0x01 /* 1 and above are reserved */
};
/* Masks for Flags field above */
@@ -868,21 +995,6 @@ struct acpi_lpit_native {
u64 counter_frequency;
};
-/* 0x01: Simple I/O based LPI structure */
-
-struct acpi_lpit_io {
- struct acpi_lpit_header header;
- struct acpi_generic_address entry_trigger;
- u32 trigger_action;
- u64 trigger_value;
- u64 trigger_mask;
- struct acpi_generic_address minimum_idle_state;
- u32 residency;
- u32 latency;
- struct acpi_generic_address residency_counter;
- u64 counter_frequency;
-};
-
/*******************************************************************************
*
* MCFG - PCI Memory Mapped Configuration table and subtable
@@ -935,6 +1047,21 @@ struct acpi_table_mchi {
/*******************************************************************************
*
+ * MSDM - Microsoft Data Management table
+ *
+ * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
+ * November 29, 2011. Copyright 2011 Microsoft
+ *
+ ******************************************************************************/
+
+/* Basic MSDM table is only the common ACPI header */
+
+struct acpi_table_msdm {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/*******************************************************************************
+ *
* MTMR - MID Timer Table
* Version 1
*
@@ -959,10 +1086,9 @@ struct acpi_mtmr_entry {
/*******************************************************************************
*
* SLIC - Software Licensing Description Table
- * Version 1
*
- * Conforms to "OEM Activation 2.0 for Windows Vista Operating Systems",
- * Copyright 2006
+ * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
+ * November 29, 2011. Copyright 2011 Microsoft
*
******************************************************************************/
@@ -972,52 +1098,6 @@ struct acpi_table_slic {
struct acpi_table_header header; /* Common ACPI table header */
};
-/* Common SLIC subtable header */
-
-struct acpi_slic_header {
- u32 type;
- u32 length;
-};
-
-/* Values for Type field above */
-
-enum acpi_slic_type {
- ACPI_SLIC_TYPE_PUBLIC_KEY = 0,
- ACPI_SLIC_TYPE_WINDOWS_MARKER = 1,
- ACPI_SLIC_TYPE_RESERVED = 2 /* 2 and greater are reserved */
-};
-
-/*
- * SLIC Subtables, correspond to Type in struct acpi_slic_header
- */
-
-/* 0: Public Key Structure */
-
-struct acpi_slic_key {
- struct acpi_slic_header header;
- u8 key_type;
- u8 version;
- u16 reserved;
- u32 algorithm;
- char magic[4];
- u32 bit_length;
- u32 exponent;
- u8 modulus[128];
-};
-
-/* 1: Windows Marker Structure */
-
-struct acpi_slic_marker {
- struct acpi_slic_header header;
- u32 version;
- char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
- char windows_flag[8];
- u32 slic_version;
- u8 reserved[16];
- u8 signature[128];
-};
-
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
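
The IORT introduced above follows a similar pattern: the table header carries node_count and node_offset, and each struct acpi_iort_node records its own length. An illustrative walker (name and error handling are made up):

#include <linux/acpi.h>

static unsigned int example_count_iort_smmus(struct acpi_table_iort *iort)
{
        u8 *p = (u8 *)iort + iort->node_offset;
        u8 *end = (u8 *)iort + iort->header.length;
        unsigned int i, smmus = 0;

        for (i = 0; i < iort->node_count; i++) {
                struct acpi_iort_node *node = (struct acpi_iort_node *)p;

                if (p + sizeof(*node) > end || p + node->length > end)
                        break;          /* truncated table */
                if (node->type == ACPI_IORT_NODE_SMMU)
                        smmus++;
                p += node->length;
        }
        return smmus;
}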
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 440ca8104b43..4018986d2a2e 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -68,7 +68,10 @@
#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
+#define ACPI_SIG_STAO "STAO" /* Status Override table */
#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
+#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
+#define ACPI_SIG_XENV "XENV" /* Xen Environment table */
#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
@@ -77,7 +80,6 @@
#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
-#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -117,6 +119,8 @@ struct acpi_table_bgrt {
/*******************************************************************************
*
* DRTM - Dynamic Root of Trust for Measurement table
+ * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0
+ * Table version 1
*
******************************************************************************/
@@ -133,22 +137,40 @@ struct acpi_table_drtm {
u32 flags;
};
-/* 1) Validated Tables List */
+/* Flag Definitions for above */
+
+#define ACPI_DRTM_ACCESS_ALLOWED (1)
+#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1)
+#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2)
+#define ACPI_DRTM_AUTHORITY_ORDER (1<<3)
-struct acpi_drtm_vtl_list {
- u32 validated_table_list_count;
+/* 1) Validated Tables List (64-bit addresses) */
+
+struct acpi_drtm_vtable_list {
+ u32 validated_table_count;
+ u64 validated_tables[1];
};
-/* 2) Resources List */
+/* 2) Resources List (of Resource Descriptors) */
+
+/* Resource Descriptor */
+
+struct acpi_drtm_resource {
+ u8 size[7];
+ u8 type;
+ u64 address;
+};
struct acpi_drtm_resource_list {
- u32 resource_list_count;
+ u32 resource_count;
+ struct acpi_drtm_resource resources[1];
};
/* 3) Platform-specific Identifiers List */
-struct acpi_drtm_id_list {
- u32 id_list_count;
+struct acpi_drtm_dps_id {
+ u32 dps_id_length;
+ u8 dps_id[16];
};
/*******************************************************************************
@@ -685,6 +707,21 @@ enum acpi_rasf_status {
/*******************************************************************************
*
+ * STAO - Status Override Table (_STA override) - ACPI 6.0
+ * Version 1
+ *
+ * Conforms to "ACPI Specification for Status Override Table"
+ * 6 January 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_stao {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 ignore_uart;
+};
+
+/*******************************************************************************
+ *
* TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
* Version 3
*
@@ -713,6 +750,41 @@ struct acpi_tpm2_control {
u64 response_address;
};
+/*******************************************************************************
+ *
+ * WPBT - Windows Platform Binary Table (ACPI 6.0)
+ * Version 1
+ *
+ * Conforms to "Windows Platform Binary Table (WPBT)" 29 November 2011
+ *
+ ******************************************************************************/
+
+struct acpi_table_wpbt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 handoff_size;
+ u64 handoff_address;
+ u8 layout;
+ u8 type;
+ u16 arguments_length;
+};
+
+/*******************************************************************************
+ *
+ * XENV - Xen Environment Table (ACPI 6.0)
+ * Version 1
+ *
+ * Conforms to "ACPI Specification for Xen Environment Table" 4 January 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_xenv {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u64 grant_table_address;
+ u64 grant_table_size;
+ u32 event_interrupt;
+ u8 event_flags;
+};
+
/* Reset to default packing */
#pragma pack()
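
The new XENV table is small enough to read in one piece. A sketch of fetching it through acpi_get_table(); the caller name is made up and error handling is minimal:

#include <linux/acpi.h>

static int example_read_xenv(u64 *gnt_addr, u64 *gnt_size)
{
        struct acpi_table_header *hdr;
        struct acpi_table_xenv *xenv;
        acpi_status status;

        status = acpi_get_table(ACPI_SIG_XENV, 0, &hdr);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        xenv = (struct acpi_table_xenv *)hdr;
        *gnt_addr = xenv->grant_table_address;
        *gnt_size = xenv->grant_table_size;
        return 0;
}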
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index b034f1068dfe..63fd7f5e9fb3 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -124,7 +124,6 @@
#ifndef ACPI_USE_SYSTEM_INTTYPES
typedef unsigned char u8;
-typedef unsigned char u8;
typedef unsigned short u16;
typedef short s16;
typedef COMPILER_DEPENDENT_UINT64 u64;
@@ -199,9 +198,29 @@ typedef int s32;
typedef s32 acpi_native_int;
typedef u32 acpi_size;
+
+#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
+
+/*
+ * OSPMs can define this to shrink the size of the structures for 32-bit
+ * none PAE environment. ASL compiler may always define this to generate
+ * 32-bit OSPM compliant tables.
+ */
typedef u32 acpi_io_address;
typedef u32 acpi_physical_address;
+#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
+/*
+ * It has been reported that, after some calculations, physical addresses can
+ * wrap over the 32-bit boundary in a 32-bit PAE environment.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
+ */
+typedef u64 acpi_io_address;
+typedef u64 acpi_physical_address;
+
+#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
#define ACPI_MAX_PTR ACPI_UINT32_MAX
#define ACPI_SIZE_MAX ACPI_UINT32_MAX
@@ -452,11 +471,6 @@ typedef u8 acpi_owner_id;
#define ACPI_INTEGER_BIT_SIZE 64
#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */
-
-#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */
-#endif
-
#define ACPI_MAX64_DECIMAL_DIGITS 20
#define ACPI_MAX32_DECIMAL_DIGITS 10
#define ACPI_MAX16_DECIMAL_DIGITS 5
@@ -511,6 +525,7 @@ typedef u64 acpi_integer;
#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p))
#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
+#define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b)))
#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
/* Pointer/Integer type conversions */
@@ -713,33 +728,32 @@ typedef u32 acpi_event_type;
* The encoding of acpi_event_status is illustrated below.
* Note that a set bit (1) indicates the property is TRUE
* (e.g. if bit 0 is set then the event is enabled).
- * +-------------+-+-+-+-+
- * | Bits 31:4 |3|2|1|0|
- * +-------------+-+-+-+-+
- * | | | | |
- * | | | | +- Enabled?
- * | | | +--- Enabled for wake?
- * | | +----- Set?
- * | +------- Has a handler?
- * +------------- <Reserved>
+ * +-------------+-+-+-+-+-+
+ * | Bits 31:5 |4|3|2|1|0|
+ * +-------------+-+-+-+-+-+
+ * | | | | | |
+ * | | | | | +- Enabled?
+ * | | | | +--- Enabled for wake?
+ * | | | +----- Status bit set?
+ * | | +------- Enable bit set?
+ * | +--------- Has a handler?
+ * +--------------- <Reserved>
*/
typedef u32 acpi_event_status;
#define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00
#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01
#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02
-#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04
-#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x08
+#define ACPI_EVENT_FLAG_STATUS_SET (acpi_event_status) 0x04
+#define ACPI_EVENT_FLAG_ENABLE_SET (acpi_event_status) 0x08
+#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x10
+#define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET
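As a quick, hedged illustration of the widened status word (assuming the existing acpi_get_gpe_status() interface; the function name example_dump_gpe_status is made up for this sketch):

static void example_dump_gpe_status(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_event_status status;

	/* Query the GPE and test the newly split status/enable bits */
	if (ACPI_FAILURE(acpi_get_gpe_status(gpe_device, gpe_number, &status)))
		return;

	if (status & ACPI_EVENT_FLAG_STATUS_SET)
		pr_info("GPE 0x%02x: status bit set\n", gpe_number);
	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
		pr_info("GPE 0x%02x: enable bit set\n", gpe_number);
	if (status & ACPI_EVENT_FLAG_HAS_HANDLER)
		pr_info("GPE 0x%02x: handler installed\n", gpe_number);
}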
/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
#define ACPI_GPE_CONDITIONAL_ENABLE 2
-#define ACPI_GPE_SAVE_MASK 4
-
-#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
-#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)
/*
* GPE info flags - Per GPE
@@ -1251,6 +1265,7 @@ struct acpi_memory_list {
#define ACPI_OSI_WIN_VISTA_SP2 0x0A
#define ACPI_OSI_WIN_7 0x0B
#define ACPI_OSI_WIN_8 0x0C
+#define ACPI_OSI_WIN_10 0x0D
/* Definitions of file IO */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
new file mode 100644
index 000000000000..80fe8cf74d7a
--- /dev/null
+++ b/include/acpi/acuuid.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Name: acuuid.h - ACPI-related UUID/GUID definitions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACUUID_H__
+#define __ACUUID_H__
+
+/*
+ * Note1: UUIDs and GUIDs are defined to be identical in ACPI.
+ *
+ * Note2: This file is standalone and should remain that way.
+ */
+
+/* Controllers */
+
+#define UUID_GPIO_CONTROLLER "4f248f40-d5e2-499f-834c-27758ea1cd3f"
+#define UUID_USB_CONTROLLER "ce2ee385-00e6-48cb-9f05-2edb927c4899"
+#define UUID_SATA_CONTROLLER "e4db149b-fcfe-425b-a6d8-92357d78fc7f"
+
+/* Devices */
+
+#define UUID_PCI_HOST_BRIDGE "33db4d5b-1ff7-401c-9657-7441c03dd766"
+#define UUID_I2C_DEVICE "3cdff6f7-4267-4555-ad05-b30a3d8938de"
+#define UUID_POWER_BUTTON "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c"
+
+/* Interfaces */
+
+#define UUID_DEVICE_LABELING "e5c937d0-3553-4d7a-9117-ea4d19c3434d"
+#define UUID_PHYSICAL_PRESENCE "3dddfaa6-361b-4eb4-a424-8d10089d1653"
+
+/* NVDIMM - NFIT table */
+
+#define UUID_VOLATILE_MEMORY "7305944f-fdda-44e3-b16c-3f22d252e5d0"
+#define UUID_PERSISTENT_MEMORY "66f0d379-b4f3-4074-ac43-0d3318b78cdb"
+#define UUID_CONTROL_REGION "92f701f6-13b4-405d-910b-299367e8234c"
+#define UUID_DATA_REGION "91af0530-5d86-470e-a6b0-0a2db9408249"
+#define UUID_VOLATILE_VIRTUAL_DISK "77ab535a-45fc-624b-5560-f7b281d1f96e"
+#define UUID_VOLATILE_VIRTUAL_CD "3d5abd30-4175-87ce-6d64-d2ade523c4bb"
+#define UUID_PERSISTENT_VIRTUAL_DISK "5cea02c9-4d07-69d3-269f-4496fbe096f9"
+#define UUID_PERSISTENT_VIRTUAL_CD "08018188-42cd-bb48-100f-5387d53ded3d"
+
+/* Miscellaneous */
+
+#define UUID_PLATFORM_CAPABILITIES "0811b06e-4a27-44f9-8d60-3cbbc22e7b48"
+#define UUID_DYNAMIC_ENUMERATION "d8c1a3a6-be9b-4c9b-91bf-c3cb81fc5daf"
+#define UUID_BATTERY_THERMAL_LIMIT "4c2067e3-887d-475c-9720-4af1d3ed602e"
+#define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500"
+#define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301"
+
+#endif /* __ACUUID_H__ */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index ad74dc51d5b7..073997d729e9 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -76,6 +76,7 @@
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
#define ACPI_SINGLE_THREADED
+#define ACPI_32BIT_PHYSICAL_ADDRESS
#endif
/* acpi_exec configuration. Multithreaded with full AML debugger */
@@ -174,6 +175,9 @@
#elif defined(_APPLE) || defined(__APPLE__)
#include "acmacosx.h"
+#elif defined(__DragonFly__)
+#include "acdragonfly.h"
+
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include "acfreebsd.h"
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 71e5ec5b07a3..14dc6f68ca18 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -56,6 +56,9 @@
#if defined(_LINUX) || defined(__linux__)
#include <acpi/platform/aclinuxex.h>
+#elif defined(__DragonFly__)
+#include "acdragonflyex.h"
+
#endif
/*! [End] no source code translation !*/
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index b95dc32a6e6b..4188a4d3b597 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,7 +196,7 @@ struct acpi_processor_flags {
struct acpi_processor {
acpi_handle handle;
u32 acpi_id;
- u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */
+ phys_cpuid_t phys_id; /* CPU hardware ID such as APIC ID for x86 */
u32 id; /* CPU logical ID allocated by OS */
u32 pblk;
int performance_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
#endif /* CONFIG_CPU_FREQ */
/* in processor_core.c */
-int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int phys_id, u32 acpi_id);
+phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
/* in processor_pdc.c */
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 843ef1adfbfa..a7d7f1043e9c 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -16,23 +16,36 @@ struct acpi_device;
#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110
#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
+enum acpi_backlight_type {
+ acpi_backlight_undef = -1,
+ acpi_backlight_none = 0,
+ acpi_backlight_video,
+ acpi_backlight_vendor,
+ acpi_backlight_native,
+};
+
#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
-extern void acpi_video_unregister_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
-extern bool acpi_video_verify_backlight_support(void);
+extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
+extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
-static inline void acpi_video_unregister_backlight(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
{
return -ENODEV;
}
-static inline bool acpi_video_verify_backlight_support(void) { return false; }
+static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
+{
+ return acpi_backlight_vendor;
+}
+static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+{
+}
#endif
#endif
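A minimal sketch of how a GPU or platform driver might consult the new helper before registering its own backlight interface (the function name is illustrative, not part of this patch):

static bool example_should_register_native_backlight(void)
{
	/* Only register a native backlight when ACPI says the GPU owns it */
	switch (acpi_video_get_backlight_type()) {
	case acpi_backlight_native:
		return true;
	case acpi_backlight_video:
	case acpi_backlight_vendor:
	case acpi_backlight_none:
	default:
		return false;
	}
}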
diff --git a/include/asm-generic/asm-offsets.h b/include/asm-generic/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/include/asm-generic/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index f5c40b0fadc2..e6a83d712ef6 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -66,8 +66,8 @@
#define smp_read_barrier_depends() do { } while (0)
#endif
-#ifndef set_mb
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#endif
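For context, a sketch of the classic pattern this helper serves, as used by set_current_state(); "condition" here is a placeholder for whatever the sleeper is waiting on:

	/* Publish the new task state and order it before the condition check */
	smp_store_mb(current->state, TASK_UNINTERRUPTIBLE);
	if (!condition)
		schedule();
	__set_current_state(TASK_RUNNING);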
#ifndef smp_mb__before_atomic
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 811fb1e9b061..3766ab34aa45 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -86,9 +86,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
/*
* Atomic compare and exchange.
- *
- * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
- * a cmpxchg primitive faster than repeated local irq save/restore exists.
*/
#include <asm-generic/cmpxchg-local.h>
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 3378dcf4c31e..940d5ec122c9 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -39,6 +39,10 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
debug_dma_unmap_page(dev, addr, size, dir, true);
}
+/*
+ * dma_map_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
@@ -51,6 +55,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
kmemcheck_mark_initialized(sg_virt(s), s->length);
BUG_ON(!valid_dma_direction(dir));
ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ BUG_ON(ents < 0);
debug_dma_map_sg(dev, sg, nents, ents, dir);
return ents;
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index b59b5a52637e..e56272c919b5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -8,8 +8,7 @@
#ifndef CONFIG_SMP
/*
* The following implementation only for uniprocessor machines.
- * For UP, it's relies on the fact that pagefault_disable() also disables
- * preemption to ensure mutual exclusion.
+ * It relies on preempt_disable() ensuring mutual exclusion.
*
*/
@@ -38,6 +37,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
+ preempt_disable();
pagefault_disable();
ret = -EFAULT;
@@ -72,6 +72,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
out_pagefault_enable:
pagefault_enable();
+ preempt_enable();
if (ret == 0) {
switch (cmp) {
@@ -106,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
{
u32 val;
+ preempt_disable();
if (unlikely(get_user(val, uaddr) != 0))
return -EFAULT;
@@ -113,6 +115,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
*uval = val;
+ preempt_enable();
return 0;
}
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 383ade1a211b..40ec1433f05d 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/of.h>
-#include <linux/pinctrl/pinctrl.h>
#ifdef CONFIG_GPIOLIB
@@ -129,63 +128,11 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return gpiod_export_link(dev, name, gpio_to_desc(gpio));
}
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
-}
-
static inline void gpio_unexport(unsigned gpio)
{
gpiod_unexport(gpio_to_desc(gpio));
}
-#ifdef CONFIG_PINCTRL
-
-/**
- * struct gpio_pin_range - pin range controlled by a gpio chip
- * @head: list for maintaining set of pin ranges, used internally
- * @pctldev: pinctrl device which handles corresponding pins
- * @range: actual range of pins controlled by a gpio controller
- */
-
-struct gpio_pin_range {
- struct list_head node;
- struct pinctrl_dev *pctldev;
- struct pinctrl_gpio_range range;
-};
-
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins);
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group);
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
-
-#else
-
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- return 0;
-}
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group)
-{
- return 0;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-}
-
-#endif /* CONFIG_PINCTRL */
-
#else /* !CONFIG_GPIOLIB */
static inline bool gpio_is_valid(int number)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 9db042304df3..f56094cfdeff 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -769,6 +769,14 @@ static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
@@ -777,8 +785,17 @@ static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_wt
+#define ioremap_wt ioremap_wt
+static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef iounmap
#define iounmap iounmap
+
static inline void iounmap(void __iomem *addr)
{
}
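A hedged usage sketch of the new fallbacks (the function name and resource handling are placeholders): on architectures that do not override ioremap_wt()/ioremap_uc(), both silently degrade to ioremap_nocache(), so callers still get a correct, if uncached, mapping.

static void __iomem *example_map_framebuffer(struct resource *res)
{
	/* Prefer a write-through mapping for a frame buffer aperture */
	return ioremap_wt(res->start, resource_size(res));
}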
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 1b41011643a5..d8f8622fa044 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -66,6 +66,10 @@ extern void ioport_unmap(void __iomem *);
#define ioremap_wc ioremap_nocache
#endif
+#ifndef ARCH_HAS_IOREMAP_WT
+#define ioremap_wt ioremap_nocache
+#endif
+
#ifdef CONFIG_PCI
/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index e80a0495e5b0..f24bc519bf31 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -6,19 +6,6 @@
#ifndef _ASM_GENERIC_PCI_H
#define _ASM_GENERIC_PCI_H
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
-{
- struct resource *root = NULL;
-
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- if (res->flags & IORESOURCE_MEM)
- root = &iomem_resource;
-
- return root;
-}
-
#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 4d46085c1b90..29c57b2cb344 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -6,6 +6,12 @@
#include <linux/mm_types.h>
#include <linux/bug.h>
+#include <linux/errno.h>
+
+#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
+ CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
+#endif
/*
* On almost all architectures and configurations, 0 can be used as the
@@ -90,11 +96,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
}
#endif
-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp)
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
@@ -103,13 +109,13 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp,
int full)
{
- return pmdp_get_and_clear(mm, address, pmdp);
+ return pmdp_huge_get_and_clear(mm, address, pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
@@ -146,8 +152,8 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
pte_t *ptep);
#endif
-#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp);
#endif
@@ -183,6 +189,22 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#endif
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
@@ -256,6 +278,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#define pgprot_writecombine pgprot_noncached
#endif
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif
@@ -691,6 +717,30 @@ static inline int pmd_protnone(pmd_t pmd)
#endif /* CONFIG_MMU */
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pud_clear_huge(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index eb6f9e6c3075..d0a7a4753db2 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -79,11 +79,8 @@ static __always_inline bool should_resched(void)
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
-
-#ifdef CONFIG_CONTEXT_TRACKING
-extern asmlinkage void preempt_schedule_context(void);
-#define __preempt_schedule_context() preempt_schedule_context()
-#endif
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */
#endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
new file mode 100644
index 000000000000..83bfb87f5bf1
--- /dev/null
+++ b/include/asm-generic/qspinlock.h
@@ -0,0 +1,139 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_H
+#define __ASM_GENERIC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+/**
+ * queued_spin_is_locked - is the spinlock locked?
+ * @lock: Pointer to queued spinlock structure
+ * Return: 1 if it is locked, 0 otherwise
+ */
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val);
+}
+
+/**
+ * queued_spin_value_unlocked - is the spinlock structure unlocked?
+ * @lock: queued spinlock structure
+ * Return: 1 if it is unlocked, 0 otherwise
+ *
+ * N.B. Whenever there are tasks waiting for the lock, it is considered
+ * locked wrt the lockref code to avoid lock stealing by the lockref
+ * code and change things underneath the lock. This also allows some
+ * optimizations to be applied without conflict with lockref.
+ */
+static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+{
+ return !atomic_read(&lock.val);
+}
+
+/**
+ * queued_spin_is_contended - check if the lock is contended
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
+}
+/**
+ * queued_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+ if (!atomic_read(&lock->val) &&
+ (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+ return 1;
+ return 0;
+}
+
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queued_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ u32 val;
+
+ val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+ if (likely(val == 0))
+ return;
+ queued_spin_lock_slowpath(lock, val);
+}
+
+#ifndef queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /*
+ * smp_mb__before_atomic() in order to guarantee release semantics
+ */
+ smp_mb__before_atomic();
+ atomic_sub(_Q_LOCKED_VAL, &lock->val);
+}
+#endif
+
+/**
+ * queued_spin_unlock_wait - wait until current lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+static inline void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ cpu_relax();
+}
+
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+ return false;
+}
+#endif
+
+/*
+ * Initializer
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
+
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * queued spinlock functions.
+ */
+#define arch_spin_is_locked(l) queued_spin_is_locked(l)
+#define arch_spin_is_contended(l) queued_spin_is_contended(l)
+#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l)
+#define arch_spin_lock(l) queued_spin_lock(l)
+#define arch_spin_trylock(l) queued_spin_trylock(l)
+#define arch_spin_unlock(l) queued_spin_unlock(l)
+#define arch_spin_lock_flags(l, f) queued_spin_lock(l)
+#define arch_spin_unlock_wait(l) queued_spin_unlock_wait(l)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_H */
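As a sketch, the generic queued spinlock is normally reached through spin_lock()/spin_unlock(); used raw, the arch_spin_* mapping above boils down to (example_critical_section is a made-up name):

static void example_critical_section(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

	arch_spin_lock(&lock);		/* fast path: one cmpxchg when uncontended */
	/* ... critical section ... */
	arch_spin_unlock(&lock);	/* releases by subtracting _Q_LOCKED_VAL */
}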
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
new file mode 100644
index 000000000000..85f888e86761
--- /dev/null
+++ b/include/asm-generic/qspinlock_types.h
@@ -0,0 +1,79 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
+#define __ASM_GENERIC_QSPINLOCK_TYPES_H
+
+/*
+ * Including atomic.h with PARAVIRT on will cause compilation errors because
+ * of recursive header file inclusion via paravirt_types.h. So don't include
+ * it if PARAVIRT is on.
+ */
+#ifndef CONFIG_PARAVIRT
+#include <linux/types.h>
+#include <linux/atomic.h>
+#endif
+
+typedef struct qspinlock {
+ atomic_t val;
+} arch_spinlock_t;
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * When NR_CPUS < 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ *
+ * When NR_CPUS >= 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-10: tail index
+ * 11-31: tail cpu (+1)
+ */
+#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
+ << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET 0
+#define _Q_LOCKED_BITS 8
+#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#if CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS 8
+#else
+#define _Q_PENDING_BITS 1
+#endif
+#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS 2
+#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
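A small, hypothetical helper (not part of this patch) that decodes a raw lock word with the masks above, for example when inspecting a crash dump; the field widths follow whichever CONFIG_NR_CPUS layout was selected:

static void example_decode_qspinlock(u32 val)
{
	pr_info("locked=%u pending=%u tail_idx=%u tail_cpu=%u\n",
		(val & _Q_LOCKED_MASK) >> _Q_LOCKED_OFFSET,
		(val & _Q_PENDING_MASK) >> _Q_PENDING_OFFSET,
		(val & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET,
		(val & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET);
}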
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
deleted file mode 100644
index 5de07355fad4..000000000000
--- a/include/asm-generic/scatterlist.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ASM_GENERIC_SCATTERLIST_H
-#define __ASM_GENERIC_SCATTERLIST_H
-
-#include <linux/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
- dma_addr_t dma_address;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- unsigned int dma_length;
-#endif
-};
-
-/*
- * These macros should be used after a dma_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-#define sg_dma_len(sg) ((sg)->dma_length)
-#else
-#define sg_dma_len(sg) ((sg)->length)
-#endif
-
-#endif /* __ASM_GENERIC_SCATTERLIST_H */
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index 9fa1f653ed3b..c9ccafa0d99a 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -17,7 +17,9 @@
#define __NR_seccomp_read_32 __NR_read
#define __NR_seccomp_write_32 __NR_write
#define __NR_seccomp_exit_32 __NR_exit
+#ifndef __NR_seccomp_sigreturn_32
#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+#endif
#endif /* CONFIG_COMPAT && ! already defined */
#define __NR_seccomp_read __NR_read
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ac78910d7416..8bd374d3cf21 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -124,7 +124,10 @@
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
- VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+ VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
+ VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
+ *(_ftrace_enum_map) \
+ VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif
@@ -150,6 +153,14 @@
#define TRACE_SYSCALLS()
#endif
+#ifdef CONFIG_SERIAL_EARLYCON
+#define EARLYCON_TABLE() STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__earlycon_table) = .; \
+ *(__earlycon_table) \
+ *(__earlycon_table_end)
+#else
+#define EARLYCON_TABLE()
+#endif
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
@@ -167,6 +178,7 @@
#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
+#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
#define KERNEL_DTB() \
@@ -401,7 +413,7 @@
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot) \
- *(.text) \
+ *(.text .text.fixup) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
@@ -501,8 +513,10 @@
CLKSRC_OF_TABLES() \
IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
+ CPUIDLE_METHOD_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \
+ EARLYCON_TABLE() \
EARLYCON_OF_TABLES()
#define INIT_TEXT \
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 94b19be67574..7169ad04acc0 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -18,6 +18,65 @@
#include <linux/slab.h>
/**
+ * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
+ *
+ * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
+ * (listed as type "aead" in /proc/crypto)
+ *
+ * The most prominent examples for this type of encryption are GCM and CCM.
+ * However, the kernel supports other types of AEAD ciphers which are defined
+ * with the following cipher string:
+ *
+ * authenc(keyed message digest, block cipher)
+ *
+ * For example: authenc(hmac(sha256), cbc(aes))
+ *
+ * The example code provided for the asynchronous block cipher operation
+ * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged
+ * for the *aead* pendants discussed in the following. In addition, for the AEAD
+ * operation, the aead_request_set_assoc function must be used to set the
+ * pointer to the associated data memory location before performing the
+ * encryption or decryption operation. In case of an encryption, the associated
+ * data memory is filled during the encryption operation. For decryption, the
+ * associated data memory must contain data that is used to verify the integrity
+ * of the decrypted data. Another deviation from the asynchronous block cipher
+ * operation is that the caller should explicitly check for -EBADMSG of the
+ * crypto_aead_decrypt. That error indicates an authentication error, i.e.
+ * a breach in the integrity of the message. In essence, that -EBADMSG error
+ * code is the key bonus an AEAD cipher has over "standard" block chaining
+ * modes.
+ */
+
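A minimal, hedged allocation sketch for the authenc() template named above (error handling trimmed; example_alloc_authenc is not part of this patch):

static int example_alloc_authenc(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... set key and authentication size, build requests ... */

	crypto_free_aead(tfm);
	return 0;
}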
+/**
+ * struct aead_request - AEAD request
+ * @base: Common attributes for async crypto requests
+ * @old: Boolean whether the old or new AEAD API is used
+ * @assoclen: Length in bytes of associated data for authentication
+ * @cryptlen: Length of data to be encrypted or decrypted
+ * @iv: Initialisation vector
+ * @assoc: Associated data
+ * @src: Source data
+ * @dst: Destination data
+ * @__ctx: Start of private context data
+ */
+struct aead_request {
+ struct crypto_async_request base;
+
+ bool old;
+
+ unsigned int assoclen;
+ unsigned int cryptlen;
+
+ u8 *iv;
+
+ struct scatterlist *assoc;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
* struct aead_givcrypt_request - AEAD request with IV generation
* @seq: Sequence number for IV generation
* @giv: Space for generated IV
@@ -30,6 +89,474 @@ struct aead_givcrypt_request {
struct aead_request areq;
};
+/**
+ * struct aead_alg - AEAD cipher definition
+ * @maxauthsize: Set the maximum authentication tag size supported by the
+ * transformation. A transformation may support smaller tag sizes.
+ * As the authentication tag is a message digest to ensure the
+ * integrity of the encrypted data, a consumer typically wants the
+ * largest authentication tag possible as defined by this
+ * variable.
+ * @setauthsize: Set authentication size for the AEAD transformation. This
+ * function is used to specify the consumer requested size of the
+ * authentication tag to be either generated by the transformation
+ * during encryption or the size of the authentication tag to be
+ * supplied during the decryption operation. This function is also
+ * responsible for checking the authentication tag size for
+ * validity.
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ * @init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated. In case the
+ * cryptographic hardware has some special requirements which need to
+ * be handled by software, this function shall check for the precise
+ * requirement of the transformation and put any software fallbacks
+ * in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * All fields except @ivsize are mandatory and must be filled.
+ */
+struct aead_alg {
+ int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
+ int (*encrypt)(struct aead_request *req);
+ int (*decrypt)(struct aead_request *req);
+ int (*init)(struct crypto_aead *tfm);
+ void (*exit)(struct crypto_aead *tfm);
+
+ const char *geniv;
+
+ unsigned int ivsize;
+ unsigned int maxauthsize;
+
+ struct crypto_alg base;
+};
+
+struct crypto_aead {
+ int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
+ int (*encrypt)(struct aead_request *req);
+ int (*decrypt)(struct aead_request *req);
+ int (*givencrypt)(struct aead_givcrypt_request *req);
+ int (*givdecrypt)(struct aead_givcrypt_request *req);
+
+ struct crypto_aead *child;
+
+ unsigned int authsize;
+ unsigned int reqsize;
+
+ struct crypto_tfm base;
+};
+
+static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_aead, base);
+}
+
+/**
+ * crypto_alloc_aead() - allocate AEAD cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * AEAD cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an AEAD. The returned struct
+ * crypto_aead is the cipher handle that is required for any subsequent
+ * API invocation for that AEAD.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_aead() - zeroize and free aead handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_aead(struct crypto_aead *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
+}
+
+static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
+{
+ return tfm;
+}
+
+static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
+{
+ return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
+}
+
+static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
+{
+ return container_of(crypto_aead_tfm(tfm)->__crt_alg,
+ struct aead_alg, base);
+}
+
+static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
+{
+ return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize :
+ alg->ivsize;
+}
+
+/**
+ * crypto_aead_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the aead referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
+{
+ return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
+}
+
+/**
+ * crypto_aead_authsize() - obtain maximum authentication data size
+ * @tfm: cipher handle
+ *
+ * The maximum size of the authentication data for the AEAD cipher referenced
+ * by the AEAD cipher handle is returned. The authentication data size may be
+ * zero if the cipher implements a hard-coded maximum.
+ *
+ * The authentication data may also be known as "tag value".
+ *
+ * Return: authentication data size / tag size in bytes
+ */
+static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
+{
+ return tfm->authsize;
+}
+
+/**
+ * crypto_aead_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the AEAD referenced with the cipher handle is returned.
+ * The caller may use that information to allocate appropriate memory for the
+ * data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
+}
+
+static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
+}
+
+static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
+{
+ return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
+}
+
+static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
+}
+
+static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
+}
+
+/**
+ * crypto_aead_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the AEAD referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setkey(struct crypto_aead *tfm,
+ const u8 *key, unsigned int keylen);
+
+/**
+ * crypto_aead_setauthsize() - set authentication data size
+ * @tfm: cipher handle
+ * @authsize: size of the authentication data / tag in bytes
+ *
+ * Set the authentication data size / tag size. AEAD requires an authentication
+ * tag (or MAC) in addition to the associated data.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+
+static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
+{
+ return __crypto_aead_cast(req->base.tfm);
+}
+
+/**
+ * crypto_aead_encrypt() - encrypt plaintext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The encryption operation creates the authentication data /
+ * tag. That data is concatenated with the created ciphertext.
+ * The ciphertext memory size is therefore the given number of
+ * block cipher blocks + the size defined by the
+ * crypto_aead_setauthsize invocation. The caller must ensure
+ * that sufficient memory is available for the ciphertext and
+ * the authentication tag.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_aead_encrypt(struct aead_request *req)
+{
+ return crypto_aead_reqtfm(req)->encrypt(req);
+}
+
+/**
+ * crypto_aead_decrypt() - decrypt ciphertext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
+ * authentication data / tag. That authentication data / tag
+ * must have the size defined by the crypto_aead_setauthsize
+ * invocation.
+ *
+ *
+ * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
+ * cipher operation performs the authentication of the data during the
+ * decryption operation. Therefore, the function returns this error if
+ * the authentication of the ciphertext was unsuccessful (i.e. the
+ * integrity of the ciphertext or the associated data was violated);
+ * < 0 if an error occurred.
+ */
+static inline int crypto_aead_decrypt(struct aead_request *req)
+{
+ if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+ return -EINVAL;
+
+ return crypto_aead_reqtfm(req)->decrypt(req);
+}
+
+/**
+ * DOC: Asynchronous AEAD Request Handle
+ *
+ * The aead_request data structure contains all pointers to data required for
+ * the AEAD cipher operation. This includes the cipher handle (which can be
+ * used by multiple aead_request instances), pointer to plaintext and
+ * ciphertext, asynchronous callback function, etc. It acts as a handle to the
+ * aead_request_* API calls in a similar way as AEAD handle to the
+ * crypto_aead_* API calls.
+ */
+
+/**
+ * crypto_aead_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
+
+/**
+ * aead_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing aead handle in the request
+ * data structure with a different one.
+ */
+static inline void aead_request_set_tfm(struct aead_request *req,
+ struct crypto_aead *tfm)
+{
+ req->base.tfm = crypto_aead_tfm(tfm->child);
+}
+
+/**
+ * aead_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the AEAD
+ * encrypt and decrypt API calls. During the allocation, the provided aead
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
+ gfp_t gfp)
+{
+ struct aead_request *req;
+
+ req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
+
+ if (likely(req))
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * aead_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void aead_request_free(struct aead_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * aead_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * Setting the callback function that is triggered once the cipher operation
+ * completes
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void aead_request_set_callback(struct aead_request *req,
+ u32 flags,
+ crypto_completion_t compl,
+ void *data)
+{
+ req->base.complete = compl;
+ req->base.data = data;
+ req->base.flags = flags;
+}
+
+/**
+ * aead_request_set_crypt - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_aead_ivsize()
+ *
+ * Setting the source data and destination data scatter / gather lists which
+ * hold the associated data concatenated with the plaintext or ciphertext. See
+ * below for the authentication tag.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ *
+ * For both src/dst the layout is associated data, plain/cipher text,
+ * authentication tag.
+ *
+ * The content of the AD in the destination buffer after processing
+ * will either be untouched, or it will contain a copy of the AD
+ * from the source buffer. In order to ensure that it always has
+ * a copy of the AD, the user must copy the AD over either before
+ * or after processing. Of course this is not relevant if the user
+ * is doing in-place processing where src == dst.
+ *
+ * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
+ * the caller must concatenate the ciphertext followed by the
+ * authentication tag and provide the entire data stream to the
+ * decryption operation (i.e. the data length used for the
+ * initialization of the scatterlist and the data length for the
+ * decryption operation is identical). For encryption, however,
+ * the authentication tag is created while encrypting the data.
+ * The destination buffer must hold sufficient space for the
+ * ciphertext and the authentication tag while the encryption
+ * invocation must only point to the plaintext data size. The
+ * following code snippet illustrates the memory usage
+ * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
+ * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
+ * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ */
+static inline void aead_request_set_crypt(struct aead_request *req,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int cryptlen, u8 *iv)
+{
+ req->src = src;
+ req->dst = dst;
+ req->cryptlen = cryptlen;
+ req->iv = iv;
+}
+
+/**
+ * aead_request_set_assoc() - set the associated data scatter / gather list
+ * @req: request handle
+ * @assoc: associated data scatter / gather list
+ * @assoclen: number of bytes to process from @assoc
+ *
+ * Obsolete, do not use.
+ */
+static inline void aead_request_set_assoc(struct aead_request *req,
+ struct scatterlist *assoc,
+ unsigned int assoclen)
+{
+ req->assoc = assoc;
+ req->assoclen = assoclen;
+ req->old = true;
+}
+
+/**
+ * aead_request_set_ad - set associated data information
+ * @req: request handle
+ * @assoclen: number of bytes in associated data
+ *
+ * Setting the AD information. This function sets the length of
+ * the associated data.
+ */
+static inline void aead_request_set_ad(struct aead_request *req,
+ unsigned int assoclen)
+{
+ req->assoclen = assoclen;
+ req->old = false;
+}
+
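Putting the new request helpers together, a hedged encryption sketch (a synchronous backend is assumed, setter return codes are ignored for brevity, and example_gcm_encrypt is a made-up name; the buffer holds AD || plaintext with 16 spare bytes for the tag):

static int example_gcm_encrypt(u8 *buf, unsigned int assoclen,
			       unsigned int ptlen, const u8 *key, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_aead_setkey(tfm, key, 16);
	crypto_aead_setauthsize(tfm, 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_aead(tfm);
		return -ENOMEM;
	}

	/* One in-place scatterlist covering AD, plaintext and tag space */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
	crypto_free_aead(tfm);
	return err;
}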
static inline struct crypto_aead *aead_givcrypt_reqtfm(
struct aead_givcrypt_request *req)
{
@@ -38,14 +565,12 @@ static inline struct crypto_aead *aead_givcrypt_reqtfm(
static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
{
- struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
- return crt->givencrypt(req);
+ return aead_givcrypt_reqtfm(req)->givencrypt(req);
};
static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
{
- struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
- return crt->givdecrypt(req);
+ return aead_givcrypt_reqtfm(req)->givdecrypt(req);
};
static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
new file mode 100644
index 000000000000..69d163e39101
--- /dev/null
+++ b/include/crypto/akcipher.h
@@ -0,0 +1,340 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AKCIPHER_H
+#define _CRYPTO_AKCIPHER_H
+#include <linux/crypto.h>
+
+/**
+ * struct akcipher_request - public key request
+ *
+ * @base: Common attributes for async crypto requests
+ * @src: Pointer to memory containing the input parameters
+ * The format of the parameter(s) is expected to be Octet String
+ * @dst: Pointer to memory where the result will be stored
+ * @src_len: Size of the input parameter
+ * @dst_len: Size of the output buffer. It needs to be at least
+ * as big as the expected result depending on the operation
+ * After operation it will be updated with the actual size of the
+ * result. In case of error, where the dst_len was insufficient,
+ * it will be updated to the size required for the operation.
+ * @__ctx: Start of private context data
+ */
+struct akcipher_request {
+ struct crypto_async_request base;
+ void *src;
+ void *dst;
+ unsigned int src_len;
+ unsigned int dst_len;
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_akcipher - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_akcipher {
+ struct crypto_tfm base;
+};
+
+/**
+ * struct akcipher_alg - generic public key algorithm
+ *
+ * @sign: Function performs a sign operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @verify: Function performs a verify operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @encrypt: Function performs an encrypt operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @decrypt: Function performs a decrypt operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @setkey: Function invokes the algorithm specific set key function, which
+ * knows how to decode and interpret the BER encoded key
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Request context size required by algorithm implementation
+ * @base: Common crypto API algorithm data structure
+ */
+struct akcipher_alg {
+ int (*sign)(struct akcipher_request *req);
+ int (*verify)(struct akcipher_request *req);
+ int (*encrypt)(struct akcipher_request *req);
+ int (*decrypt)(struct akcipher_request *req);
+ int (*setkey)(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen);
+ int (*init)(struct crypto_akcipher *tfm);
+ void (*exit)(struct crypto_akcipher *tfm);
+
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Public Key API
+ *
+ * The Public Key API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_akcipher() -- allocate AKCIPHER tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * public key algorithm e.g. "rsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for a public key algorithm. The returned struct
+ * crypto_akcipher is the handle that is required for any subsequent
+ * API invocation for the public key operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
+ u32 mask);
+
+static inline struct crypto_tfm *crypto_akcipher_tfm(
+ struct crypto_akcipher *tfm)
+{
+ return &tfm->base;
+}
+
+static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct akcipher_alg, base);
+}
+
+static inline struct crypto_akcipher *__crypto_akcipher_tfm(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_akcipher, base);
+}
+
+static inline struct akcipher_alg *crypto_akcipher_alg(
+ struct crypto_akcipher *tfm)
+{
+ return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm)
+{
+ return crypto_akcipher_alg(tfm)->reqsize;
+}
+
+static inline void akcipher_request_set_tfm(struct akcipher_request *req,
+ struct crypto_akcipher *tfm)
+{
+ req->base.tfm = crypto_akcipher_tfm(tfm);
+}
+
+static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
+ struct akcipher_request *req)
+{
+ return __crypto_akcipher_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_akcipher() -- free AKCIPHER tfm handle
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ */
+static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm));
+}
+
+/**
+ * akcipher_request_alloc() -- allocates public key request
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @gfp: allocation flags
+ *
+ * Return: allocated handle in case of success or NULL in case of an error.
+ */
+static inline struct akcipher_request *akcipher_request_alloc(
+ struct crypto_akcipher *tfm, gfp_t gfp)
+{
+ struct akcipher_request *req;
+
+ req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp);
+ if (likely(req))
+ akcipher_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * akcipher_request_free() -- zeroize and free public key request
+ *
+ * @req: request to free
+ */
+static inline void akcipher_request_free(struct akcipher_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * akcipher_request_set_callback() -- Sets an asynchronous callback.
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req: request that the callback will be set for
+ * @flgs: flags that specify, for instance, whether the operation may backlog
+ * @cmpl: callback which will be called
+ * @data: private data used by the caller
+ */
+static inline void akcipher_request_set_callback(struct akcipher_request *req,
+ u32 flgs,
+ crypto_completion_t cmpl,
+ void *data)
+{
+ req->base.complete = cmpl;
+ req->base.data = data;
+ req->base.flags = flgs;
+}
+
+/**
+ * akcipher_request_set_crypt() -- Sets request parameters
+ *
+ * Sets parameters required by crypto operation
+ *
+ * @req: public key request
+ * @src: ptr to the input parameter
+ * @dst: ptr to the output parameter
+ * @src_len: size of the input buffer
+ * @dst_len: size of the output buffer. It will be updated by the
+ * implementation to reflect the actual size of the result
+ */
+static inline void akcipher_request_set_crypt(struct akcipher_request *req,
+ void *src, void *dst,
+ unsigned int src_len,
+ unsigned int dst_len)
+{
+ req->src = src;
+ req->dst = dst;
+ req->src_len = src_len;
+ req->dst_len = dst_len;
+}
+
+/**
+ * crypto_akcipher_encrypt() -- Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->encrypt(req);
+}
+
+/**
+ * crypto_akcipher_decrypt() -- Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->decrypt(req);
+}
+
+/**
+ * crypto_akcipher_sign() -- Invoke public key sign operation
+ *
+ * Function invokes the specific public key sign operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_sign(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->sign(req);
+}
+
+/**
+ * crypto_akcipher_verify() -- Invoke public key verify operation
+ *
+ * Function invokes the specific public key verify operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->verify(req);
+}
+
+/**
+ * crypto_akcipher_setkey() -- Invoke public key setkey operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded private or public key
+ * @keylen: length of the key
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_setkey(struct crypto_akcipher *tfm, void *key,
+ unsigned int keylen)
+{
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->setkey(tfm, key, keylen);
+}
+#endif
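
A minimal consumer-side sketch of the API declared above, illustrative only: the "rsa" algorithm name, the completion handler and the buffer arguments are placeholders, and a real caller would also handle -EINPROGRESS/-EBUSY returned by an asynchronous implementation.

#include <linux/kernel.h>
#include <crypto/akcipher.h>

static void my_pke_done(struct crypto_async_request *base, int err)
{
        /* illustrative completion handler for the asynchronous case */
        pr_info("akcipher operation finished: %d\n", err);
}

static int my_pke_encrypt(void *ber_key, unsigned int keylen,
                          void *in, unsigned int in_len,
                          void *out, unsigned int out_len)
{
        struct crypto_akcipher *tfm;
        struct akcipher_request *req;
        int err;

        tfm = crypto_alloc_akcipher("rsa", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_akcipher_setkey(tfm, ber_key, keylen);
        if (err)
                goto out_free_tfm;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      my_pke_done, NULL);
        akcipher_request_set_crypt(req, in, out, in_len, out_len);

        /* on success req->dst_len holds the actual size of the result */
        err = crypto_akcipher_encrypt(req);

        akcipher_request_free(req);
out_free_tfm:
        crypto_free_akcipher(tfm);
        return err;
}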
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 623a59c1ff5a..d4ebf6e9af6a 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
+struct crypto_aead;
struct module;
struct rtattr;
struct seq_file;
@@ -126,7 +127,6 @@ struct ablkcipher_walk {
};
extern const struct crypto_type crypto_ablkcipher_type;
-extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_blkcipher_type;
void crypto_mod_put(struct crypto_alg *alg);
@@ -137,13 +137,15 @@ struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
-int crypto_unregister_instance(struct crypto_alg *alg);
+int crypto_unregister_instance(struct crypto_instance *inst);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst,
const struct crypto_type *frontend);
+int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
+ u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
@@ -239,22 +241,6 @@ static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
return crypto_tfm_ctx_aligned(&tfm->base);
}
-static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
-}
-
-static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
-{
- return crypto_tfm_ctx(&tfm->base);
-}
-
-static inline struct crypto_instance *crypto_aead_alg_instance(
- struct crypto_aead *aead)
-{
- return crypto_tfm_alg_instance(&aead->base);
-}
-
static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
struct crypto_spawn *spawn)
{
@@ -363,21 +349,6 @@ static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}
-static inline void *aead_request_ctx(struct aead_request *req)
-{
- return req->__ctx;
-}
-
-static inline void aead_request_complete(struct aead_request *req, int err)
-{
- req->base.complete(&req->base, err);
-}
-
-static inline u32 aead_request_flags(struct aead_request *req)
-{
- return req->base.flags;
-}
-
static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
u32 type, u32 mask)
{
diff --git a/include/crypto/compress.h b/include/crypto/compress.h
index 86163ef24219..5b67af834d83 100644
--- a/include/crypto/compress.h
+++ b/include/crypto/compress.h
@@ -55,14 +55,14 @@ struct crypto_pcomp {
};
struct pcomp_alg {
- int (*compress_setup)(struct crypto_pcomp *tfm, void *params,
+ int (*compress_setup)(struct crypto_pcomp *tfm, const void *params,
unsigned int len);
int (*compress_init)(struct crypto_pcomp *tfm);
int (*compress_update)(struct crypto_pcomp *tfm,
struct comp_request *req);
int (*compress_final)(struct crypto_pcomp *tfm,
struct comp_request *req);
- int (*decompress_setup)(struct crypto_pcomp *tfm, void *params,
+ int (*decompress_setup)(struct crypto_pcomp *tfm, const void *params,
unsigned int len);
int (*decompress_init)(struct crypto_pcomp *tfm);
int (*decompress_update)(struct crypto_pcomp *tfm,
@@ -97,7 +97,7 @@ static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
}
static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
- void *params, unsigned int len)
+ const void *params, unsigned int len)
{
return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
}
@@ -120,7 +120,7 @@ static inline int crypto_compress_final(struct crypto_pcomp *tfm,
}
static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
- void *params, unsigned int len)
+ const void *params, unsigned int len)
{
return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
}
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index ba98918bbd9b..1547f540c920 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -14,6 +14,7 @@
#include <linux/crypto.h>
#include <linux/kernel.h>
+#include <crypto/aead.h>
#include <crypto/hash.h>
struct cryptd_ablkcipher {
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 5186f750c713..9756c70899d8 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -49,8 +49,9 @@
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/workqueue.h>
/*
* Concatenation Helper and string operation helper
@@ -104,12 +105,13 @@ struct drbg_test_data {
};
struct drbg_state {
- spinlock_t drbg_lock; /* lock around DRBG */
+ struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
unsigned char *C;
/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
size_t reseed_ctr;
+ size_t reseed_threshold;
/* some memory the DRBG can use for its operation */
unsigned char *scratchpad;
void *priv_data; /* Cipher handle */
@@ -119,9 +121,12 @@ struct drbg_state {
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
#endif
+ struct work_struct seed_work; /* asynchronous seeding support */
+ struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
- struct drbg_test_data *test_data;
+ struct drbg_string test_data;
+ struct random_ready_callback random_ready;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
@@ -177,19 +182,8 @@ static inline size_t drbg_max_requests(struct drbg_state *drbg)
}
/*
- * kernel crypto API input data structure for DRBG generate in case dlen
- * is set to 0
- */
-struct drbg_gen {
- unsigned char *outbuf; /* output buffer for random numbers */
- unsigned int outlen; /* size of output buffer */
- struct drbg_string *addtl; /* additional information string */
- struct drbg_test_data *test_data; /* test data */
-};
-
-/*
* This is a wrapper to the kernel crypto API function of
- * crypto_rng_get_bytes() to allow the caller to provide additional data.
+ * crypto_rng_generate() to allow the caller to provide additional data.
*
* @drng DRBG handle -- see crypto_rng_get_bytes
* @outbuf output buffer -- see crypto_rng_get_bytes
@@ -204,21 +198,15 @@ static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
unsigned char *outbuf, unsigned int outlen,
struct drbg_string *addtl)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = outbuf;
- genbuf.outlen = outlen;
- genbuf.addtl = addtl;
- genbuf.test_data = NULL;
- ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
- return ret;
+ return crypto_rng_generate(drng, addtl->buf, addtl->len,
+ outbuf, outlen);
}
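
A caller-side sketch, illustrative only, of how additional input is passed through the rewritten wrapper; it assumes the drbg_string_fill() helper defined elsewhere in this header, and the buffers are placeholders.

#include <crypto/drbg.h>

static int my_drbg_generate(struct crypto_rng *drng,
                            u8 *out, unsigned int outlen,
                            const u8 *addtl_data, unsigned int addtl_len)
{
        struct drbg_string addtl;

        /* wrap the additional input and hand it to the DRBG */
        drbg_string_fill(&addtl, addtl_data, addtl_len);
        return crypto_drbg_get_bytes_addtl(drng, out, outlen, &addtl);
}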
/*
* TEST code
*
* This is a wrapper to the kernel crypto API function of
- * crypto_rng_get_bytes() to allow the caller to provide additional data and
+ * crypto_rng_generate() to allow the caller to provide additional data and
* allow furnishing of test_data
*
* @drng DRBG handle -- see crypto_rng_get_bytes
@@ -236,14 +224,10 @@ static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
struct drbg_string *addtl,
struct drbg_test_data *test_data)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = outbuf;
- genbuf.outlen = outlen;
- genbuf.addtl = addtl;
- genbuf.test_data = test_data;
- ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
- return ret;
+ crypto_rng_set_entropy(drng, test_data->testentropy->buf,
+ test_data->testentropy->len);
+ return crypto_rng_generate(drng, addtl->buf, addtl->len,
+ outbuf, outlen);
}
/*
@@ -264,14 +248,9 @@ static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
struct drbg_string *pers,
struct drbg_test_data *test_data)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = NULL;
- genbuf.outlen = 0;
- genbuf.addtl = pers;
- genbuf.test_data = test_data;
- ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0);
- return ret;
+ crypto_rng_set_entropy(drng, test_data->testentropy->buf,
+ test_data->testentropy->len);
+ return crypto_rng_reset(drng, pers->buf, pers->len);
}
/* DRBG type flags */
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 98abda9ed3aa..57c8a6ee33c2 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -66,7 +66,7 @@ struct ahash_request {
/**
* struct ahash_alg - asynchronous message digest definition
* @init: Initialize the transformation context. Intended only to initialize the
- * state of the HASH transformation at the begining. This shall fill in
+ * state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point.
* @update: Push a chunk of data into the driver for transformation. This
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 178525e5f430..018afb264ac2 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -58,8 +58,9 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES];
+ struct scatterlist sg[ALG_MAX_PAGES + 1];
struct page *pages[ALG_MAX_PAGES];
+ unsigned int npages;
};
int af_alg_register_type(const struct af_alg_type *type);
@@ -70,6 +71,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock);
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 2eba340230a7..4b2547186519 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -15,16 +15,75 @@
#include <crypto/aead.h>
#include <crypto/algapi.h>
+#include <linux/stddef.h>
#include <linux/types.h>
struct rtattr;
+struct aead_instance {
+ union {
+ struct {
+ char head[offsetof(struct aead_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct aead_alg alg;
+ };
+};
+
struct crypto_aead_spawn {
struct crypto_spawn base;
};
+extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_nivaead_type;
+static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *crypto_aead_alg_instance(
+ struct crypto_aead *aead)
+{
+ return crypto_tfm_alg_instance(&aead->base);
+}
+
+static inline struct crypto_instance *aead_crypto_instance(
+ struct aead_instance *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct aead_instance, alg.base);
+}
+
+static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
+{
+ return aead_instance(crypto_aead_alg_instance(aead));
+}
+
+static inline void *aead_instance_ctx(struct aead_instance *inst)
+{
+ return crypto_instance_ctx(aead_crypto_instance(inst));
+}
+
+static inline void *aead_request_ctx(struct aead_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void aead_request_complete(struct aead_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline u32 aead_request_flags(struct aead_request *req)
+{
+ return req->base.flags;
+}
+
static inline void crypto_set_aead_spawn(
struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
{
@@ -47,24 +106,27 @@ static inline struct crypto_alg *crypto_aead_spawn_alg(
return spawn->base.alg;
}
+static inline struct aead_alg *crypto_spawn_aead_alg(
+ struct crypto_aead_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct aead_alg, base);
+}
+
static inline struct crypto_aead *crypto_spawn_aead(
struct crypto_aead_spawn *spawn)
{
- return __crypto_aead_cast(
- crypto_spawn_tfm(&spawn->base, CRYPTO_ALG_TYPE_AEAD,
- CRYPTO_ALG_TYPE_MASK));
+ return crypto_spawn_tfm2(&spawn->base);
}
-struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type,
- u32 mask);
-void aead_geniv_free(struct crypto_instance *inst);
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type, u32 mask);
+void aead_geniv_free(struct aead_instance *inst);
int aead_geniv_init(struct crypto_tfm *tfm);
void aead_geniv_exit(struct crypto_tfm *tfm);
static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
{
- return crypto_aead_crt(geniv)->base;
+ return geniv->child;
}
static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
@@ -78,5 +140,29 @@ static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
aead_request_complete(&req->areq, err);
}
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+ unsigned int reqsize)
+{
+ crypto_aead_crt(aead)->reqsize = reqsize;
+}
+
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+{
+ return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize :
+ alg->maxauthsize;
+}
+
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+{
+ return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+}
+
+int crypto_register_aead(struct aead_alg *alg);
+void crypto_unregister_aead(struct aead_alg *alg);
+int crypto_register_aeads(struct aead_alg *algs, int count);
+void crypto_unregister_aeads(struct aead_alg *algs, int count);
+int aead_register_instance(struct crypto_template *tmpl,
+ struct aead_instance *inst);
+
#endif /* _CRYPTO_INTERNAL_AEAD_H */
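
Illustrative only: a template's per-transform init/exit built on the helpers above to reach the instance context (assumed here to consist solely of the crypto_aead_spawn) and to instantiate the inner AEAD; the my_* names and the context layout are assumptions, not part of the patch.

#include <linux/err.h>
#include <crypto/internal/aead.h>

struct my_aead_ctx {                    /* hypothetical per-tfm context */
        struct crypto_aead *child;
};

static int my_aead_init_tfm(struct crypto_aead *aead)
{
        struct aead_instance *inst = aead_alg_instance(aead);
        struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
        struct my_aead_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_aead *child;

        child = crypto_spawn_aead(spawn);
        if (IS_ERR(child))
                return PTR_ERR(child);

        ctx->child = child;
        return 0;
}

static void my_aead_exit_tfm(struct crypto_aead *aead)
{
        struct my_aead_ctx *ctx = crypto_aead_ctx(aead);

        crypto_free_aead(ctx->child);
}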
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
new file mode 100644
index 000000000000..9a2bda15e454
--- /dev/null
+++ b/include/crypto/internal/akcipher.h
@@ -0,0 +1,60 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AKCIPHER_INT_H
+#define _CRYPTO_AKCIPHER_INT_H
+#include <crypto/akcipher.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *akcipher_request_ctx(struct akcipher_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
+{
+ return tfm->base.__crt_ctx;
+}
+
+static inline void akcipher_request_complete(struct akcipher_request *req,
+ int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
+{
+ return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name;
+}
+
+/**
+ * crypto_register_akcipher() -- Register public key algorithm
+ *
+ * Function registers an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_akcipher(struct akcipher_alg *alg);
+
+/**
+ * crypto_unregister_akcipher() -- Unregister public key algorithm
+ *
+ * Function unregisters an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_akcipher(struct akcipher_alg *alg);
+#endif
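
A provider-side sketch, illustrative only: a driver fills in struct akcipher_alg with its handlers and registers it via crypto_register_akcipher(). The my_* handlers, the context struct and the cra_* strings are assumptions, not part of the patch; struct rsa_key comes from the new <crypto/internal/rsa.h> added below.

#include <linux/module.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>

struct my_rsa_ctx {                     /* hypothetical per-tfm context */
        struct rsa_key key;
};

static int my_rsa_op(struct akcipher_request *req)
{
        /* placeholder for the real encrypt/decrypt/sign/verify handler */
        return -EOPNOTSUPP;
}

static int my_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
                         unsigned int keylen)
{
        /* placeholder: decode the BER blob into akcipher_tfm_ctx(tfm) */
        return 0;
}

static struct akcipher_alg my_rsa = {
        .encrypt        = my_rsa_op,
        .decrypt        = my_rsa_op,
        .sign           = my_rsa_op,
        .verify         = my_rsa_op,
        .setkey         = my_rsa_setkey,
        .base           = {
                .cra_name       = "rsa",
                .cra_driver_name = "rsa-mydriver",
                .cra_priority   = 100,
                .cra_module     = THIS_MODULE,
                .cra_ctxsize    = sizeof(struct my_rsa_ctx),
        },
};

static int __init my_rsa_mod_init(void)
{
        return crypto_register_akcipher(&my_rsa);
}

static void __exit my_rsa_mod_exit(void)
{
        crypto_unregister_akcipher(&my_rsa);
}

module_init(my_rsa_mod_init);
module_exit(my_rsa_mod_exit);
MODULE_LICENSE("GPL");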
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
new file mode 100644
index 000000000000..9ca9b871aba5
--- /dev/null
+++ b/include/crypto/internal/geniv.h
@@ -0,0 +1,24 @@
+/*
+ * geniv: IV generation
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_GENIV_H
+#define _CRYPTO_INTERNAL_GENIV_H
+
+#include <crypto/internal/aead.h>
+#include <linux/spinlock.h>
+
+struct aead_geniv_ctx {
+ spinlock_t lock;
+ struct crypto_aead *child;
+};
+
+#endif /* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h
index 896973369573..a52ef3483dd7 100644
--- a/include/crypto/internal/rng.h
+++ b/include/crypto/internal/rng.h
@@ -2,6 +2,7 @@
* RNG: Random Number Generator algorithms under the crypto API
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -16,11 +17,29 @@
#include <crypto/algapi.h>
#include <crypto/rng.h>
-extern const struct crypto_type crypto_rng_type;
+int crypto_register_rng(struct rng_alg *alg);
+void crypto_unregister_rng(struct rng_alg *alg);
+int crypto_register_rngs(struct rng_alg *algs, int count);
+void crypto_unregister_rngs(struct rng_alg *algs, int count);
+
+#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
+int crypto_del_default_rng(void);
+#else
+static inline int crypto_del_default_rng(void)
+{
+ return 0;
+}
+#endif
static inline void *crypto_rng_ctx(struct crypto_rng *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
+static inline void crypto_rng_set_entropy(struct crypto_rng *tfm,
+ const u8 *data, unsigned int len)
+{
+ crypto_rng_alg(tfm)->set_ent(tfm, data, len);
+}
+
#endif
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
new file mode 100644
index 000000000000..a8c86365439f
--- /dev/null
+++ b/include/crypto/internal/rsa.h
@@ -0,0 +1,27 @@
+/*
+ * RSA internal helpers
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _RSA_HELPER_
+#define _RSA_HELPER_
+#include <linux/mpi.h>
+
+struct rsa_key {
+ MPI n;
+ MPI e;
+ MPI d;
+};
+
+int rsa_parse_key(struct rsa_key *rsa_key, const void *key,
+ unsigned int key_len);
+
+void rsa_free_key(struct rsa_key *rsa_key);
+#endif
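
Illustrative only: how the setkey placeholder from the registration sketch earlier could be filled in using the parsing helper declared above; the my_rsa_ctx layout is an assumption.

#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>

struct my_rsa_ctx {                     /* as in the registration sketch above */
        struct rsa_key key;
};

static int my_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
                         unsigned int keylen)
{
        struct my_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

        /* a real driver would rsa_free_key() any previously parsed key first */
        return rsa_parse_key(&ctx->key, key, keylen);
}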
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index 65f299b08b0d..146af825eedb 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -8,6 +8,11 @@
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
+#define MD5_H0 0x67452301UL
+#define MD5_H1 0xefcdab89UL
+#define MD5_H2 0x98badcfeUL
+#define MD5_H3 0x10325476UL
+
struct md5_state {
u32 hash[MD5_HASH_WORDS];
u32 block[MD5_BLOCK_WORDS];
diff --git a/include/crypto/null.h b/include/crypto/null.h
index b7c864cc70df..06dc30d9f56e 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -8,4 +8,7 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
+struct crypto_blkcipher *crypto_get_default_null_skcipher(void);
+void crypto_put_default_null_skcipher(void);
+
#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index a16fb10142bf..b95ede354a66 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -2,6 +2,7 @@
* RNG: Random Number Generator algorithms under the crypto API
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -15,6 +16,50 @@
#include <linux/crypto.h>
+struct crypto_rng;
+
+/**
+ * struct rng_alg - random number generator definition
+ *
+ * @generate: The function defined by this variable obtains a
+ * random number. The random number generator transform
+ * must generate the random number out of the context
+ * provided with this call, plus any additional data
+ * if provided to the call.
+ * @seed: Seed or reseed the random number generator. With the
+ * invocation of this function call, the random number
+ * generator shall become ready for generation. If the
+ * random number generator requires a seed for setting
+ * up a new state, the seed must be provided by the
+ * consumer while invoking this function. The required
+ * size of the seed is defined with @seedsize.
+ * @set_ent: Set entropy that would otherwise be obtained from
+ * entropy source. Internal use only.
+ * @seedsize: The seed size required for a random number generator
+ * initialization defined with this variable. Some
+ * random number generators do not require a seed
+ * as the seeding is implemented internally without
+ * the need of support by the consumer. In this case,
+ * the seed size is set to zero.
+ * @base: Common crypto API algorithm data structure.
+ */
+struct rng_alg {
+ int (*generate)(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen);
+ int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+ void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
+ unsigned int len);
+
+ unsigned int seedsize;
+
+ struct crypto_alg base;
+};
+
+struct crypto_rng {
+ struct crypto_tfm base;
+};
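
A provider-side sketch of the new interface, illustrative only: the generation logic, the driver name and the zero seedsize are assumptions; registration uses crypto_register_rng() from the reworked <crypto/internal/rng.h> above.

#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/rng.h>

static int my_rng_generate(struct crypto_rng *tfm,
                           const u8 *src, unsigned int slen,
                           u8 *dst, unsigned int dlen)
{
        /* placeholder: fill dst with dlen random bytes, mixing in src/slen */
        memset(dst, 0, dlen);
        return 0;
}

static int my_rng_seed(struct crypto_rng *tfm, const u8 *seed,
                       unsigned int slen)
{
        /* placeholder: absorb the caller-provided seed material */
        return 0;
}

static struct rng_alg my_rng = {
        .generate       = my_rng_generate,
        .seed           = my_rng_seed,
        .seedsize       = 0,            /* seeds itself, no caller seed needed */
        .base           = {
                .cra_name       = "stdrng",
                .cra_driver_name = "my_rng",
                .cra_priority   = 100,
                .cra_module     = THIS_MODULE,
        },
};

static int __init my_rng_mod_init(void)
{
        return crypto_register_rng(&my_rng);
}

static void __exit my_rng_mod_exit(void)
{
        crypto_unregister_rng(&my_rng);
}

module_init(my_rng_mod_init);
module_exit(my_rng_mod_exit);
MODULE_LICENSE("GPL");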
+
extern struct crypto_rng *crypto_default_rng;
int crypto_get_default_rng(void);
@@ -27,11 +72,6 @@ void crypto_put_default_rng(void);
* CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
*/
-static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_rng *)tfm;
-}
-
/**
* crypto_alloc_rng() -- allocate RNG handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -52,15 +92,7 @@ static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
-static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_RNG;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_rng_cast(crypto_alloc_base(alg_name, type, mask));
-}
+struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
{
@@ -77,12 +109,8 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
- return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
-}
-
-static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
-{
- return &crypto_rng_tfm(tfm)->crt_rng;
+ return container_of(crypto_rng_tfm(tfm)->__crt_alg,
+ struct rng_alg, base);
}
/**
@@ -91,7 +119,28 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
- crypto_free_tfm(crypto_rng_tfm(tfm));
+ crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
+}
+
+/**
+ * crypto_rng_generate() - get random number
+ * @tfm: cipher handle
+ * @src: Input buffer holding additional data, may be NULL
+ * @slen: Length of additional data
+ * @dst: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random
+ * numbers using the random number generator referenced by the
+ * cipher handle.
+ *
+ * Return: 0 function was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen)
+{
+ return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
}
/**
@@ -103,13 +152,12 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
* This function fills the caller-allocated buffer with random numbers using the
* random number generator referenced by the cipher handle.
*
- * Return: > 0 function was successful and returns the number of generated
- * bytes; < 0 if an error occurred
+ * Return: 0 function was successful; < 0 if an error occurred
*/
static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
u8 *rdata, unsigned int dlen)
{
- return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
+ return crypto_rng_generate(tfm, NULL, 0, rdata, dlen);
}
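
A consumer-side sketch of the reworked calls, illustrative only; the "stdrng" name and the caller-supplied seed are placeholders.

#include <linux/err.h>
#include <crypto/rng.h>

static int my_get_random(const u8 *seed, unsigned int slen,
                         u8 *buf, unsigned int len)
{
        struct crypto_rng *rng;
        int err;

        rng = crypto_alloc_rng("stdrng", 0, 0);
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        /* reseed before use, then pull len bytes into the caller's buffer */
        err = crypto_rng_reset(rng, seed, slen);
        if (!err)
                err = crypto_rng_get_bytes(rng, buf, len);

        crypto_free_rng(rng);
        return err;
}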
/**
@@ -129,11 +177,8 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_rng_reset(struct crypto_rng *tfm,
- u8 *seed, unsigned int slen)
-{
- return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
-}
+int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen);
/**
* crypto_rng_seedsize() - obtain seed size of RNG
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 20e4226a2e14..96670e7e7c14 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -102,4 +102,8 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);
+struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
+ struct scatterlist *src,
+ unsigned int len);
+
#endif /* _CRYPTO_SCATTERWALK_H */
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 190f8a0e0242..dd7905a3c22e 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -65,20 +65,20 @@
#define SHA512_H7 0x5be0cd19137e2179ULL
struct sha1_state {
- u64 count;
u32 state[SHA1_DIGEST_SIZE / 4];
+ u64 count;
u8 buffer[SHA1_BLOCK_SIZE];
};
struct sha256_state {
- u64 count;
u32 state[SHA256_DIGEST_SIZE / 4];
+ u64 count;
u8 buf[SHA256_BLOCK_SIZE];
};
struct sha512_state {
- u64 count[2];
u64 state[SHA512_DIGEST_SIZE / 8];
+ u64 count[2];
u8 buf[SHA512_BLOCK_SIZE];
};
@@ -87,9 +87,18 @@ struct shash_desc;
extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+
+extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
#endif
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
new file mode 100644
index 000000000000..d0df431f9a97
--- /dev/null
+++ b/include/crypto/sha1_base.h
@@ -0,0 +1,106 @@
+/*
+ * sha1_base.h - core logic for SHA-1 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
+
+static inline int sha1_base_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA1_H0;
+ sctx->state[1] = SHA1_H1;
+ sctx->state[2] = SHA1_H2;
+ sctx->state[3] = SHA1_H3;
+ sctx->state[4] = SHA1_H4;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha1_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha1_block_fn *block_fn)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA1_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ blocks = len / SHA1_BLOCK_SIZE;
+ len %= SHA1_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA1_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha1_base_do_finalize(struct shash_desc *desc,
+ sha1_block_fn *block_fn)
+{
+ const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->buffer[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buffer, 1);
+
+ return 0;
+}
+
+static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sha1_state){};
+ return 0;
+}
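
Illustrative only: the intended usage pattern, where an architecture-specific driver supplies its accelerated block transform (my_sha1_block() is a placeholder) and builds its shash callbacks on these helpers; the shash_alg registration glue is omitted. The sha256_base.h and sha512_base.h headers added below follow the same pattern.

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>

static void my_sha1_block(struct sha1_state *sst, u8 const *src, int blocks)
{
        /* placeholder for the accelerated SHA-1 block transform */
}

static int my_sha1_update(struct shash_desc *desc, const u8 *data,
                          unsigned int len)
{
        return sha1_base_do_update(desc, data, len, my_sha1_block);
}

static int my_sha1_finup(struct shash_desc *desc, const u8 *data,
                         unsigned int len, u8 *out)
{
        if (len)
                sha1_base_do_update(desc, data, len, my_sha1_block);
        sha1_base_do_finalize(desc, my_sha1_block);
        return sha1_base_finish(desc, out);
}

static int my_sha1_final(struct shash_desc *desc, u8 *out)
{
        return my_sha1_finup(desc, NULL, 0, out);
}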
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
new file mode 100644
index 000000000000..d1f2195bb7de
--- /dev/null
+++ b/include/crypto/sha256_base.h
@@ -0,0 +1,128 @@
+/*
+ * sha256_base.h - core logic for SHA-256 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
+ int blocks);
+
+static inline int sha224_base_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha256_base_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha256_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA256_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ blocks = len / SHA256_BLOCK_SIZE;
+ len %= SHA256_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA256_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+ sha256_block_fn *block_fn)
+{
+ const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->buf[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ memset(sctx->buf + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buf, 1);
+
+ return 0;
+}
+
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
new file mode 100644
index 000000000000..6c5341e005ea
--- /dev/null
+++ b/include/crypto/sha512_base.h
@@ -0,0 +1,131 @@
+/*
+ * sha512_base.h - core logic for SHA-512 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
+static inline int sha384_base_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA384_H0;
+ sctx->state[1] = SHA384_H1;
+ sctx->state[2] = SHA384_H2;
+ sctx->state[3] = SHA384_H3;
+ sctx->state[4] = SHA384_H4;
+ sctx->state[5] = SHA384_H5;
+ sctx->state[6] = SHA384_H6;
+ sctx->state[7] = SHA384_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static inline int sha512_base_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA512_H0;
+ sctx->state[1] = SHA512_H1;
+ sctx->state[2] = SHA512_H2;
+ sctx->state[3] = SHA512_H3;
+ sctx->state[4] = SHA512_H4;
+ sctx->state[5] = SHA512_H5;
+ sctx->state[6] = SHA512_H6;
+ sctx->state[7] = SHA512_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static inline int sha512_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha512_block_fn *block_fn)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ sctx->count[0] += len;
+ if (sctx->count[0] < len)
+ sctx->count[1]++;
+
+ if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA512_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ blocks = len / SHA512_BLOCK_SIZE;
+ len %= SHA512_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA512_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha512_base_do_finalize(struct shash_desc *desc,
+ sha512_block_fn *block_fn)
+{
+ const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+ unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ sctx->buf[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ memset(sctx->buf + partial, 0x0, bit_offset - partial);
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ block_fn(sctx, sctx->buf, 1);
+
+ return 0;
+}
+
+static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ __be64 *digest = (__be64 *)out;
+ int i;
+
+ for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
+ put_unaligned_be64(sctx->state[i], digest++);
+
+ *sctx = (struct sha512_state){};
+ return 0;
+}
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 5a4f49005169..de13bfc35634 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -38,17 +38,18 @@ struct dw_hdmi_curr_ctrl {
u16 curr[DW_HDMI_RES_MAX];
};
-struct dw_hdmi_sym_term {
+struct dw_hdmi_phy_config {
unsigned long mpixelclock;
u16 sym_ctr; /*clock symbol and transmitter control*/
u16 term; /*transmission termination value*/
+ u16 vlev_ctr; /* voltage level control */
};
struct dw_hdmi_plat_data {
enum dw_hdmi_devtype dev_type;
const struct dw_hdmi_mpll_config *mpll_cfg;
const struct dw_hdmi_curr_ctrl *cur_ctr;
- const struct dw_hdmi_sym_term *sym_term;
+ const struct dw_hdmi_phy_config *phy_config;
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
};
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e928625a9da0..62c40777c009 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,9 @@ struct dma_buf_attachment;
* PRIME: used in the prime code.
* This is the category used by the DRM_DEBUG_PRIME() macro.
*
+ * ATOMIC: used in the atomic code.
+ * This is the category used by the DRM_DEBUG_ATOMIC() macro.
+ *
* Enabling verbose debug messages is done through the drm.debug parameter,
* each category being enabled by a bit.
*
@@ -121,6 +124,7 @@ struct dma_buf_attachment;
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08
+#define DRM_UT_ATOMIC 0x10
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
@@ -207,6 +211,11 @@ void drm_err(const char *format, ...);
if (unlikely(drm_debug & DRM_UT_PRIME)) \
drm_ut_debug_printk(__func__, fmt, ##args); \
} while (0)
+#define DRM_DEBUG_ATOMIC(fmt, args...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
/*@}*/
@@ -244,7 +253,6 @@ struct drm_ioctl_desc {
unsigned int cmd;
int flags;
drm_ioctl_t *func;
- unsigned int cmd_drv;
const char *name;
};
@@ -253,8 +261,13 @@ struct drm_ioctl_desc {
* ioctl, for use by drm_ioctl().
*/
-#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
+ .cmd = DRM_IOCTL_##ioctl, \
+ .func = _func, \
+ .flags = _flags, \
+ .name = #ioctl \
+ }
/* Event queued up for userspace to read */
struct drm_pending_event {
@@ -922,6 +935,7 @@ extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 51168a8b723a..c157103492b0 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -75,4 +75,28 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+#define for_each_connector_in_state(state, connector, connector_state, __i) \
+ for ((__i) = 0; \
+ (connector) = (state)->connectors[__i], \
+ (connector_state) = (state)->connector_states[__i], \
+ (__i) < (state)->num_connector; \
+ (__i)++) \
+ if (connector)
+
+#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
+ for ((__i) = 0; \
+ (crtc) = (state)->crtcs[__i], \
+ (crtc_state) = (state)->crtc_states[__i], \
+ (__i) < (state)->dev->mode_config.num_crtc; \
+ (__i)++) \
+ if (crtc_state)
+
+#define for_each_plane_in_state(state, plane, plane_state, __i) \
+ for ((__i) = 0; \
+ (plane) = (state)->planes[__i], \
+ (plane_state) = (state)->plane_states[__i], \
+ (__i) < (state)->dev->mode_config.num_total_plane; \
+ (__i)++) \
+ if (plane_state)
+
#endif /* DRM_ATOMIC_H_ */
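
Illustrative only: a driver-side atomic_check sketch walking the objects carried by an update with the new iterators; the validation bodies are placeholders and drm_atomic_helper_check() is assumed to be available from <drm/drm_atomic_helper.h>.

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static int my_atomic_check(struct drm_device *dev,
                           struct drm_atomic_state *state)
{
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int i;

        for_each_plane_in_state(state, plane, plane_state, i) {
                if (!plane_state->crtc)
                        continue;
                /* placeholder: validate plane_state against hardware limits */
        }

        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                /* placeholder: validate crtc_state, e.g. the requested mode */
        }

        return drm_atomic_helper_check(dev, state);
}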
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 8039d54a7441..d665781eb542 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -43,9 +43,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);
-void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
- struct drm_atomic_state *state);
-void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
@@ -87,20 +87,34 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 920e21a8f3fd..ca71c03143d1 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -53,7 +53,6 @@ struct fence;
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
-#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
#define DRM_MODE_OBJECT_ANY 0
struct drm_mode_object {
@@ -202,6 +201,7 @@ struct drm_framebuffer {
const struct drm_framebuffer_funcs *funcs;
unsigned int pitches[4];
unsigned int offsets[4];
+ uint64_t modifier[4];
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */
@@ -466,7 +466,7 @@ struct drm_crtc {
int framedur_ns, linedur_ns, pixeldur_ns;
/* if you are using the helper */
- void *helper_private;
+ const void *helper_private;
struct drm_object_properties properties;
@@ -596,7 +596,7 @@ struct drm_encoder {
struct drm_crtc *crtc;
struct drm_bridge *bridge;
const struct drm_encoder_funcs *funcs;
- void *helper_private;
+ const void *helper_private;
};
/* should we poll this connector for connects and disconnects */
@@ -700,7 +700,7 @@ struct drm_connector {
/* requested DPMS state */
int dpms;
- void *helper_private;
+ const void *helper_private;
/* forced on connector */
struct drm_cmdline_mode cmdline_mode;
@@ -829,6 +829,7 @@ enum drm_plane_type {
* @possible_crtcs: pipes this plane can be bound to
* @format_types: array of formats supported by this plane
* @format_count: number of formats supported
+ * @format_default: driver hasn't supplied supported formats for the plane
* @crtc: currently bound CRTC
* @fb: currently bound fb
* @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
@@ -849,6 +850,7 @@ struct drm_plane {
uint32_t possible_crtcs;
uint32_t *format_types;
uint32_t format_count;
+ bool format_default;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
@@ -861,7 +863,7 @@ struct drm_plane {
enum drm_plane_type type;
- void *helper_private;
+ const void *helper_private;
struct drm_plane_state *state;
};
@@ -912,7 +914,7 @@ struct drm_bridge {
};
/**
- * struct struct drm_atomic_state - the global state object for atomic updates
+ * struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics
@@ -972,7 +974,7 @@ struct drm_mode_set {
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
- * @atomic_check: check whether a give atomic state update is possible
+ * @atomic_check: check whether a given atomic state update is possible
* @atomic_commit: commit an atomic state update previously verified with
* atomic_check()
*
@@ -1155,6 +1157,9 @@ struct drm_mode_config {
/* whether async page flip is supported or not */
bool async_page_flip;
+ /* whether the driver supports fb modifiers */
+ bool allow_fb_modifiers;
+
/* cursor size */
uint32_t cursor_width, cursor_height;
};
@@ -1259,6 +1264,8 @@ extern int drm_plane_init(struct drm_device *dev,
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane);
+extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
+ u32 format);
extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c250a22b39ab..c8fc187061de 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -89,6 +89,7 @@ struct drm_crtc_helper_funcs {
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
+ /* Actually set the mode for atomic helpers, optional */
void (*mode_set_nofb)(struct drm_crtc *crtc);
/* Move the crtc on the current fb to the given position *optional* */
@@ -119,7 +120,7 @@ struct drm_crtc_helper_funcs {
* @mode_fixup: try to fixup proposed mode for this connector
* @prepare: part of the disable sequence, called before the CRTC modeset
* @commit: called after the CRTC modeset
- * @mode_set: set this mode
+ * @mode_set: set this mode, optional for atomic helpers
* @get_crtc: return CRTC that the encoder is currently attached to
* @detect: connection status detection
* @disable: disable encoder when not in use (overrides DPMS off)
@@ -196,19 +197,19 @@ extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
- crtc->helper_private = (void *)funcs;
+ crtc->helper_private = funcs;
}
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
- encoder->helper_private = (void *)funcs;
+ encoder->helper_private = funcs;
}
static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
- connector->helper_private = (void *)funcs;
+ connector->helper_private = funcs;
}
extern void drm_helper_resume_force_mode(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 7e25030a6aa2..523f04c90dea 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -42,6 +42,8 @@
* 1.2 formally includes both eDP and DPI definitions.
*/
+#define DP_AUX_MAX_PAYLOAD_BYTES 16
+
#define DP_AUX_I2C_WRITE 0x0
#define DP_AUX_I2C_READ 0x1
#define DP_AUX_I2C_STATUS 0x2
@@ -92,6 +94,15 @@
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
+#define DP_RECEIVE_PORT_0_CAP_0 0x008
+# define DP_LOCAL_EDID_PRESENT (1 << 1)
+# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+
+#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
+
+#define DP_RECEIVE_PORT_1_CAP_0 0x00a
+#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b
+
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
@@ -101,8 +112,19 @@
# define DP_I2C_SPEED_1M 0x20
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0)
+# define DP_FRAMING_CHANGE_CAP (1 << 1)
+# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
+
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+#define DP_ADAPTER_CAP 0x00f /* 1.2 */
+# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
+# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1)
+
+#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
+# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */
+
/* Multiple stream transport */
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
@@ -110,10 +132,56 @@
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
+#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
+
+/* AV_SYNC_DATA_BLOCK 1.2 */
+#define DP_AV_GRANULARITY 0x023
+# define DP_AG_FACTOR_MASK (0xf << 0)
+# define DP_AG_FACTOR_3MS (0 << 0)
+# define DP_AG_FACTOR_2MS (1 << 0)
+# define DP_AG_FACTOR_1MS (2 << 0)
+# define DP_AG_FACTOR_500US (3 << 0)
+# define DP_AG_FACTOR_200US (4 << 0)
+# define DP_AG_FACTOR_100US (5 << 0)
+# define DP_AG_FACTOR_10US (6 << 0)
+# define DP_AG_FACTOR_1US (7 << 0)
+# define DP_VG_FACTOR_MASK (0xf << 4)
+# define DP_VG_FACTOR_3MS (0 << 4)
+# define DP_VG_FACTOR_2MS (1 << 4)
+# define DP_VG_FACTOR_1MS (2 << 4)
+# define DP_VG_FACTOR_500US (3 << 4)
+# define DP_VG_FACTOR_200US (4 << 4)
+# define DP_VG_FACTOR_100US (5 << 4)
+
+#define DP_AUD_DEC_LAT0 0x024
+#define DP_AUD_DEC_LAT1 0x025
+
+#define DP_AUD_PP_LAT0 0x026
+#define DP_AUD_PP_LAT1 0x027
+
+#define DP_VID_INTER_LAT 0x028
+
+#define DP_VID_PROG_LAT 0x029
+
+#define DP_REP_LAT 0x02a
+
+#define DP_AUD_DEL_INS0 0x02b
+#define DP_AUD_DEL_INS1 0x02c
+#define DP_AUD_DEL_INS2 0x02d
+/* End of AV_SYNC_DATA_BLOCK */
+
+#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
+# define DP_ALPM_CAP (1 << 0)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
+
#define DP_GUID 0x030 /* 1.2 */
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
+# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
@@ -153,6 +221,7 @@
/* link configuration */
#define DP_LINK_BW_SET 0x100
+# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
@@ -168,11 +237,12 @@
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
-# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
-# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
-# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
-# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
-# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
+# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2)
# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
@@ -215,17 +285,63 @@
/* bitmask as for DP_I2C_SPEED_CAP */
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0)
+# define DP_FRAMING_CHANGE_ENABLE (1 << 1)
+# define DP_PANEL_SELF_TEST_ENABLE (1 << 7)
+
+#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */
+#define DP_LINK_QUAL_LANE1_SET 0x10c
+#define DP_LINK_QUAL_LANE2_SET 0x10d
+#define DP_LINK_QUAL_LANE3_SET 0x10e
+# define DP_LINK_QUAL_PATTERN_DISABLE 0
+# define DP_LINK_QUAL_PATTERN_D10_2 1
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
+# define DP_LINK_QUAL_PATTERN_PRBS7 3
+# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
+# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
+# define DP_LINK_QUAL_PATTERN_MASK 7
+
+#define DP_TRAINING_LANE0_1_SET2 0x10f
+#define DP_TRAINING_LANE2_3_SET2 0x110
+# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0)
+# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
+# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4)
+# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
+#define DP_AUDIO_DELAY0 0x112 /* 1.2 */
+#define DP_AUDIO_DELAY1 0x113
+#define DP_AUDIO_DELAY2 0x114
+
+#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */
+# define DP_LINK_RATE_SET_SHIFT 0
+# define DP_LINK_RATE_SET_MASK (7 << 0)
+
+#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
+# define DP_ALPM_ENABLE (1 << 0)
+# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
+# define DP_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
+# define DP_PWR_NOT_NEEDED (1 << 0)
+
+#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
+
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
+# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
+# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
@@ -332,6 +448,49 @@
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
+#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
+# define DP_EDP_11 0x00
+# define DP_EDP_12 0x01
+# define DP_EDP_13 0x02
+# define DP_EDP_14 0x03
+
+#define DP_EDP_GENERAL_CAP_1 0x701
+
+#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
+
+#define DP_EDP_GENERAL_CAP_2 0x703
+
+#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
+
+#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
+
+#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
+
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
+
+#define DP_EDP_PWMGEN_BIT_COUNT 0x724
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726
+
+#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727
+
+#define DP_EDP_BACKLIGHT_FREQ_SET 0x728
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f
+
+#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
+#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+
+#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
+#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
@@ -350,6 +509,7 @@
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
@@ -363,6 +523,9 @@
# define DP_PSR_SINK_INTERNAL_ERROR 7
# define DP_PSR_SINK_STATE_MASK 0x07
+#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
+# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -519,6 +682,9 @@ struct drm_dp_aux_msg {
* transactions. The drm_dp_aux_register_i2c_bus() function registers an
* I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
* should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
+ * The I2C adapter uses long transfers by default; if a partial response is
+ * received, the adapter will drop down to the size given by the partial
+ * response for this transaction only.
*
* Note that the aux helper code assumes that the .transfer() function
* only modifies the reply field of the drm_dp_aux_msg structure. The
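A hedged skeleton (not from the patch) of a driver-side .transfer() hook consistent with the note above: it may transfer fewer bytes than requested (at most DP_AUX_MAX_PAYLOAD_BYTES per AUX transaction), must only write the reply field, and signals a short transfer through its return value; the hardware access is elided:

static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
				struct drm_dp_aux_msg *msg)
{
	size_t len = min_t(size_t, msg->size, DP_AUX_MAX_PAYLOAD_BYTES);

	/* ... run the AUX transaction of 'len' bytes from msg->buffer ... */

	msg->reply = DP_AUX_NATIVE_REPLY_ACK;	/* or whatever the sink replied */
	return len;	/* a short return shrinks only this i2c transaction */
}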
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 00c1da927245..a2507817be41 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -486,6 +486,8 @@ int drm_dp_calc_pbn_mode(int clock, int bpp);
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 87d85e81d3a7..799050198323 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -215,6 +215,8 @@ struct detailed_timing {
#define DRM_ELD_VER 0
# define DRM_ELD_VER_SHIFT 3
# define DRM_ELD_VER_MASK (0x1f << 3)
+# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
+# define DRM_ELD_VER_CANNED (0x1f << 3)
#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 21b944c456f6..0dfd94def593 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -44,6 +44,25 @@ struct drm_fb_helper_crtc {
int x, y;
};
+/**
+ * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
+ * @fb_width: fbdev width
+ * @fb_height: fbdev height
+ * @surface_width: scanout buffer width
+ * @surface_height: scanout buffer height
+ * @surface_bpp: scanout buffer bpp
+ * @surface_depth: scanout buffer depth
+ *
+ * Note that the scanout surface width/height may be larger than the fbdev
+ * width/height. In case of multiple displays, the scanout surface is sized
+ * according to the largest width/height (so it is large enough for all CRTCs
+ * to scanout). But the fbdev width/height is sized to the minimum width/
+ * height of all the displays. This ensures that fbcon fits on the smallest
+ * of the attached displays.
+ *
+ * So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height,
+ * rather than the surface size.
+ */
struct drm_fb_helper_surface_size {
u32 fb_width;
u32 fb_height;
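A minimal sketch of how a .fb_probe() implementation is expected to consume the two sizes described above, with foo_alloc_scanout() standing in for the driver's buffer allocation: the backing buffer uses the surface dimensions, while fbdev only sees fb_width/fb_height:

static int foo_fbdev_probe(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info = helper->fbdev;	/* assumed already set up */
	u32 pitch = sizes->surface_width * DIV_ROUND_UP(sizes->surface_bpp, 8);
	int ret;

	ret = foo_alloc_scanout(helper, sizes->surface_width,
				sizes->surface_height, sizes->surface_bpp);
	if (ret)
		return ret;

	drm_fb_helper_fill_fix(info, pitch, sizes->surface_depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);

	return 0;
}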
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 1e6ae1458f7a..7a592d7e398b 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -149,14 +149,16 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
- if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev;
+
+ if (!obj)
+ return;
- mutex_lock(&dev->struct_mutex);
- if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
- drm_gem_object_free(&obj->refcount);
+ dev = obj->dev;
+ if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
- }
+ else
+ might_lock(&dev->struct_mutex);
}
int drm_gem_handle_create(struct drm_file *file_priv,
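For reference, a hedged summary of the two helpers the rewritten function now leans on (see include/linux/kref.h and the lockdep documentation for the authoritative definitions):

/*
 * kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex)
 * decrements the refcount; only if it would hit zero does it take the
 * mutex, call the release function with the mutex held, and return
 * non-zero -- which is why the caller above must drop struct_mutex itself.
 *
 * might_lock(&dev->struct_mutex) on the other branch is a lockdep
 * annotation: it records that this path *may* acquire struct_mutex, so
 * callers that would deadlock are flagged even when the refcount never
 * actually drops to zero during testing.
 */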
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index d92f6dd1fb11..0616188c7801 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -92,7 +92,7 @@ enum drm_mode_status {
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
#define CRTC_NO_VSCAN (1 << 3) /* don't adjust doublescan */
-#define CRTC_STEREO_DOUBLE_ONLY (CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
+#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
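A usage sketch (not from the patch) showing what the corrected mask is for; with the fix, passing it to drm_mode_set_crtcinfo() now actually expands stereo modes while still skipping the doublescan/vscan adjustments:

static void foo_fixup_stereo(struct drm_display_mode *adjusted_mode)
{
	drm_mode_set_crtcinfo(adjusted_mode, CRTC_STEREO_DOUBLE_ONLY);
}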
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 1fbcc96063a7..13ff44b28893 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -29,6 +29,7 @@
struct drm_connector;
struct drm_device;
struct drm_panel;
+struct display_timing;
/**
* struct drm_panel_funcs - perform operations on a given panel
@@ -38,6 +39,8 @@ struct drm_panel;
* @enable: enable panel (turn on back light, etc.)
* @get_modes: add modes to the connector that the panel is attached to and
* return the number of modes added
+ * @get_timings: copy display timings into the provided array and return
+ * the number of display timings available
*
* The .prepare() function is typically called before the display controller
* starts to transmit video data. Panel drivers can use this to turn the panel
@@ -68,6 +71,8 @@ struct drm_panel_funcs {
int (*prepare)(struct drm_panel *panel);
int (*enable)(struct drm_panel *panel);
int (*get_modes)(struct drm_panel *panel);
+ int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
+ struct display_timing *timings);
};
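A hedged sketch of the new callback for a hypothetical "foo" panel: copy up to num_timings entries into the caller's array and return how many the panel actually provides (struct display_timing comes from <video/display_timing.h>; the numbers are made up):

static const struct display_timing foo_panel_timing = {
	.pixelclock = { 66000000, 71000000, 78000000 },	/* min/typ/max */
	.hactive = { 1280, 1280, 1280 },
	.vactive = { 800, 800, 800 },
	/* blanking and sync entries omitted for brevity */
};

static int foo_panel_get_timings(struct drm_panel *panel,
				 unsigned int num_timings,
				 struct display_timing *timings)
{
	if (timings && num_timings >= 1)
		timings[0] = foo_panel_timing;

	return 1;	/* this panel exposes a single timing */
}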
struct drm_panel {
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 2dd405c9be78..45c39a37f924 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -186,6 +186,7 @@
{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 31c11d36fae6..96e16283afb9 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -59,9 +59,11 @@ extern int drm_crtc_init(struct drm_device *dev,
*/
struct drm_plane_helper_funcs {
int (*prepare_fb)(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state);
void (*cleanup_fb)(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state);
int (*atomic_check)(struct drm_plane *plane,
struct drm_plane_state *state);
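A hedged sketch of a driver hook using the new signature; the extra state argument lets .prepare_fb() inspect the upcoming plane state (for example to pick up a fence) rather than just the framebuffer. foo_pin_framebuffer() is a made-up driver helper:

static int foo_plane_prepare_fb(struct drm_plane *plane,
				struct drm_framebuffer *fb,
				const struct drm_plane_state *new_state)
{
	if (!new_state->fb)
		return 0;

	return foo_pin_framebuffer(new_state->fb);	/* pin for scanout */
}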
@@ -74,7 +76,7 @@ struct drm_plane_helper_funcs {
static inline void drm_plane_helper_add(struct drm_plane *plane,
const struct drm_plane_helper_funcs *funcs)
{
- plane->helper_private = (void *)funcs;
+ plane->helper_private = funcs;
}
extern int drm_plane_helper_check_update(struct drm_plane *plane,
@@ -98,10 +100,6 @@ extern int drm_primary_helper_update(struct drm_plane *plane,
extern int drm_primary_helper_disable(struct drm_plane *plane);
extern void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
-extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
- const uint32_t *formats,
- int num_formats);
-
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index d016dc57f007..613372375ada 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -208,40 +208,41 @@
#define INTEL_VLV_D_IDS(info) \
INTEL_VGA_DEVICE(0x0155, info)
-#define _INTEL_BDW_M(gt, id, info) \
- INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
-#define _INTEL_BDW_D(gt, id, info) \
- INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
-
-#define _INTEL_BDW_M_IDS(gt, info) \
- _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
- _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
-
-#define _INTEL_BDW_D_IDS(gt, info) \
- _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
- _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
-
-#define INTEL_BDW_GT12M_IDS(info) \
- _INTEL_BDW_M_IDS(1, info), \
- _INTEL_BDW_M_IDS(2, info)
+#define INTEL_BDW_GT12M_IDS(info) \
+ INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
+ INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
+ INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
+ INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
#define INTEL_BDW_GT12D_IDS(info) \
- _INTEL_BDW_D_IDS(1, info), \
- _INTEL_BDW_D_IDS(2, info)
+ INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
+ INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
+ INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
+ INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
#define INTEL_BDW_GT3M_IDS(info) \
- _INTEL_BDW_M_IDS(3, info)
+ INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x162E, info) /* ULX */
#define INTEL_BDW_GT3D_IDS(info) \
- _INTEL_BDW_D_IDS(3, info)
+ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
#define INTEL_BDW_RSVDM_IDS(info) \
- _INTEL_BDW_M_IDS(4, info)
+ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x163E, info) /* ULX */
#define INTEL_BDW_RSVDD_IDS(info) \
- _INTEL_BDW_D_IDS(4, info)
+ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_M_IDS(info) \
INTEL_BDW_GT12M_IDS(info), \
@@ -259,21 +260,31 @@
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
-#define INTEL_SKL_IDS(info) \
- INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+#define INTEL_SKL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
- INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+ INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
+
+#define INTEL_SKL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
- INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
- INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+#define INTEL_SKL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
+
+#define INTEL_SKL_IDS(info) \
+ INTEL_SKL_GT1_IDS(info), \
+ INTEL_SKL_GT2_IDS(info), \
+ INTEL_SKL_GT3_IDS(info)
+
+
#endif /* _I915_PCIIDS_H */
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index 961b9c130ea9..aab088d30199 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -282,4 +282,65 @@
*/
#define NR_CLKS_DMC 21
+/*
+ * CMU ISP
+ */
+
+/* Dividers */
+
+#define CLK_DIV_ISP1 1
+#define CLK_DIV_ISP0 2
+#define CLK_DIV_MCUISP1 3
+#define CLK_DIV_MCUISP0 4
+#define CLK_DIV_MPWM 5
+
+/* Gates */
+
+#define CLK_UART_ISP 8
+#define CLK_WDT_ISP 9
+#define CLK_PWM_ISP 10
+#define CLK_I2C1_ISP 11
+#define CLK_I2C0_ISP 12
+#define CLK_MPWM_ISP 13
+#define CLK_MCUCTL_ISP 14
+#define CLK_PPMUISPX 15
+#define CLK_PPMUISPMX 16
+#define CLK_QE_LITE1 17
+#define CLK_QE_LITE0 18
+#define CLK_QE_FD 19
+#define CLK_QE_DRC 20
+#define CLK_QE_ISP 21
+#define CLK_CSIS1 22
+#define CLK_SMMU_LITE1 23
+#define CLK_SMMU_LITE0 24
+#define CLK_SMMU_FD 25
+#define CLK_SMMU_DRC 26
+#define CLK_SMMU_ISP 27
+#define CLK_GICISP 28
+#define CLK_CSIS0 29
+#define CLK_MCUISP 30
+#define CLK_LITE1 31
+#define CLK_LITE0 32
+#define CLK_FD 33
+#define CLK_DRC 34
+#define CLK_ISP 35
+#define CLK_QE_ISPCX 36
+#define CLK_QE_SCALERP 37
+#define CLK_QE_SCALERC 38
+#define CLK_SMMU_SCALERP 39
+#define CLK_SMMU_SCALERC 40
+#define CLK_SCALERP 41
+#define CLK_SCALERC 42
+#define CLK_SPI1_ISP 43
+#define CLK_SPI0_ISP 44
+#define CLK_SMMU_ISPCX 45
+#define CLK_ASYNCAXIM 46
+#define CLK_SCLK_MPWM_ISP 47
+
+/*
+ * Total number of clocks of CMU_ISP.
+ * NOTE: Must be equal to last clock ID increased by one.
+ */
+#define NR_CLKS_ISP 48
+
#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
new file mode 100644
index 000000000000..5bd80d5ecd0f
--- /dev/null
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -0,0 +1,1403 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H
+#define _DT_BINDINGS_CLOCK_EXYNOS5433_H
+
+/* CMU_TOP */
+#define CLK_FOUT_ISP_PLL 1
+#define CLK_FOUT_AUD_PLL 2
+
+#define CLK_MOUT_AUD_PLL 10
+#define CLK_MOUT_ISP_PLL 11
+#define CLK_MOUT_AUD_PLL_USER_T 12
+#define CLK_MOUT_MPHY_PLL_USER 13
+#define CLK_MOUT_MFC_PLL_USER 14
+#define CLK_MOUT_BUS_PLL_USER 15
+#define CLK_MOUT_ACLK_HEVC_400 16
+#define CLK_MOUT_ACLK_CAM1_333 17
+#define CLK_MOUT_ACLK_CAM1_552_B 18
+#define CLK_MOUT_ACLK_CAM1_552_A 19
+#define CLK_MOUT_ACLK_ISP_DIS_400 20
+#define CLK_MOUT_ACLK_ISP_400 21
+#define CLK_MOUT_ACLK_BUS0_400 22
+#define CLK_MOUT_ACLK_MSCL_400_B 23
+#define CLK_MOUT_ACLK_MSCL_400_A 24
+#define CLK_MOUT_ACLK_GSCL_333 25
+#define CLK_MOUT_ACLK_G2D_400_B 26
+#define CLK_MOUT_ACLK_G2D_400_A 27
+#define CLK_MOUT_SCLK_JPEG_C 28
+#define CLK_MOUT_SCLK_JPEG_B 29
+#define CLK_MOUT_SCLK_JPEG_A 30
+#define CLK_MOUT_SCLK_MMC2_B 31
+#define CLK_MOUT_SCLK_MMC2_A 32
+#define CLK_MOUT_SCLK_MMC1_B 33
+#define CLK_MOUT_SCLK_MMC1_A 34
+#define CLK_MOUT_SCLK_MMC0_D 35
+#define CLK_MOUT_SCLK_MMC0_C 36
+#define CLK_MOUT_SCLK_MMC0_B 37
+#define CLK_MOUT_SCLK_MMC0_A 38
+#define CLK_MOUT_SCLK_SPI4 39
+#define CLK_MOUT_SCLK_SPI3 40
+#define CLK_MOUT_SCLK_UART2 41
+#define CLK_MOUT_SCLK_UART1 42
+#define CLK_MOUT_SCLK_UART0 43
+#define CLK_MOUT_SCLK_SPI2 44
+#define CLK_MOUT_SCLK_SPI1 45
+#define CLK_MOUT_SCLK_SPI0 46
+#define CLK_MOUT_ACLK_MFC_400_C 47
+#define CLK_MOUT_ACLK_MFC_400_B 48
+#define CLK_MOUT_ACLK_MFC_400_A 49
+#define CLK_MOUT_SCLK_ISP_SENSOR2 50
+#define CLK_MOUT_SCLK_ISP_SENSOR1 51
+#define CLK_MOUT_SCLK_ISP_SENSOR0 52
+#define CLK_MOUT_SCLK_ISP_UART 53
+#define CLK_MOUT_SCLK_ISP_SPI1 54
+#define CLK_MOUT_SCLK_ISP_SPI0 55
+#define CLK_MOUT_SCLK_PCIE_100 56
+#define CLK_MOUT_SCLK_UFSUNIPRO 57
+#define CLK_MOUT_SCLK_USBHOST30 58
+#define CLK_MOUT_SCLK_USBDRD30 59
+#define CLK_MOUT_SCLK_SLIMBUS 60
+#define CLK_MOUT_SCLK_SPDIF 61
+#define CLK_MOUT_SCLK_AUDIO1 62
+#define CLK_MOUT_SCLK_AUDIO0 63
+#define CLK_MOUT_SCLK_HDMI_SPDIF 64
+
+#define CLK_DIV_ACLK_FSYS_200 100
+#define CLK_DIV_ACLK_IMEM_SSSX_266 101
+#define CLK_DIV_ACLK_IMEM_200 102
+#define CLK_DIV_ACLK_IMEM_266 103
+#define CLK_DIV_ACLK_PERIC_66_B 104
+#define CLK_DIV_ACLK_PERIC_66_A 105
+#define CLK_DIV_ACLK_PERIS_66_B 106
+#define CLK_DIV_ACLK_PERIS_66_A 107
+#define CLK_DIV_SCLK_MMC1_B 108
+#define CLK_DIV_SCLK_MMC1_A 109
+#define CLK_DIV_SCLK_MMC0_B 110
+#define CLK_DIV_SCLK_MMC0_A 111
+#define CLK_DIV_SCLK_MMC2_B 112
+#define CLK_DIV_SCLK_MMC2_A 113
+#define CLK_DIV_SCLK_SPI1_B 114
+#define CLK_DIV_SCLK_SPI1_A 115
+#define CLK_DIV_SCLK_SPI0_B 116
+#define CLK_DIV_SCLK_SPI0_A 117
+#define CLK_DIV_SCLK_SPI2_B 118
+#define CLK_DIV_SCLK_SPI2_A 119
+#define CLK_DIV_SCLK_UART2 120
+#define CLK_DIV_SCLK_UART1 121
+#define CLK_DIV_SCLK_UART0 122
+#define CLK_DIV_SCLK_SPI4_B 123
+#define CLK_DIV_SCLK_SPI4_A 124
+#define CLK_DIV_SCLK_SPI3_B 125
+#define CLK_DIV_SCLK_SPI3_A 126
+#define CLK_DIV_SCLK_I2S1 127
+#define CLK_DIV_SCLK_PCM1 128
+#define CLK_DIV_SCLK_AUDIO1 129
+#define CLK_DIV_SCLK_AUDIO0 130
+#define CLK_DIV_ACLK_GSCL_111 131
+#define CLK_DIV_ACLK_GSCL_333 132
+#define CLK_DIV_ACLK_HEVC_400 133
+#define CLK_DIV_ACLK_MFC_400 134
+#define CLK_DIV_ACLK_G2D_266 135
+#define CLK_DIV_ACLK_G2D_400 136
+#define CLK_DIV_ACLK_G3D_400 137
+#define CLK_DIV_ACLK_BUS0_400 138
+#define CLK_DIV_ACLK_BUS1_400 139
+#define CLK_DIV_SCLK_PCIE_100 140
+#define CLK_DIV_SCLK_USBHOST30 141
+#define CLK_DIV_SCLK_UFSUNIPRO 142
+#define CLK_DIV_SCLK_USBDRD30 143
+#define CLK_DIV_SCLK_JPEG 144
+#define CLK_DIV_ACLK_MSCL_400 145
+#define CLK_DIV_ACLK_ISP_DIS_400 146
+#define CLK_DIV_ACLK_ISP_400 147
+#define CLK_DIV_ACLK_CAM0_333 148
+#define CLK_DIV_ACLK_CAM0_400 149
+#define CLK_DIV_ACLK_CAM0_552 150
+#define CLK_DIV_ACLK_CAM1_333 151
+#define CLK_DIV_ACLK_CAM1_400 152
+#define CLK_DIV_ACLK_CAM1_552 153
+#define CLK_DIV_SCLK_ISP_UART 154
+#define CLK_DIV_SCLK_ISP_SPI1_B 155
+#define CLK_DIV_SCLK_ISP_SPI1_A 156
+#define CLK_DIV_SCLK_ISP_SPI0_B 157
+#define CLK_DIV_SCLK_ISP_SPI0_A 158
+#define CLK_DIV_SCLK_ISP_SENSOR2_B 159
+#define CLK_DIV_SCLK_ISP_SENSOR2_A 160
+#define CLK_DIV_SCLK_ISP_SENSOR1_B 161
+#define CLK_DIV_SCLK_ISP_SENSOR1_A 162
+#define CLK_DIV_SCLK_ISP_SENSOR0_B 163
+#define CLK_DIV_SCLK_ISP_SENSOR0_A 164
+
+#define CLK_ACLK_PERIC_66 200
+#define CLK_ACLK_PERIS_66 201
+#define CLK_ACLK_FSYS_200 202
+#define CLK_SCLK_MMC2_FSYS 203
+#define CLK_SCLK_MMC1_FSYS 204
+#define CLK_SCLK_MMC0_FSYS 205
+#define CLK_SCLK_SPI4_PERIC 206
+#define CLK_SCLK_SPI3_PERIC 207
+#define CLK_SCLK_UART2_PERIC 208
+#define CLK_SCLK_UART1_PERIC 209
+#define CLK_SCLK_UART0_PERIC 210
+#define CLK_SCLK_SPI2_PERIC 211
+#define CLK_SCLK_SPI1_PERIC 212
+#define CLK_SCLK_SPI0_PERIC 213
+#define CLK_SCLK_SPDIF_PERIC 214
+#define CLK_SCLK_I2S1_PERIC 215
+#define CLK_SCLK_PCM1_PERIC 216
+#define CLK_SCLK_SLIMBUS 217
+#define CLK_SCLK_AUDIO1 218
+#define CLK_SCLK_AUDIO0 219
+#define CLK_ACLK_G2D_266 220
+#define CLK_ACLK_G2D_400 221
+#define CLK_ACLK_G3D_400 222
+#define CLK_ACLK_IMEM_SSX_266 223
+#define CLK_ACLK_BUS0_400 224
+#define CLK_ACLK_BUS1_400 225
+#define CLK_ACLK_IMEM_200 226
+#define CLK_ACLK_IMEM_266 227
+#define CLK_SCLK_PCIE_100_FSYS 228
+#define CLK_SCLK_UFSUNIPRO_FSYS 229
+#define CLK_SCLK_USBHOST30_FSYS 230
+#define CLK_SCLK_USBDRD30_FSYS 231
+#define CLK_ACLK_GSCL_111 232
+#define CLK_ACLK_GSCL_333 233
+#define CLK_SCLK_JPEG_MSCL 234
+#define CLK_ACLK_MSCL_400 235
+#define CLK_ACLK_MFC_400 236
+#define CLK_ACLK_HEVC_400 237
+#define CLK_ACLK_ISP_DIS_400 238
+#define CLK_ACLK_ISP_400 239
+#define CLK_ACLK_CAM0_333 240
+#define CLK_ACLK_CAM0_400 241
+#define CLK_ACLK_CAM0_552 242
+#define CLK_ACLK_CAM1_333 243
+#define CLK_ACLK_CAM1_400 244
+#define CLK_ACLK_CAM1_552 245
+#define CLK_SCLK_ISP_SENSOR2 246
+#define CLK_SCLK_ISP_SENSOR1 247
+#define CLK_SCLK_ISP_SENSOR0 248
+#define CLK_SCLK_ISP_MCTADC_CAM1 249
+#define CLK_SCLK_ISP_UART_CAM1 250
+#define CLK_SCLK_ISP_SPI1_CAM1 251
+#define CLK_SCLK_ISP_SPI0_CAM1 252
+#define CLK_SCLK_HDMI_SPDIF_DISP 253
+
+#define TOP_NR_CLK 254
+
+/* CMU_CPIF */
+#define CLK_FOUT_MPHY_PLL 1
+
+#define CLK_MOUT_MPHY_PLL 2
+
+#define CLK_DIV_SCLK_MPHY 10
+
+#define CLK_SCLK_MPHY_PLL 11
+#define CLK_SCLK_UFS_MPHY 11
+
+#define CPIF_NR_CLK 12
+
+/* CMU_MIF */
+#define CLK_FOUT_MEM0_PLL 1
+#define CLK_FOUT_MEM1_PLL 2
+#define CLK_FOUT_BUS_PLL 3
+#define CLK_FOUT_MFC_PLL 4
+#define CLK_DOUT_MFC_PLL 5
+#define CLK_DOUT_BUS_PLL 6
+#define CLK_DOUT_MEM1_PLL 7
+#define CLK_DOUT_MEM0_PLL 8
+
+#define CLK_MOUT_MFC_PLL_DIV2 10
+#define CLK_MOUT_BUS_PLL_DIV2 11
+#define CLK_MOUT_MEM1_PLL_DIV2 12
+#define CLK_MOUT_MEM0_PLL_DIV2 13
+#define CLK_MOUT_MFC_PLL 14
+#define CLK_MOUT_BUS_PLL 15
+#define CLK_MOUT_MEM1_PLL 16
+#define CLK_MOUT_MEM0_PLL 17
+#define CLK_MOUT_CLK2X_PHY_C 18
+#define CLK_MOUT_CLK2X_PHY_B 19
+#define CLK_MOUT_CLK2X_PHY_A 20
+#define CLK_MOUT_CLKM_PHY_C 21
+#define CLK_MOUT_CLKM_PHY_B 22
+#define CLK_MOUT_CLKM_PHY_A 23
+#define CLK_MOUT_ACLK_MIFNM_200 24
+#define CLK_MOUT_ACLK_MIFNM_400 25
+#define CLK_MOUT_ACLK_DISP_333_B 26
+#define CLK_MOUT_ACLK_DISP_333_A 27
+#define CLK_MOUT_SCLK_DECON_VCLK_C 28
+#define CLK_MOUT_SCLK_DECON_VCLK_B 29
+#define CLK_MOUT_SCLK_DECON_VCLK_A 30
+#define CLK_MOUT_SCLK_DECON_ECLK_C 31
+#define CLK_MOUT_SCLK_DECON_ECLK_B 32
+#define CLK_MOUT_SCLK_DECON_ECLK_A 33
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_C 34
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_B 35
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_A 36
+#define CLK_MOUT_SCLK_DSD_C 37
+#define CLK_MOUT_SCLK_DSD_B 38
+#define CLK_MOUT_SCLK_DSD_A 39
+#define CLK_MOUT_SCLK_DSIM0_C 40
+#define CLK_MOUT_SCLK_DSIM0_B 41
+#define CLK_MOUT_SCLK_DSIM0_A 42
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_C 46
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_B 47
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_A 48
+#define CLK_MOUT_SCLK_DSIM1_C 49
+#define CLK_MOUT_SCLK_DSIM1_B 50
+#define CLK_MOUT_SCLK_DSIM1_A 51
+
+#define CLK_DIV_SCLK_HPM_MIF 55
+#define CLK_DIV_ACLK_DREX1 56
+#define CLK_DIV_ACLK_DREX0 57
+#define CLK_DIV_CLK2XPHY 58
+#define CLK_DIV_ACLK_MIF_266 59
+#define CLK_DIV_ACLK_MIFND_133 60
+#define CLK_DIV_ACLK_MIF_133 61
+#define CLK_DIV_ACLK_MIFNM_200 62
+#define CLK_DIV_ACLK_MIF_200 63
+#define CLK_DIV_ACLK_MIF_400 64
+#define CLK_DIV_ACLK_BUS2_400 65
+#define CLK_DIV_ACLK_DISP_333 66
+#define CLK_DIV_ACLK_CPIF_200 67
+#define CLK_DIV_SCLK_DSIM1 68
+#define CLK_DIV_SCLK_DECON_TV_VCLK 69
+#define CLK_DIV_SCLK_DSIM0 70
+#define CLK_DIV_SCLK_DSD 71
+#define CLK_DIV_SCLK_DECON_TV_ECLK 72
+#define CLK_DIV_SCLK_DECON_VCLK 73
+#define CLK_DIV_SCLK_DECON_ECLK 74
+#define CLK_DIV_MIF_PRE 75
+
+#define CLK_CLK2X_PHY1 80
+#define CLK_CLK2X_PHY0 81
+#define CLK_CLKM_PHY1 82
+#define CLK_CLKM_PHY0 83
+#define CLK_RCLK_DREX1 84
+#define CLK_RCLK_DREX0 85
+#define CLK_ACLK_DREX1_TZ 86
+#define CLK_ACLK_DREX0_TZ 87
+#define CLK_ACLK_DREX1_PEREV 88
+#define CLK_ACLK_DREX0_PEREV 89
+#define CLK_ACLK_DREX1_MEMIF 90
+#define CLK_ACLK_DREX0_MEMIF 91
+#define CLK_ACLK_DREX1_SCH 92
+#define CLK_ACLK_DREX0_SCH 93
+#define CLK_ACLK_DREX1_BUSIF 94
+#define CLK_ACLK_DREX0_BUSIF 95
+#define CLK_ACLK_DREX1_BUSIF_RD 96
+#define CLK_ACLK_DREX0_BUSIF_RD 97
+#define CLK_ACLK_DREX1 98
+#define CLK_ACLK_DREX0 99
+#define CLK_ACLK_ASYNCAXIM_ATLAS_CCIX 100
+#define CLK_ACLK_ASYNCAXIS_ATLAS_MIF 101
+#define CLK_ACLK_ASYNCAXIM_ATLAS_MIF 102
+#define CLK_ACLK_ASYNCAXIS_MIF_IMEM 103
+#define CLK_ACLK_ASYNCAXIS_NOC_P_CCI 104
+#define CLK_ACLK_ASYNCAXIM_NOC_P_CCI 105
+#define CLK_ACLK_ASYNCAXIS_CP1 106
+#define CLK_ACLK_ASYNCAXIM_CP1 107
+#define CLK_ACLK_ASYNCAXIS_CP0 108
+#define CLK_ACLK_ASYNCAXIM_CP0 109
+#define CLK_ACLK_ASYNCAXIS_DREX1_3 110
+#define CLK_ACLK_ASYNCAXIM_DREX1_3 111
+#define CLK_ACLK_ASYNCAXIS_DREX1_1 112
+#define CLK_ACLK_ASYNCAXIM_DREX1_1 113
+#define CLK_ACLK_ASYNCAXIS_DREX1_0 114
+#define CLK_ACLK_ASYNCAXIM_DREX1_0 115
+#define CLK_ACLK_ASYNCAXIS_DREX0_3 116
+#define CLK_ACLK_ASYNCAXIM_DREX0_3 117
+#define CLK_ACLK_ASYNCAXIS_DREX0_1 118
+#define CLK_ACLK_ASYNCAXIM_DREX0_1 119
+#define CLK_ACLK_ASYNCAXIS_DREX0_0 120
+#define CLK_ACLK_ASYNCAXIM_DREX0_0 121
+#define CLK_ACLK_AHB2APB_MIF2P 122
+#define CLK_ACLK_AHB2APB_MIF1P 123
+#define CLK_ACLK_AHB2APB_MIF0P 124
+#define CLK_ACLK_IXIU_CCI 125
+#define CLK_ACLK_XIU_MIFSFRX 126
+#define CLK_ACLK_MIFNP_133 127
+#define CLK_ACLK_MIFNM_200 128
+#define CLK_ACLK_MIFND_133 129
+#define CLK_ACLK_MIFND_400 130
+#define CLK_ACLK_CCI 131
+#define CLK_ACLK_MIFND_266 132
+#define CLK_ACLK_PPMU_DREX1S3 133
+#define CLK_ACLK_PPMU_DREX1S1 134
+#define CLK_ACLK_PPMU_DREX1S0 135
+#define CLK_ACLK_PPMU_DREX0S3 136
+#define CLK_ACLK_PPMU_DREX0S1 137
+#define CLK_ACLK_PPMU_DREX0S0 138
+#define CLK_ACLK_BTS_APOLLO 139
+#define CLK_ACLK_BTS_ATLAS 140
+#define CLK_ACLK_ACE_SEL_APOLL 141
+#define CLK_ACLK_ACE_SEL_ATLAS 142
+#define CLK_ACLK_AXIDS_CCI_MIFSFRX 143
+#define CLK_ACLK_AXIUS_ATLAS_CCI 144
+#define CLK_ACLK_AXISYNCDNS_CCI 145
+#define CLK_ACLK_AXISYNCDN_CCI 146
+#define CLK_ACLK_AXISYNCDN_NOC_D 147
+#define CLK_ACLK_ASYNCACEM_APOLLO_CCI 148
+#define CLK_ACLK_ASYNCACEM_ATLAS_CCI 149
+#define CLK_ACLK_ASYNCAPBS_MIF_CSSYS 150
+#define CLK_ACLK_BUS2_400 151
+#define CLK_ACLK_DISP_333 152
+#define CLK_ACLK_CPIF_200 153
+#define CLK_PCLK_PPMU_DREX1S3 154
+#define CLK_PCLK_PPMU_DREX1S1 155
+#define CLK_PCLK_PPMU_DREX1S0 156
+#define CLK_PCLK_PPMU_DREX0S3 157
+#define CLK_PCLK_PPMU_DREX0S1 158
+#define CLK_PCLK_PPMU_DREX0S0 159
+#define CLK_PCLK_BTS_APOLLO 160
+#define CLK_PCLK_BTS_ATLAS 161
+#define CLK_PCLK_ASYNCAXI_NOC_P_CCI 162
+#define CLK_PCLK_ASYNCAXI_CP1 163
+#define CLK_PCLK_ASYNCAXI_CP0 164
+#define CLK_PCLK_ASYNCAXI_DREX1_3 165
+#define CLK_PCLK_ASYNCAXI_DREX1_1 166
+#define CLK_PCLK_ASYNCAXI_DREX1_0 167
+#define CLK_PCLK_ASYNCAXI_DREX0_3 168
+#define CLK_PCLK_ASYNCAXI_DREX0_1 169
+#define CLK_PCLK_ASYNCAXI_DREX0_0 170
+#define CLK_PCLK_MIFSRVND_133 171
+#define CLK_PCLK_PMU_MIF 172
+#define CLK_PCLK_SYSREG_MIF 173
+#define CLK_PCLK_GPIO_ALIVE 174
+#define CLK_PCLK_ABB 175
+#define CLK_PCLK_PMU_APBIF 176
+#define CLK_PCLK_DDR_PHY1 177
+#define CLK_PCLK_DREX1 178
+#define CLK_PCLK_DDR_PHY0 179
+#define CLK_PCLK_DREX0 180
+#define CLK_PCLK_DREX0_TZ 181
+#define CLK_PCLK_DREX1_TZ 182
+#define CLK_PCLK_MONOTONIC_CNT 183
+#define CLK_PCLK_RTC 184
+#define CLK_SCLK_DSIM1_DISP 185
+#define CLK_SCLK_DECON_TV_VCLK_DISP 186
+#define CLK_SCLK_FREQ_DET_BUS_PLL 187
+#define CLK_SCLK_FREQ_DET_MFC_PLL 188
+#define CLK_SCLK_FREQ_DET_MEM0_PLL 189
+#define CLK_SCLK_FREQ_DET_MEM1_PLL 190
+#define CLK_SCLK_DSIM0_DISP 191
+#define CLK_SCLK_DSD_DISP 192
+#define CLK_SCLK_DECON_TV_ECLK_DISP 193
+#define CLK_SCLK_DECON_VCLK_DISP 194
+#define CLK_SCLK_DECON_ECLK_DISP 195
+#define CLK_SCLK_HPM_MIF 196
+#define CLK_SCLK_MFC_PLL 197
+#define CLK_SCLK_BUS_PLL 198
+#define CLK_SCLK_BUS_PLL_APOLLO 199
+#define CLK_SCLK_BUS_PLL_ATLAS 200
+
+#define MIF_NR_CLK 201
+
+/* CMU_PERIC */
+#define CLK_PCLK_SPI2 1
+#define CLK_PCLK_SPI1 2
+#define CLK_PCLK_SPI0 3
+#define CLK_PCLK_UART2 4
+#define CLK_PCLK_UART1 5
+#define CLK_PCLK_UART0 6
+#define CLK_PCLK_HSI2C3 7
+#define CLK_PCLK_HSI2C2 8
+#define CLK_PCLK_HSI2C1 9
+#define CLK_PCLK_HSI2C0 10
+#define CLK_PCLK_I2C7 11
+#define CLK_PCLK_I2C6 12
+#define CLK_PCLK_I2C5 13
+#define CLK_PCLK_I2C4 14
+#define CLK_PCLK_I2C3 15
+#define CLK_PCLK_I2C2 16
+#define CLK_PCLK_I2C1 17
+#define CLK_PCLK_I2C0 18
+#define CLK_PCLK_SPI4 19
+#define CLK_PCLK_SPI3 20
+#define CLK_PCLK_HSI2C11 21
+#define CLK_PCLK_HSI2C10 22
+#define CLK_PCLK_HSI2C9 23
+#define CLK_PCLK_HSI2C8 24
+#define CLK_PCLK_HSI2C7 25
+#define CLK_PCLK_HSI2C6 26
+#define CLK_PCLK_HSI2C5 27
+#define CLK_PCLK_HSI2C4 28
+#define CLK_SCLK_SPI4 29
+#define CLK_SCLK_SPI3 30
+#define CLK_SCLK_SPI2 31
+#define CLK_SCLK_SPI1 32
+#define CLK_SCLK_SPI0 33
+#define CLK_SCLK_UART2 34
+#define CLK_SCLK_UART1 35
+#define CLK_SCLK_UART0 36
+#define CLK_ACLK_AHB2APB_PERIC2P 37
+#define CLK_ACLK_AHB2APB_PERIC1P 38
+#define CLK_ACLK_AHB2APB_PERIC0P 39
+#define CLK_ACLK_PERICNP_66 40
+#define CLK_PCLK_SCI 41
+#define CLK_PCLK_GPIO_FINGER 42
+#define CLK_PCLK_GPIO_ESE 43
+#define CLK_PCLK_PWM 44
+#define CLK_PCLK_SPDIF 45
+#define CLK_PCLK_PCM1 46
+#define CLK_PCLK_I2S1 47
+#define CLK_PCLK_ADCIF 48
+#define CLK_PCLK_GPIO_TOUCH 49
+#define CLK_PCLK_GPIO_NFC 50
+#define CLK_PCLK_GPIO_PERIC 51
+#define CLK_PCLK_PMU_PERIC 52
+#define CLK_PCLK_SYSREG_PERIC 53
+#define CLK_SCLK_IOCLK_SPI4 54
+#define CLK_SCLK_IOCLK_SPI3 55
+#define CLK_SCLK_SCI 56
+#define CLK_SCLK_SC_IN 57
+#define CLK_SCLK_PWM 58
+#define CLK_SCLK_IOCLK_SPI2 59
+#define CLK_SCLK_IOCLK_SPI1 60
+#define CLK_SCLK_IOCLK_SPI0 61
+#define CLK_SCLK_IOCLK_I2S1_BCLK 62
+#define CLK_SCLK_SPDIF 63
+#define CLK_SCLK_PCM1 64
+#define CLK_SCLK_I2S1 65
+
+#define CLK_DIV_SCLK_SCI 70
+#define CLK_DIV_SCLK_SC_IN 71
+
+#define PERIC_NR_CLK 72
+
+/* CMU_PERIS */
+#define CLK_PCLK_HPM_APBIF 1
+#define CLK_PCLK_TMU1_APBIF 2
+#define CLK_PCLK_TMU0_APBIF 3
+#define CLK_PCLK_PMU_PERIS 4
+#define CLK_PCLK_SYSREG_PERIS 5
+#define CLK_PCLK_CMU_TOP_APBIF 6
+#define CLK_PCLK_WDT_APOLLO 7
+#define CLK_PCLK_WDT_ATLAS 8
+#define CLK_PCLK_MCT 9
+#define CLK_PCLK_HDMI_CEC 10
+#define CLK_ACLK_AHB2APB_PERIS1P 11
+#define CLK_ACLK_AHB2APB_PERIS0P 12
+#define CLK_ACLK_PERISNP_66 13
+#define CLK_PCLK_TZPC12 14
+#define CLK_PCLK_TZPC11 15
+#define CLK_PCLK_TZPC10 16
+#define CLK_PCLK_TZPC9 17
+#define CLK_PCLK_TZPC8 18
+#define CLK_PCLK_TZPC7 19
+#define CLK_PCLK_TZPC6 20
+#define CLK_PCLK_TZPC5 21
+#define CLK_PCLK_TZPC4 22
+#define CLK_PCLK_TZPC3 23
+#define CLK_PCLK_TZPC2 24
+#define CLK_PCLK_TZPC1 25
+#define CLK_PCLK_TZPC0 26
+#define CLK_PCLK_SECKEY_APBIF 27
+#define CLK_PCLK_CHIPID_APBIF 28
+#define CLK_PCLK_TOPRTC 29
+#define CLK_PCLK_CUSTOM_EFUSE_APBIF 30
+#define CLK_PCLK_ANTIRBK_CNT_APBIF 31
+#define CLK_PCLK_OTP_CON_APBIF 32
+#define CLK_SCLK_ASV_TB 33
+#define CLK_SCLK_TMU1 34
+#define CLK_SCLK_TMU0 35
+#define CLK_SCLK_SECKEY 36
+#define CLK_SCLK_CHIPID 37
+#define CLK_SCLK_TOPRTC 38
+#define CLK_SCLK_CUSTOM_EFUSE 39
+#define CLK_SCLK_ANTIRBK_CNT 40
+#define CLK_SCLK_OTP_CON 41
+
+#define PERIS_NR_CLK 42
+
+/* CMU_FSYS */
+#define CLK_MOUT_ACLK_FSYS_200_USER 1
+#define CLK_MOUT_SCLK_MMC2_USER 2
+#define CLK_MOUT_SCLK_MMC1_USER 3
+#define CLK_MOUT_SCLK_MMC0_USER 4
+#define CLK_MOUT_SCLK_UFS_MPHY_USER 5
+#define CLK_MOUT_SCLK_PCIE_100_USER 6
+#define CLK_MOUT_SCLK_UFSUNIPRO_USER 7
+#define CLK_MOUT_SCLK_USBHOST30_USER 8
+#define CLK_MOUT_SCLK_USBDRD30_USER 9
+#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_USER 10
+#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_USER 11
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_HSIC1_USER 12
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_CLK48MOHCI_USER 13
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHYCLOCK_USER 14
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHY_FREECLK_USER 15
+#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_USER 16
+#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_USER 17
+#define CLK_MOUT_PHYCLK_UFS_RX1_SYMBOL_USER 18
+#define CLK_MOUT_PHYCLK_UFS_RX0_SYMBOL_USER 19
+#define CLK_MOUT_PHYCLK_UFS_TX1_SYMBOL_USER 20
+#define CLK_MOUT_PHYCLK_UFS_TX0_SYMBOL_USER 21
+#define CLK_MOUT_PHYCLK_LLI_MPHY_TO_UFS_USER 22
+#define CLK_MOUT_SCLK_MPHY 23
+
+#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY 25
+#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY 26
+#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY 27
+#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY 28
+#define CLK_PHYCLK_USBHOST20_PHY_FREECLK_PHY 29
+#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK_PHY 30
+#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI_PHY 31
+#define CLK_PHYCLK_USBHOST20_PHY_HSIC1_PHY 32
+#define CLK_PHYCLK_UFS_TX0_SYMBOL_PHY 33
+#define CLK_PHYCLK_UFS_RX0_SYMBOL_PHY 34
+#define CLK_PHYCLK_UFS_TX1_SYMBOL_PHY 35
+#define CLK_PHYCLK_UFS_RX1_SYMBOL_PHY 36
+#define CLK_PHYCLK_LLI_MPHY_TO_UFS_PHY 37
+
+#define CLK_ACLK_PCIE 50
+#define CLK_ACLK_PDMA1 51
+#define CLK_ACLK_TSI 52
+#define CLK_ACLK_MMC2 53
+#define CLK_ACLK_MMC1 54
+#define CLK_ACLK_MMC0 55
+#define CLK_ACLK_UFS 56
+#define CLK_ACLK_USBHOST20 57
+#define CLK_ACLK_USBHOST30 58
+#define CLK_ACLK_USBDRD30 59
+#define CLK_ACLK_PDMA0 60
+#define CLK_SCLK_MMC2 61
+#define CLK_SCLK_MMC1 62
+#define CLK_SCLK_MMC0 63
+#define CLK_PDMA1 64
+#define CLK_PDMA0 65
+#define CLK_ACLK_XIU_FSYSPX 66
+#define CLK_ACLK_AHB_USBLINKH1 67
+#define CLK_ACLK_SMMU_PDMA1 68
+#define CLK_ACLK_BTS_PCIE 69
+#define CLK_ACLK_AXIUS_PDMA1 70
+#define CLK_ACLK_SMMU_PDMA0 71
+#define CLK_ACLK_BTS_UFS 72
+#define CLK_ACLK_BTS_USBHOST30 73
+#define CLK_ACLK_BTS_USBDRD30 74
+#define CLK_ACLK_AXIUS_PDMA0 75
+#define CLK_ACLK_AXIUS_USBHS 76
+#define CLK_ACLK_AXIUS_FSYSSX 77
+#define CLK_ACLK_AHB2APB_FSYSP 78
+#define CLK_ACLK_AHB2AXI_USBHS 79
+#define CLK_ACLK_AHB_USBLINKH0 80
+#define CLK_ACLK_AHB_USBHS 81
+#define CLK_ACLK_AHB_FSYSH 82
+#define CLK_ACLK_XIU_FSYSX 83
+#define CLK_ACLK_XIU_FSYSSX 84
+#define CLK_ACLK_FSYSNP_200 85
+#define CLK_ACLK_FSYSND_200 86
+#define CLK_PCLK_PCIE_CTRL 87
+#define CLK_PCLK_SMMU_PDMA1 88
+#define CLK_PCLK_PCIE_PHY 89
+#define CLK_PCLK_BTS_PCIE 90
+#define CLK_PCLK_SMMU_PDMA0 91
+#define CLK_PCLK_BTS_UFS 92
+#define CLK_PCLK_BTS_USBHOST30 93
+#define CLK_PCLK_BTS_USBDRD30 94
+#define CLK_PCLK_GPIO_FSYS 95
+#define CLK_PCLK_PMU_FSYS 96
+#define CLK_PCLK_SYSREG_FSYS 97
+#define CLK_SCLK_PCIE_100 98
+#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK 99
+#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK 100
+#define CLK_PHYCLK_UFS_RX1_SYMBOL 101
+#define CLK_PHYCLK_UFS_RX0_SYMBOL 102
+#define CLK_PHYCLK_UFS_TX1_SYMBOL 103
+#define CLK_PHYCLK_UFS_TX0_SYMBOL 104
+#define CLK_PHYCLK_USBHOST20_PHY_HSIC1 105
+#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI 106
+#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK 107
+#define CLK_PHYCLK_USBHOST20_PHY_FREECLK 108
+#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 109
+#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK 110
+#define CLK_SCLK_MPHY 111
+#define CLK_SCLK_UFSUNIPRO 112
+#define CLK_SCLK_USBHOST30 113
+#define CLK_SCLK_USBDRD30 114
+
+#define FSYS_NR_CLK 115
+
+/* CMU_G2D */
+#define CLK_MUX_ACLK_G2D_266_USER 1
+#define CLK_MUX_ACLK_G2D_400_USER 2
+
+#define CLK_DIV_PCLK_G2D 3
+
+#define CLK_ACLK_SMMU_MDMA1 4
+#define CLK_ACLK_BTS_MDMA1 5
+#define CLK_ACLK_BTS_G2D 6
+#define CLK_ACLK_ALB_G2D 7
+#define CLK_ACLK_AXIUS_G2DX 8
+#define CLK_ACLK_ASYNCAXI_SYSX 9
+#define CLK_ACLK_AHB2APB_G2D1P 10
+#define CLK_ACLK_AHB2APB_G2D0P 11
+#define CLK_ACLK_XIU_G2DX 12
+#define CLK_ACLK_G2DNP_133 13
+#define CLK_ACLK_G2DND_400 14
+#define CLK_ACLK_MDMA1 15
+#define CLK_ACLK_G2D 16
+#define CLK_ACLK_SMMU_G2D 17
+#define CLK_PCLK_SMMU_MDMA1 18
+#define CLK_PCLK_BTS_MDMA1 19
+#define CLK_PCLK_BTS_G2D 20
+#define CLK_PCLK_ALB_G2D 21
+#define CLK_PCLK_ASYNCAXI_SYSX 22
+#define CLK_PCLK_PMU_G2D 23
+#define CLK_PCLK_SYSREG_G2D 24
+#define CLK_PCLK_G2D 25
+#define CLK_PCLK_SMMU_G2D 26
+
+#define G2D_NR_CLK 27
+
+/* CMU_DISP */
+#define CLK_FOUT_DISP_PLL 1
+
+#define CLK_MOUT_DISP_PLL 2
+#define CLK_MOUT_SCLK_DSIM1_USER 3
+#define CLK_MOUT_SCLK_DSIM0_USER 4
+#define CLK_MOUT_SCLK_DSD_USER 5
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_USER 6
+#define CLK_MOUT_SCLK_DECON_VCLK_USER 7
+#define CLK_MOUT_SCLK_DECON_ECLK_USER 8
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_USER 9
+#define CLK_MOUT_ACLK_DISP_333_USER 10
+#define CLK_MOUT_PHYCLK_MIPIDPHY1_BITCLKDIV8_USER 11
+#define CLK_MOUT_PHYCLK_MIPIDPHY1_RXCLKESC0_USER 12
+#define CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER 13
+#define CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER 14
+#define CLK_MOUT_PHYCLK_HDMIPHY_TMDS_CLKO_USER 15
+#define CLK_MOUT_PHYCLK_HDMIPHY_PIXEL_CLKO_USER 16
+#define CLK_MOUT_SCLK_DSIM0 17
+#define CLK_MOUT_SCLK_DECON_TV_ECLK 18
+#define CLK_MOUT_SCLK_DECON_VCLK 19
+#define CLK_MOUT_SCLK_DECON_ECLK 20
+#define CLK_MOUT_SCLK_DSIM1_B_DISP 21
+#define CLK_MOUT_SCLK_DSIM1_A_DISP 22
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_C_DISP 23
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_B_DISP 24
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_A_DISP 25
+
+#define CLK_DIV_SCLK_DSIM1_DISP 30
+#define CLK_DIV_SCLK_DECON_TV_VCLK_DISP 31
+#define CLK_DIV_SCLK_DSIM0_DISP 32
+#define CLK_DIV_SCLK_DECON_TV_ECLK_DISP 33
+#define CLK_DIV_SCLK_DECON_VCLK_DISP 34
+#define CLK_DIV_SCLK_DECON_ECLK_DISP 35
+#define CLK_DIV_PCLK_DISP 36
+
+#define CLK_ACLK_DECON_TV 40
+#define CLK_ACLK_DECON 41
+#define CLK_ACLK_SMMU_TV1X 42
+#define CLK_ACLK_SMMU_TV0X 43
+#define CLK_ACLK_SMMU_DECON1X 44
+#define CLK_ACLK_SMMU_DECON0X 45
+#define CLK_ACLK_BTS_DECON_TV_M3 46
+#define CLK_ACLK_BTS_DECON_TV_M2 47
+#define CLK_ACLK_BTS_DECON_TV_M1 48
+#define CLK_ACLK_BTS_DECON_TV_M0 49
+#define CLK_ACLK_BTS_DECON_NM4 50
+#define CLK_ACLK_BTS_DECON_NM3 51
+#define CLK_ACLK_BTS_DECON_NM2 52
+#define CLK_ACLK_BTS_DECON_NM1 53
+#define CLK_ACLK_BTS_DECON_NM0 54
+#define CLK_ACLK_AHB2APB_DISPSFR2P 55
+#define CLK_ACLK_AHB2APB_DISPSFR1P 56
+#define CLK_ACLK_AHB2APB_DISPSFR0P 57
+#define CLK_ACLK_AHB_DISPH 58
+#define CLK_ACLK_XIU_TV1X 59
+#define CLK_ACLK_XIU_TV0X 60
+#define CLK_ACLK_XIU_DECON1X 61
+#define CLK_ACLK_XIU_DECON0X 62
+#define CLK_ACLK_XIU_DISP1X 63
+#define CLK_ACLK_XIU_DISPNP_100 64
+#define CLK_ACLK_DISP1ND_333 65
+#define CLK_ACLK_DISP0ND_333 66
+#define CLK_PCLK_SMMU_TV1X 67
+#define CLK_PCLK_SMMU_TV0X 68
+#define CLK_PCLK_SMMU_DECON1X 69
+#define CLK_PCLK_SMMU_DECON0X 70
+#define CLK_PCLK_BTS_DECON_TV_M3 71
+#define CLK_PCLK_BTS_DECON_TV_M2 72
+#define CLK_PCLK_BTS_DECON_TV_M1 73
+#define CLK_PCLK_BTS_DECON_TV_M0 74
+#define CLK_PCLK_BTS_DECONM4 75
+#define CLK_PCLK_BTS_DECONM3 76
+#define CLK_PCLK_BTS_DECONM2 77
+#define CLK_PCLK_BTS_DECONM1 78
+#define CLK_PCLK_BTS_DECONM0 79
+#define CLK_PCLK_MIC1 80
+#define CLK_PCLK_PMU_DISP 81
+#define CLK_PCLK_SYSREG_DISP 82
+#define CLK_PCLK_HDMIPHY 83
+#define CLK_PCLK_HDMI 84
+#define CLK_PCLK_MIC0 85
+#define CLK_PCLK_DSIM1 86
+#define CLK_PCLK_DSIM0 87
+#define CLK_PCLK_DECON_TV 88
+#define CLK_PHYCLK_MIPIDPHY1_BITCLKDIV8 89
+#define CLK_PHYCLK_MIPIDPHY1_RXCLKESC0 90
+#define CLK_SCLK_RGB_TV_VCLK_TO_DSIM1 91
+#define CLK_SCLK_RGB_TV_VCLK_TO_MIC1 92
+#define CLK_SCLK_DSIM1 93
+#define CLK_SCLK_DECON_TV_VCLK 94
+#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8 95
+#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0 96
+#define CLK_PHYCLK_HDMIPHY_TMDS_CLKO 97
+#define CLK_PHYCLK_HDMI_PIXEL 98
+#define CLK_SCLK_RGB_VCLK_TO_SMIES 99
+#define CLK_SCLK_FREQ_DET_DISP_PLL 100
+#define CLK_SCLK_RGB_VCLK_TO_DSIM0 101
+#define CLK_SCLK_RGB_VCLK_TO_MIC0 102
+#define CLK_SCLK_DSD 103
+#define CLK_SCLK_HDMI_SPDIF 104
+#define CLK_SCLK_DSIM0 105
+#define CLK_SCLK_DECON_TV_ECLK 106
+#define CLK_SCLK_DECON_VCLK 107
+#define CLK_SCLK_DECON_ECLK 108
+#define CLK_SCLK_RGB_VCLK 109
+#define CLK_SCLK_RGB_TV_VCLK 110
+
+#define DISP_NR_CLK 111
+
+/* CMU_AUD */
+#define CLK_MOUT_AUD_PLL_USER 1
+#define CLK_MOUT_SCLK_AUD_PCM 2
+#define CLK_MOUT_SCLK_AUD_I2S 3
+
+#define CLK_DIV_ATCLK_AUD 4
+#define CLK_DIV_PCLK_DBG_AUD 5
+#define CLK_DIV_ACLK_AUD 6
+#define CLK_DIV_AUD_CA5 7
+#define CLK_DIV_SCLK_AUD_SLIMBUS 8
+#define CLK_DIV_SCLK_AUD_UART 9
+#define CLK_DIV_SCLK_AUD_PCM 10
+#define CLK_DIV_SCLK_AUD_I2S 11
+
+#define CLK_ACLK_INTR_CTRL 12
+#define CLK_ACLK_AXIDS2_LPASSP 13
+#define CLK_ACLK_AXIDS1_LPASSP 14
+#define CLK_ACLK_AXI2APB1_LPASSP 15
+#define CLK_ACLK_AXI2APH_LPASSP 16
+#define CLK_ACLK_SMMU_LPASSX 17
+#define CLK_ACLK_AXIDS0_LPASSP 18
+#define CLK_ACLK_AXI2APB0_LPASSP 19
+#define CLK_ACLK_XIU_LPASSX 20
+#define CLK_ACLK_AUDNP_133 21
+#define CLK_ACLK_AUDND_133 22
+#define CLK_ACLK_SRAMC 23
+#define CLK_ACLK_DMAC 24
+#define CLK_PCLK_WDT1 25
+#define CLK_PCLK_WDT0 26
+#define CLK_PCLK_SFR1 27
+#define CLK_PCLK_SMMU_LPASSX 28
+#define CLK_PCLK_GPIO_AUD 29
+#define CLK_PCLK_PMU_AUD 30
+#define CLK_PCLK_SYSREG_AUD 31
+#define CLK_PCLK_AUD_SLIMBUS 32
+#define CLK_PCLK_AUD_UART 33
+#define CLK_PCLK_AUD_PCM 34
+#define CLK_PCLK_AUD_I2S 35
+#define CLK_PCLK_TIMER 36
+#define CLK_PCLK_SFR0_CTRL 37
+#define CLK_ATCLK_AUD 38
+#define CLK_PCLK_DBG_AUD 39
+#define CLK_SCLK_AUD_CA5 40
+#define CLK_SCLK_JTAG_TCK 41
+#define CLK_SCLK_SLIMBUS_CLKIN 42
+#define CLK_SCLK_AUD_SLIMBUS 43
+#define CLK_SCLK_AUD_UART 44
+#define CLK_SCLK_AUD_PCM 45
+#define CLK_SCLK_I2S_BCLK 46
+#define CLK_SCLK_AUD_I2S 47
+
+#define AUD_NR_CLK 48
+
+/* CMU_BUS{0|1|2} */
+#define CLK_DIV_PCLK_BUS_133 1
+
+#define CLK_ACLK_AHB2APB_BUSP 2
+#define CLK_ACLK_BUSNP_133 3
+#define CLK_ACLK_BUSND_400 4
+#define CLK_PCLK_BUSSRVND_133 5
+#define CLK_PCLK_PMU_BUS 6
+#define CLK_PCLK_SYSREG_BUS 7
+
+#define CLK_MOUT_ACLK_BUS2_400_USER 8 /* Only CMU_BUS2 */
+#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */
+#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */
+
+#define BUSx_NR_CLK 11
+
+/* CMU_G3D */
+#define CLK_FOUT_G3D_PLL 1
+
+#define CLK_MOUT_ACLK_G3D_400 2
+#define CLK_MOUT_G3D_PLL 3
+
+#define CLK_DIV_SCLK_HPM_G3D 4
+#define CLK_DIV_PCLK_G3D 5
+#define CLK_DIV_ACLK_G3D 6
+#define CLK_ACLK_BTS_G3D1 7
+#define CLK_ACLK_BTS_G3D0 8
+#define CLK_ACLK_ASYNCAPBS_G3D 9
+#define CLK_ACLK_ASYNCAPBM_G3D 10
+#define CLK_ACLK_AHB2APB_G3DP 11
+#define CLK_ACLK_G3DNP_150 12
+#define CLK_ACLK_G3DND_600 13
+#define CLK_ACLK_G3D 14
+#define CLK_PCLK_BTS_G3D1 15
+#define CLK_PCLK_BTS_G3D0 16
+#define CLK_PCLK_PMU_G3D 17
+#define CLK_PCLK_SYSREG_G3D 18
+#define CLK_SCLK_HPM_G3D 19
+
+#define G3D_NR_CLK 20
+
+/* CMU_GSCL */
+#define CLK_MOUT_ACLK_GSCL_111_USER 1
+#define CLK_MOUT_ACLK_GSCL_333_USER 2
+
+#define CLK_ACLK_BTS_GSCL2 3
+#define CLK_ACLK_BTS_GSCL1 4
+#define CLK_ACLK_BTS_GSCL0 5
+#define CLK_ACLK_AHB2APB_GSCLP 6
+#define CLK_ACLK_XIU_GSCLX 7
+#define CLK_ACLK_GSCLNP_111 8
+#define CLK_ACLK_GSCLRTND_333 9
+#define CLK_ACLK_GSCLBEND_333 10
+#define CLK_ACLK_GSD 11
+#define CLK_ACLK_GSCL2 12
+#define CLK_ACLK_GSCL1 13
+#define CLK_ACLK_GSCL0 14
+#define CLK_ACLK_SMMU_GSCL0 15
+#define CLK_ACLK_SMMU_GSCL1 16
+#define CLK_ACLK_SMMU_GSCL2 17
+#define CLK_PCLK_BTS_GSCL2 18
+#define CLK_PCLK_BTS_GSCL1 19
+#define CLK_PCLK_BTS_GSCL0 20
+#define CLK_PCLK_PMU_GSCL 21
+#define CLK_PCLK_SYSREG_GSCL 22
+#define CLK_PCLK_GSCL2 23
+#define CLK_PCLK_GSCL1 24
+#define CLK_PCLK_GSCL0 25
+#define CLK_PCLK_SMMU_GSCL0 26
+#define CLK_PCLK_SMMU_GSCL1 27
+#define CLK_PCLK_SMMU_GSCL2 28
+
+#define GSCL_NR_CLK 29
+
+/* CMU_APOLLO */
+#define CLK_FOUT_APOLLO_PLL 1
+
+#define CLK_MOUT_APOLLO_PLL 2
+#define CLK_MOUT_BUS_PLL_APOLLO_USER 3
+#define CLK_MOUT_APOLLO 4
+
+#define CLK_DIV_CNTCLK_APOLLO 5
+#define CLK_DIV_PCLK_DBG_APOLLO 6
+#define CLK_DIV_ATCLK_APOLLO 7
+#define CLK_DIV_PCLK_APOLLO 8
+#define CLK_DIV_ACLK_APOLLO 9
+#define CLK_DIV_APOLLO2 10
+#define CLK_DIV_APOLLO1 11
+#define CLK_DIV_SCLK_HPM_APOLLO 12
+#define CLK_DIV_APOLLO_PLL 13
+
+#define CLK_ACLK_ATBDS_APOLLO_3 14
+#define CLK_ACLK_ATBDS_APOLLO_2 15
+#define CLK_ACLK_ATBDS_APOLLO_1 16
+#define CLK_ACLK_ATBDS_APOLLO_0 17
+#define CLK_ACLK_ASATBSLV_APOLLO_3_CSSYS 18
+#define CLK_ACLK_ASATBSLV_APOLLO_2_CSSYS 19
+#define CLK_ACLK_ASATBSLV_APOLLO_1_CSSYS 20
+#define CLK_ACLK_ASATBSLV_APOLLO_0_CSSYS 21
+#define CLK_ACLK_ASYNCACES_APOLLO_CCI 22
+#define CLK_ACLK_AHB2APB_APOLLOP 23
+#define CLK_ACLK_APOLLONP_200 24
+#define CLK_PCLK_ASAPBMST_CSSYS_APOLLO 25
+#define CLK_PCLK_PMU_APOLLO 26
+#define CLK_PCLK_SYSREG_APOLLO 27
+#define CLK_CNTCLK_APOLLO 28
+#define CLK_SCLK_HPM_APOLLO 29
+#define CLK_SCLK_APOLLO 30
+
+#define APOLLO_NR_CLK 31
+
+/* CMU_ATLAS */
+#define CLK_FOUT_ATLAS_PLL 1
+
+#define CLK_MOUT_ATLAS_PLL 2
+#define CLK_MOUT_BUS_PLL_ATLAS_USER 3
+#define CLK_MOUT_ATLAS 4
+
+#define CLK_DIV_CNTCLK_ATLAS 5
+#define CLK_DIV_PCLK_DBG_ATLAS 6
+#define CLK_DIV_ATCLK_ATLASO 7
+#define CLK_DIV_PCLK_ATLAS 8
+#define CLK_DIV_ACLK_ATLAS 9
+#define CLK_DIV_ATLAS2 10
+#define CLK_DIV_ATLAS1 11
+#define CLK_DIV_SCLK_HPM_ATLAS 12
+#define CLK_DIV_ATLAS_PLL 13
+
+#define CLK_ACLK_ATB_AUD_CSSYS 14
+#define CLK_ACLK_ATB_APOLLO3_CSSYS 15
+#define CLK_ACLK_ATB_APOLLO2_CSSYS 16
+#define CLK_ACLK_ATB_APOLLO1_CSSYS 17
+#define CLK_ACLK_ATB_APOLLO0_CSSYS 18
+#define CLK_ACLK_ASYNCAHBS_CSSYS_SSS 19
+#define CLK_ACLK_ASYNCAXIS_CSSYS_CCIX 20
+#define CLK_ACLK_ASYNCACES_ATLAS_CCI 21
+#define CLK_ACLK_AHB2APB_ATLASP 22
+#define CLK_ACLK_ATLASNP_200 23
+#define CLK_PCLK_ASYNCAPB_AUD_CSSYS 24
+#define CLK_PCLK_ASYNCAPB_ISP_CSSYS 25
+#define CLK_PCLK_ASYNCAPB_APOLLO_CSSYS 26
+#define CLK_PCLK_PMU_ATLAS 27
+#define CLK_PCLK_SYSREG_ATLAS 28
+#define CLK_PCLK_SECJTAG 29
+#define CLK_CNTCLK_ATLAS 30
+#define CLK_SCLK_FREQ_DET_ATLAS_PLL 31
+#define CLK_SCLK_HPM_ATLAS 32
+#define CLK_TRACECLK 33
+#define CLK_CTMCLK 34
+#define CLK_HCLK_CSSYS 35
+#define CLK_PCLK_DBG_CSSYS 36
+#define CLK_PCLK_DBG 37
+#define CLK_ATCLK 38
+#define CLK_SCLK_ATLAS 39
+
+#define ATLAS_NR_CLK 40
+
+/* CMU_MSCL */
+#define CLK_MOUT_SCLK_JPEG_USER 1
+#define CLK_MOUT_ACLK_MSCL_400_USER 2
+#define CLK_MOUT_SCLK_JPEG 3
+
+#define CLK_DIV_PCLK_MSCL 4
+
+#define CLK_ACLK_BTS_JPEG 5
+#define CLK_ACLK_BTS_M2MSCALER1 6
+#define CLK_ACLK_BTS_M2MSCALER0 7
+#define CLK_ACLK_AHB2APB_MSCL0P 8
+#define CLK_ACLK_XIU_MSCLX 9
+#define CLK_ACLK_MSCLNP_100 10
+#define CLK_ACLK_MSCLND_400 11
+#define CLK_ACLK_JPEG 12
+#define CLK_ACLK_M2MSCALER1 13
+#define CLK_ACLK_M2MSCALER0 14
+#define CLK_ACLK_SMMU_M2MSCALER0 15
+#define CLK_ACLK_SMMU_M2MSCALER1 16
+#define CLK_ACLK_SMMU_JPEG 17
+#define CLK_PCLK_BTS_JPEG 18
+#define CLK_PCLK_BTS_M2MSCALER1 19
+#define CLK_PCLK_BTS_M2MSCALER0 20
+#define CLK_PCLK_PMU_MSCL 21
+#define CLK_PCLK_SYSREG_MSCL 22
+#define CLK_PCLK_JPEG 23
+#define CLK_PCLK_M2MSCALER1 24
+#define CLK_PCLK_M2MSCALER0 25
+#define CLK_PCLK_SMMU_M2MSCALER0 26
+#define CLK_PCLK_SMMU_M2MSCALER1 27
+#define CLK_PCLK_SMMU_JPEG 28
+#define CLK_SCLK_JPEG 29
+
+#define MSCL_NR_CLK 30
+
+/* CMU_MFC */
+#define CLK_MOUT_ACLK_MFC_400_USER 1
+
+#define CLK_DIV_PCLK_MFC 2
+
+#define CLK_ACLK_BTS_MFC_1 3
+#define CLK_ACLK_BTS_MFC_0 4
+#define CLK_ACLK_AHB2APB_MFCP 5
+#define CLK_ACLK_XIU_MFCX 6
+#define CLK_ACLK_MFCNP_100 7
+#define CLK_ACLK_MFCND_400 8
+#define CLK_ACLK_MFC 9
+#define CLK_ACLK_SMMU_MFC_1 10
+#define CLK_ACLK_SMMU_MFC_0 11
+#define CLK_PCLK_BTS_MFC_1 12
+#define CLK_PCLK_BTS_MFC_0 13
+#define CLK_PCLK_PMU_MFC 14
+#define CLK_PCLK_SYSREG_MFC 15
+#define CLK_PCLK_MFC 16
+#define CLK_PCLK_SMMU_MFC_1 17
+#define CLK_PCLK_SMMU_MFC_0 18
+
+#define MFC_NR_CLK 19
+
+/* CMU_HEVC */
+#define CLK_MOUT_ACLK_HEVC_400_USER 1
+
+#define CLK_DIV_PCLK_HEVC 2
+
+#define CLK_ACLK_BTS_HEVC_1 3
+#define CLK_ACLK_BTS_HEVC_0 4
+#define CLK_ACLK_AHB2APB_HEVCP 5
+#define CLK_ACLK_XIU_HEVCX 6
+#define CLK_ACLK_HEVCNP_100 7
+#define CLK_ACLK_HEVCND_400 8
+#define CLK_ACLK_HEVC 9
+#define CLK_ACLK_SMMU_HEVC_1 10
+#define CLK_ACLK_SMMU_HEVC_0 11
+#define CLK_PCLK_BTS_HEVC_1 12
+#define CLK_PCLK_BTS_HEVC_0 13
+#define CLK_PCLK_PMU_HEVC 14
+#define CLK_PCLK_SYSREG_HEVC 15
+#define CLK_PCLK_HEVC 16
+#define CLK_PCLK_SMMU_HEVC_1 17
+#define CLK_PCLK_SMMU_HEVC_0 18
+
+#define HEVC_NR_CLK 19
+
+/* CMU_ISP */
+#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1
+#define CLK_MOUT_ACLK_ISP_400_USER 2
+
+#define CLK_DIV_PCLK_ISP_DIS 3
+#define CLK_DIV_PCLK_ISP 4
+#define CLK_DIV_ACLK_ISP_D_200 5
+#define CLK_DIV_ACLK_ISP_C_200 6
+
+#define CLK_ACLK_ISP_D_GLUE 7
+#define CLK_ACLK_SCALERP 8
+#define CLK_ACLK_3DNR 9
+#define CLK_ACLK_DIS 10
+#define CLK_ACLK_SCALERC 11
+#define CLK_ACLK_DRC 12
+#define CLK_ACLK_ISP 13
+#define CLK_ACLK_AXIUS_SCALERP 14
+#define CLK_ACLK_AXIUS_SCALERC 15
+#define CLK_ACLK_AXIUS_DRC 16
+#define CLK_ACLK_ASYNCAHBM_ISP2P 17
+#define CLK_ACLK_ASYNCAHBM_ISP1P 18
+#define CLK_ACLK_ASYNCAXIS_DIS1 19
+#define CLK_ACLK_ASYNCAXIS_DIS0 20
+#define CLK_ACLK_ASYNCAXIM_DIS1 21
+#define CLK_ACLK_ASYNCAXIM_DIS0 22
+#define CLK_ACLK_ASYNCAXIM_ISP2P 23
+#define CLK_ACLK_ASYNCAXIM_ISP1P 24
+#define CLK_ACLK_AHB2APB_ISP2P 25
+#define CLK_ACLK_AHB2APB_ISP1P 26
+#define CLK_ACLK_AXI2APB_ISP2P 27
+#define CLK_ACLK_AXI2APB_ISP1P 28
+#define CLK_ACLK_XIU_ISPEX1 29
+#define CLK_ACLK_XIU_ISPEX0 30
+#define CLK_ACLK_ISPND_400 31
+#define CLK_ACLK_SMMU_SCALERP 32
+#define CLK_ACLK_SMMU_3DNR 33
+#define CLK_ACLK_SMMU_DIS1 34
+#define CLK_ACLK_SMMU_DIS0 35
+#define CLK_ACLK_SMMU_SCALERC 36
+#define CLK_ACLK_SMMU_DRC 37
+#define CLK_ACLK_SMMU_ISP 38
+#define CLK_ACLK_BTS_SCALERP 39
+#define CLK_ACLK_BTS_3DR 40
+#define CLK_ACLK_BTS_DIS1 41
+#define CLK_ACLK_BTS_DIS0 42
+#define CLK_ACLK_BTS_SCALERC 43
+#define CLK_ACLK_BTS_DRC 44
+#define CLK_ACLK_BTS_ISP 45
+#define CLK_PCLK_SMMU_SCALERP 46
+#define CLK_PCLK_SMMU_3DNR 47
+#define CLK_PCLK_SMMU_DIS1 48
+#define CLK_PCLK_SMMU_DIS0 49
+#define CLK_PCLK_SMMU_SCALERC 50
+#define CLK_PCLK_SMMU_DRC 51
+#define CLK_PCLK_SMMU_ISP 52
+#define CLK_PCLK_BTS_SCALERP 53
+#define CLK_PCLK_BTS_3DNR 54
+#define CLK_PCLK_BTS_DIS1 55
+#define CLK_PCLK_BTS_DIS0 56
+#define CLK_PCLK_BTS_SCALERC 57
+#define CLK_PCLK_BTS_DRC 58
+#define CLK_PCLK_BTS_ISP 59
+#define CLK_PCLK_ASYNCAXI_DIS1 60
+#define CLK_PCLK_ASYNCAXI_DIS0 61
+#define CLK_PCLK_PMU_ISP 62
+#define CLK_PCLK_SYSREG_ISP 63
+#define CLK_PCLK_CMU_ISP_LOCAL 64
+#define CLK_PCLK_SCALERP 65
+#define CLK_PCLK_3DNR 66
+#define CLK_PCLK_DIS_CORE 67
+#define CLK_PCLK_DIS 68
+#define CLK_PCLK_SCALERC 69
+#define CLK_PCLK_DRC 70
+#define CLK_PCLK_ISP 71
+#define CLK_SCLK_PIXELASYNCS_DIS 72
+#define CLK_SCLK_PIXELASYNCM_DIS 73
+#define CLK_SCLK_PIXELASYNCS_SCALERP 74
+#define CLK_SCLK_PIXELASYNCM_ISPD 75
+#define CLK_SCLK_PIXELASYNCS_ISPC 76
+#define CLK_SCLK_PIXELASYNCM_ISPC 77
+
+#define ISP_NR_CLK 78
+
+/* CMU_CAM0 */
+#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1
+#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2
+
+#define CLK_MOUT_ACLK_CAM0_333_USER 3
+#define CLK_MOUT_ACLK_CAM0_400_USER 4
+#define CLK_MOUT_ACLK_CAM0_552_USER 5
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S4_USER 6
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2A_USER 7
+#define CLK_MOUT_ACLK_LITE_D_B 8
+#define CLK_MOUT_ACLK_LITE_D_A 9
+#define CLK_MOUT_ACLK_LITE_B_B 10
+#define CLK_MOUT_ACLK_LITE_B_A 11
+#define CLK_MOUT_ACLK_LITE_A_B 12
+#define CLK_MOUT_ACLK_LITE_A_A 13
+#define CLK_MOUT_ACLK_CAM0_400 14
+#define CLK_MOUT_ACLK_CSIS1_B 15
+#define CLK_MOUT_ACLK_CSIS1_A 16
+#define CLK_MOUT_ACLK_CSIS0_B 17
+#define CLK_MOUT_ACLK_CSIS0_A 18
+#define CLK_MOUT_ACLK_3AA1_B 19
+#define CLK_MOUT_ACLK_3AA1_A 20
+#define CLK_MOUT_ACLK_3AA0_B 21
+#define CLK_MOUT_ACLK_3AA0_A 22
+#define CLK_MOUT_SCLK_LITE_FREECNT_C 23
+#define CLK_MOUT_SCLK_LITE_FREECNT_B 24
+#define CLK_MOUT_SCLK_LITE_FREECNT_A 25
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_B 26
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_A 27
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_B 28
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_A 29
+
+#define CLK_DIV_PCLK_CAM0_50 30
+#define CLK_DIV_ACLK_CAM0_200 31
+#define CLK_DIV_ACLK_CAM0_BUS_400 32
+#define CLK_DIV_PCLK_LITE_D 33
+#define CLK_DIV_ACLK_LITE_D 34
+#define CLK_DIV_PCLK_LITE_B 35
+#define CLK_DIV_ACLK_LITE_B 36
+#define CLK_DIV_PCLK_LITE_A 37
+#define CLK_DIV_ACLK_LITE_A 38
+#define CLK_DIV_ACLK_CSIS1 39
+#define CLK_DIV_ACLK_CSIS0 40
+#define CLK_DIV_PCLK_3AA1 41
+#define CLK_DIV_ACLK_3AA1 42
+#define CLK_DIV_PCLK_3AA0 43
+#define CLK_DIV_ACLK_3AA0 44
+#define CLK_DIV_SCLK_PIXELASYNC_LITE_C 45
+#define CLK_DIV_PCLK_PIXELASYNC_LITE_C 46
+#define CLK_DIV_SCLK_PIXELASYNC_LITE_C_INIT 47
+
+#define CLK_ACLK_CSIS1 50
+#define CLK_ACLK_CSIS0 51
+#define CLK_ACLK_3AA1 52
+#define CLK_ACLK_3AA0 53
+#define CLK_ACLK_LITE_D 54
+#define CLK_ACLK_LITE_B 55
+#define CLK_ACLK_LITE_A 56
+#define CLK_ACLK_AHBSYNCDN 57
+#define CLK_ACLK_AXIUS_LITE_D 58
+#define CLK_ACLK_AXIUS_LITE_B 59
+#define CLK_ACLK_AXIUS_LITE_A 60
+#define CLK_ACLK_ASYNCAPBM_3AA1 61
+#define CLK_ACLK_ASYNCAPBS_3AA1 62
+#define CLK_ACLK_ASYNCAPBM_3AA0 63
+#define CLK_ACLK_ASYNCAPBS_3AA0 64
+#define CLK_ACLK_ASYNCAPBM_LITE_D 65
+#define CLK_ACLK_ASYNCAPBS_LITE_D 66
+#define CLK_ACLK_ASYNCAPBM_LITE_B 67
+#define CLK_ACLK_ASYNCAPBS_LITE_B 68
+#define CLK_ACLK_ASYNCAPBM_LITE_A 69
+#define CLK_ACLK_ASYNCAPBS_LITE_A 70
+#define CLK_ACLK_ASYNCAXIM_ISP0P 71
+#define CLK_ACLK_ASYNCAXIM_3AA1 72
+#define CLK_ACLK_ASYNCAXIS_3AA1 73
+#define CLK_ACLK_ASYNCAXIM_3AA0 74
+#define CLK_ACLK_ASYNCAXIS_3AA0 75
+#define CLK_ACLK_ASYNCAXIM_LITE_D 76
+#define CLK_ACLK_ASYNCAXIS_LITE_D 77
+#define CLK_ACLK_ASYNCAXIM_LITE_B 78
+#define CLK_ACLK_ASYNCAXIS_LITE_B 79
+#define CLK_ACLK_ASYNCAXIM_LITE_A 80
+#define CLK_ACLK_ASYNCAXIS_LITE_A 81
+#define CLK_ACLK_AHB2APB_ISPSFRP 82
+#define CLK_ACLK_AXI2APB_ISP0P 83
+#define CLK_ACLK_AXI2AHB_ISP0P 84
+#define CLK_ACLK_XIU_IS0X 85
+#define CLK_ACLK_XIU_ISP0EX 86
+#define CLK_ACLK_CAM0NP_276 87
+#define CLK_ACLK_CAM0ND_400 88
+#define CLK_ACLK_SMMU_3AA1 89
+#define CLK_ACLK_SMMU_3AA0 90
+#define CLK_ACLK_SMMU_LITE_D 91
+#define CLK_ACLK_SMMU_LITE_B 92
+#define CLK_ACLK_SMMU_LITE_A 93
+#define CLK_ACLK_BTS_3AA1 94
+#define CLK_ACLK_BTS_3AA0 95
+#define CLK_ACLK_BTS_LITE_D 96
+#define CLK_ACLK_BTS_LITE_B 97
+#define CLK_ACLK_BTS_LITE_A 98
+#define CLK_PCLK_SMMU_3AA1 99
+#define CLK_PCLK_SMMU_3AA0 100
+#define CLK_PCLK_SMMU_LITE_D 101
+#define CLK_PCLK_SMMU_LITE_B 102
+#define CLK_PCLK_SMMU_LITE_A 103
+#define CLK_PCLK_BTS_3AA1 104
+#define CLK_PCLK_BTS_3AA0 105
+#define CLK_PCLK_BTS_LITE_D 106
+#define CLK_PCLK_BTS_LITE_B 107
+#define CLK_PCLK_BTS_LITE_A 108
+#define CLK_PCLK_ASYNCAXI_CAM1 109
+#define CLK_PCLK_ASYNCAXI_3AA1 110
+#define CLK_PCLK_ASYNCAXI_3AA0 111
+#define CLK_PCLK_ASYNCAXI_LITE_D 112
+#define CLK_PCLK_ASYNCAXI_LITE_B 113
+#define CLK_PCLK_ASYNCAXI_LITE_A 114
+#define CLK_PCLK_PMU_CAM0 115
+#define CLK_PCLK_SYSREG_CAM0 116
+#define CLK_PCLK_CMU_CAM0_LOCAL 117
+#define CLK_PCLK_CSIS1 118
+#define CLK_PCLK_CSIS0 119
+#define CLK_PCLK_3AA1 120
+#define CLK_PCLK_3AA0 121
+#define CLK_PCLK_LITE_D 122
+#define CLK_PCLK_LITE_B 123
+#define CLK_PCLK_LITE_A 124
+#define CLK_PHYCLK_RXBYTECLKHS0_S4 125
+#define CLK_PHYCLK_RXBYTECLKHS0_S2A 126
+#define CLK_SCLK_LITE_FREECNT 127
+#define CLK_SCLK_PIXELASYNCM_3AA1 128
+#define CLK_SCLK_PIXELASYNCM_3AA0 129
+#define CLK_SCLK_PIXELASYNCS_3AA0 130
+#define CLK_SCLK_PIXELASYNCM_LITE_C 131
+#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132
+#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133
+
+#define CAM0_NR_CLK 134
+
+/* CMU_CAM1 */
+#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1
+
+#define CLK_MOUT_SCLK_ISP_UART_USER 2
+#define CLK_MOUT_SCLK_ISP_SPI1_USER 3
+#define CLK_MOUT_SCLK_ISP_SPI0_USER 4
+#define CLK_MOUT_ACLK_CAM1_333_USER 5
+#define CLK_MOUT_ACLK_CAM1_400_USER 6
+#define CLK_MOUT_ACLK_CAM1_552_USER 7
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2B_USER 8
+#define CLK_MOUT_ACLK_CSIS2_B 9
+#define CLK_MOUT_ACLK_CSIS2_A 10
+#define CLK_MOUT_ACLK_FD_B 11
+#define CLK_MOUT_ACLK_FD_A 12
+#define CLK_MOUT_ACLK_LITE_C_B 13
+#define CLK_MOUT_ACLK_LITE_C_A 14
+
+#define CLK_DIV_SCLK_ISP_WPWM 15
+#define CLK_DIV_PCLK_CAM1_83 16
+#define CLK_DIV_PCLK_CAM1_166 17
+#define CLK_DIV_PCLK_DBG_CAM1 18
+#define CLK_DIV_ATCLK_CAM1 19
+#define CLK_DIV_ACLK_CSIS2 20
+#define CLK_DIV_PCLK_FD 21
+#define CLK_DIV_ACLK_FD 22
+#define CLK_DIV_PCLK_LITE_C 23
+#define CLK_DIV_ACLK_LITE_C 24
+
+#define CLK_ACLK_ISP_GIC 25
+#define CLK_ACLK_FD 26
+#define CLK_ACLK_LITE_C 27
+#define CLK_ACLK_CSIS2 28
+#define CLK_ACLK_ASYNCAPBM_FD 29
+#define CLK_ACLK_ASYNCAPBS_FD 30
+#define CLK_ACLK_ASYNCAPBM_LITE_C 31
+#define CLK_ACLK_ASYNCAPBS_LITE_C 32
+#define CLK_ACLK_ASYNCAHBS_SFRISP2H2 33
+#define CLK_ACLK_ASYNCAHBS_SFRISP2H1 34
+#define CLK_ACLK_ASYNCAXIM_CA5 35
+#define CLK_ACLK_ASYNCAXIS_CA5 36
+#define CLK_ACLK_ASYNCAXIS_ISPX2 37
+#define CLK_ACLK_ASYNCAXIS_ISPX1 38
+#define CLK_ACLK_ASYNCAXIS_ISPX0 39
+#define CLK_ACLK_ASYNCAXIM_ISPEX 40
+#define CLK_ACLK_ASYNCAXIM_ISP3P 41
+#define CLK_ACLK_ASYNCAXIS_ISP3P 42
+#define CLK_ACLK_ASYNCAXIM_FD 43
+#define CLK_ACLK_ASYNCAXIS_FD 44
+#define CLK_ACLK_ASYNCAXIM_LITE_C 45
+#define CLK_ACLK_ASYNCAXIS_LITE_C 46
+#define CLK_ACLK_AHB2APB_ISP5P 47
+#define CLK_ACLK_AHB2APB_ISP3P 48
+#define CLK_ACLK_AXI2APB_ISP3P 49
+#define CLK_ACLK_AHB_SFRISP2H 50
+#define CLK_ACLK_AXI_ISP_HX_R 51
+#define CLK_ACLK_AXI_ISP_CX_R 52
+#define CLK_ACLK_AXI_ISP_HX 53
+#define CLK_ACLK_AXI_ISP_CX 54
+#define CLK_ACLK_XIU_ISPX 55
+#define CLK_ACLK_XIU_ISPEX 56
+#define CLK_ACLK_CAM1NP_333 57
+#define CLK_ACLK_CAM1ND_400 58
+#define CLK_ACLK_SMMU_ISPCPU 59
+#define CLK_ACLK_SMMU_FD 60
+#define CLK_ACLK_SMMU_LITE_C 61
+#define CLK_ACLK_BTS_ISP3P 62
+#define CLK_ACLK_BTS_FD 63
+#define CLK_ACLK_BTS_LITE_C 64
+#define CLK_ACLK_AHBDN_SFRISP2H 65
+#define CLK_ACLK_AHBDN_ISP5P 66
+#define CLK_ACLK_AXIUS_ISP3P 67
+#define CLK_ACLK_AXIUS_FD 68
+#define CLK_ACLK_AXIUS_LITE_C 69
+#define CLK_PCLK_SMMU_ISPCPU 70
+#define CLK_PCLK_SMMU_FD 71
+#define CLK_PCLK_SMMU_LITE_C 72
+#define CLK_PCLK_BTS_ISP3P 73
+#define CLK_PCLK_BTS_FD 74
+#define CLK_PCLK_BTS_LITE_C 75
+#define CLK_PCLK_ASYNCAXIM_CA5 76
+#define CLK_PCLK_ASYNCAXIM_ISPEX 77
+#define CLK_PCLK_ASYNCAXIM_ISP3P 78
+#define CLK_PCLK_ASYNCAXIM_FD 79
+#define CLK_PCLK_ASYNCAXIM_LITE_C 80
+#define CLK_PCLK_PMU_CAM1 81
+#define CLK_PCLK_SYSREG_CAM1 82
+#define CLK_PCLK_CMU_CAM1_LOCAL 83
+#define CLK_PCLK_ISP_MCTADC 84
+#define CLK_PCLK_ISP_WDT 85
+#define CLK_PCLK_ISP_PWM 86
+#define CLK_PCLK_ISP_UART 87
+#define CLK_PCLK_ISP_MCUCTL 88
+#define CLK_PCLK_ISP_SPI1 89
+#define CLK_PCLK_ISP_SPI0 90
+#define CLK_PCLK_ISP_I2C2 91
+#define CLK_PCLK_ISP_I2C1 92
+#define CLK_PCLK_ISP_I2C0 93
+#define CLK_PCLK_ISP_MPWM 94
+#define CLK_PCLK_FD 95
+#define CLK_PCLK_LITE_C 96
+#define CLK_PCLK_CSIS2 97
+#define CLK_SCLK_ISP_I2C2 98
+#define CLK_SCLK_ISP_I2C1 99
+#define CLK_SCLK_ISP_I2C0 100
+#define CLK_SCLK_ISP_PWM 101
+#define CLK_PHYCLK_RXBYTECLKHS0_S2B 102
+#define CLK_SCLK_LITE_C_FREECNT 103
+#define CLK_SCLK_PIXELASYNCM_FD 104
+#define CLK_SCLK_ISP_MCTADC 105
+#define CLK_SCLK_ISP_UART 106
+#define CLK_SCLK_ISP_SPI1 107
+#define CLK_SCLK_ISP_SPI0 108
+#define CLK_SCLK_ISP_MPWM 109
+#define CLK_PCLK_DBG_ISP 110
+#define CLK_ATCLK_ISP 111
+#define CLK_SCLK_ISP_CA5 112
+
+#define CAM1_NR_CLK 113
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index b690cdba163b..8780868458a0 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -248,6 +248,9 @@
#define IMX6QDL_PLL6_BYPASS 235
#define IMX6QDL_PLL7_BYPASS 236
#define IMX6QDL_CLK_GPT_3M 237
-#define IMX6QDL_CLK_END 238
+#define IMX6QDL_CLK_VIDEO_27M 238
+#define IMX6QDL_CLK_MIPI_CORE_CFG 239
+#define IMX6QDL_CLK_MIPI_IPG 240
+#define IMX6QDL_CLK_END 241
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/pistachio-clk.h b/include/dt-bindings/clock/pistachio-clk.h
new file mode 100644
index 000000000000..039f83facb68
--- /dev/null
+++ b/include/dt-bindings/clock/pistachio-clk.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_PISTACHIO_H
+#define _DT_BINDINGS_CLOCK_PISTACHIO_H
+
+/* PLLs */
+#define CLK_MIPS_PLL 0
+#define CLK_AUDIO_PLL 1
+#define CLK_RPU_V_PLL 2
+#define CLK_RPU_L_PLL 3
+#define CLK_SYS_PLL 4
+#define CLK_WIFI_PLL 5
+#define CLK_BT_PLL 6
+
+/* Fixed-factor clocks */
+#define CLK_WIFI_DIV4 16
+#define CLK_WIFI_DIV8 17
+
+/* Gate clocks */
+#define CLK_MIPS 32
+#define CLK_AUDIO_IN 33
+#define CLK_AUDIO 34
+#define CLK_I2S 35
+#define CLK_SPDIF 36
+#define CLK_AUDIO_DAC 37
+#define CLK_RPU_V 38
+#define CLK_RPU_L 39
+#define CLK_RPU_SLEEP 40
+#define CLK_WIFI_PLL_GATE 41
+#define CLK_RPU_CORE 42
+#define CLK_WIFI_ADC 43
+#define CLK_WIFI_DAC 44
+#define CLK_USB_PHY 45
+#define CLK_ENET_IN 46
+#define CLK_ENET 47
+#define CLK_UART0 48
+#define CLK_UART1 49
+#define CLK_PERIPH_SYS 50
+#define CLK_SPI0 51
+#define CLK_SPI1 52
+#define CLK_EVENT_TIMER 53
+#define CLK_AUX_ADC_INTERNAL 54
+#define CLK_AUX_ADC 55
+#define CLK_SD_HOST 56
+#define CLK_BT 57
+#define CLK_BT_DIV4 58
+#define CLK_BT_DIV8 59
+#define CLK_BT_1MHZ 60
+
+/* Divider clocks */
+#define CLK_MIPS_INTERNAL_DIV 64
+#define CLK_MIPS_DIV 65
+#define CLK_AUDIO_DIV 66
+#define CLK_I2S_DIV 67
+#define CLK_SPDIF_DIV 68
+#define CLK_AUDIO_DAC_DIV 69
+#define CLK_RPU_V_DIV 70
+#define CLK_RPU_L_DIV 71
+#define CLK_RPU_SLEEP_DIV 72
+#define CLK_RPU_CORE_DIV 73
+#define CLK_USB_PHY_DIV 74
+#define CLK_ENET_DIV 75
+#define CLK_UART0_INTERNAL_DIV 76
+#define CLK_UART0_DIV 77
+#define CLK_UART1_INTERNAL_DIV 78
+#define CLK_UART1_DIV 79
+#define CLK_SYS_INTERNAL_DIV 80
+#define CLK_SPI0_INTERNAL_DIV 81
+#define CLK_SPI0_DIV 82
+#define CLK_SPI1_INTERNAL_DIV 83
+#define CLK_SPI1_DIV 84
+#define CLK_EVENT_TIMER_INTERNAL_DIV 85
+#define CLK_EVENT_TIMER_DIV 86
+#define CLK_AUX_ADC_INTERNAL_DIV 87
+#define CLK_AUX_ADC_DIV 88
+#define CLK_SD_HOST_DIV 89
+#define CLK_BT_DIV 90
+#define CLK_BT_DIV4_DIV 91
+#define CLK_BT_DIV8_DIV 92
+#define CLK_BT_1MHZ_INTERNAL_DIV 93
+#define CLK_BT_1MHZ_DIV 94
+
+/* Mux clocks */
+#define CLK_AUDIO_REF_MUX 96
+#define CLK_MIPS_PLL_MUX 97
+#define CLK_AUDIO_PLL_MUX 98
+#define CLK_AUDIO_MUX 99
+#define CLK_RPU_V_PLL_MUX 100
+#define CLK_RPU_L_PLL_MUX 101
+#define CLK_RPU_L_MUX 102
+#define CLK_WIFI_PLL_MUX 103
+#define CLK_WIFI_DIV4_MUX 104
+#define CLK_WIFI_DIV8_MUX 105
+#define CLK_RPU_CORE_MUX 106
+#define CLK_SYS_PLL_MUX 107
+#define CLK_ENET_MUX 108
+#define CLK_EVENT_TIMER_MUX 109
+#define CLK_SD_HOST_MUX 110
+#define CLK_BT_PLL_MUX 111
+#define CLK_DEBUG_MUX 112
+
+#define CLK_NR_CLKS 113
+
+/* Peripheral gate clocks */
+#define PERIPH_CLK_SYS 0
+#define PERIPH_CLK_SYS_BUS 1
+#define PERIPH_CLK_DDR 2
+#define PERIPH_CLK_ROM 3
+#define PERIPH_CLK_COUNTER_FAST 4
+#define PERIPH_CLK_COUNTER_SLOW 5
+#define PERIPH_CLK_IR 6
+#define PERIPH_CLK_WD 7
+#define PERIPH_CLK_PDM 8
+#define PERIPH_CLK_PWM 9
+#define PERIPH_CLK_I2C0 10
+#define PERIPH_CLK_I2C1 11
+#define PERIPH_CLK_I2C2 12
+#define PERIPH_CLK_I2C3 13
+
+/* Peripheral divider clocks */
+#define PERIPH_CLK_ROM_DIV 32
+#define PERIPH_CLK_COUNTER_FAST_DIV 33
+#define PERIPH_CLK_COUNTER_SLOW_PRE_DIV 34
+#define PERIPH_CLK_COUNTER_SLOW_DIV 35
+#define PERIPH_CLK_IR_PRE_DIV 36
+#define PERIPH_CLK_IR_DIV 37
+#define PERIPH_CLK_WD_PRE_DIV 38
+#define PERIPH_CLK_WD_DIV 39
+#define PERIPH_CLK_PDM_PRE_DIV 40
+#define PERIPH_CLK_PDM_DIV 41
+#define PERIPH_CLK_PWM_PRE_DIV 42
+#define PERIPH_CLK_PWM_DIV 43
+#define PERIPH_CLK_I2C0_PRE_DIV 44
+#define PERIPH_CLK_I2C0_DIV 45
+#define PERIPH_CLK_I2C1_PRE_DIV 46
+#define PERIPH_CLK_I2C1_DIV 47
+#define PERIPH_CLK_I2C2_PRE_DIV 48
+#define PERIPH_CLK_I2C2_DIV 49
+#define PERIPH_CLK_I2C3_PRE_DIV 50
+#define PERIPH_CLK_I2C3_DIV 51
+
+#define PERIPH_CLK_NR_CLKS 52
+
+/* System gate clocks */
+#define SYS_CLK_I2C0 0
+#define SYS_CLK_I2C1 1
+#define SYS_CLK_I2C2 2
+#define SYS_CLK_I2C3 3
+#define SYS_CLK_I2S_IN 4
+#define SYS_CLK_PAUD_OUT 5
+#define SYS_CLK_SPDIF_OUT 6
+#define SYS_CLK_SPI0_MASTER 7
+#define SYS_CLK_SPI0_SLAVE 8
+#define SYS_CLK_PWM 9
+#define SYS_CLK_UART0 10
+#define SYS_CLK_UART1 11
+#define SYS_CLK_SPI1 12
+#define SYS_CLK_MDC 13
+#define SYS_CLK_SD_HOST 14
+#define SYS_CLK_ENET 15
+#define SYS_CLK_IR 16
+#define SYS_CLK_WD 17
+#define SYS_CLK_TIMER 18
+#define SYS_CLK_I2S_OUT 24
+#define SYS_CLK_SPDIF_IN 25
+#define SYS_CLK_EVENT_TIMER 26
+#define SYS_CLK_HASH 27
+
+#define SYS_CLK_NR_CLKS 28
+
+/* Gates for external input clocks */
+#define EXT_CLK_AUDIO_IN 0
+#define EXT_CLK_ENET_IN 1
+
+#define EXT_CLK_NR_CLKS 2
+
+#endif /* _DT_BINDINGS_CLOCK_PISTACHIO_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index 04fb29ae30e6..dc4254b8cbbc 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -288,5 +288,8 @@
#define UBI32_CORE2_CLK_SRC 278
#define UBI32_CORE1_CLK 279
#define UBI32_CORE2_CLK 280
+#define EBI2_AON_CLK 281
+#define NSSTCM_CLK_SRC 282
+#define NSSTCM_CLK 283
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h
new file mode 100644
index 000000000000..e430f644dd6c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8916_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8916_H
+
+#define GPLL0 0
+#define GPLL0_VOTE 1
+#define BIMC_PLL 2
+#define BIMC_PLL_VOTE 3
+#define GPLL1 4
+#define GPLL1_VOTE 5
+#define GPLL2 6
+#define GPLL2_VOTE 7
+#define PCNOC_BFDCD_CLK_SRC 8
+#define SYSTEM_NOC_BFDCD_CLK_SRC 9
+#define CAMSS_AHB_CLK_SRC 10
+#define APSS_AHB_CLK_SRC 11
+#define CSI0_CLK_SRC 12
+#define CSI1_CLK_SRC 13
+#define GFX3D_CLK_SRC 14
+#define VFE0_CLK_SRC 15
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27
+#define BLSP1_UART1_APPS_CLK_SRC 28
+#define BLSP1_UART2_APPS_CLK_SRC 29
+#define CCI_CLK_SRC 30
+#define CAMSS_GP0_CLK_SRC 31
+#define CAMSS_GP1_CLK_SRC 32
+#define JPEG0_CLK_SRC 33
+#define MCLK0_CLK_SRC 34
+#define MCLK1_CLK_SRC 35
+#define CSI0PHYTIMER_CLK_SRC 36
+#define CSI1PHYTIMER_CLK_SRC 37
+#define CPP_CLK_SRC 38
+#define CRYPTO_CLK_SRC 39
+#define GP1_CLK_SRC 40
+#define GP2_CLK_SRC 41
+#define GP3_CLK_SRC 42
+#define BYTE0_CLK_SRC 43
+#define ESC0_CLK_SRC 44
+#define MDP_CLK_SRC 45
+#define PCLK0_CLK_SRC 46
+#define VSYNC_CLK_SRC 47
+#define PDM2_CLK_SRC 48
+#define SDCC1_APPS_CLK_SRC 49
+#define SDCC2_APPS_CLK_SRC 50
+#define APSS_TCU_CLK_SRC 51
+#define USB_HS_SYSTEM_CLK_SRC 52
+#define VCODEC0_CLK_SRC 53
+#define GCC_BLSP1_AHB_CLK 54
+#define GCC_BLSP1_SLEEP_CLK 55
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67
+#define GCC_BLSP1_UART1_APPS_CLK 68
+#define GCC_BLSP1_UART2_APPS_CLK 69
+#define GCC_BOOT_ROM_AHB_CLK 70
+#define GCC_CAMSS_CCI_AHB_CLK 71
+#define GCC_CAMSS_CCI_CLK 72
+#define GCC_CAMSS_CSI0_AHB_CLK 73
+#define GCC_CAMSS_CSI0_CLK 74
+#define GCC_CAMSS_CSI0PHY_CLK 75
+#define GCC_CAMSS_CSI0PIX_CLK 76
+#define GCC_CAMSS_CSI0RDI_CLK 77
+#define GCC_CAMSS_CSI1_AHB_CLK 78
+#define GCC_CAMSS_CSI1_CLK 79
+#define GCC_CAMSS_CSI1PHY_CLK 80
+#define GCC_CAMSS_CSI1PIX_CLK 81
+#define GCC_CAMSS_CSI1RDI_CLK 82
+#define GCC_CAMSS_CSI_VFE0_CLK 83
+#define GCC_CAMSS_GP0_CLK 84
+#define GCC_CAMSS_GP1_CLK 85
+#define GCC_CAMSS_ISPIF_AHB_CLK 86
+#define GCC_CAMSS_JPEG0_CLK 87
+#define GCC_CAMSS_JPEG_AHB_CLK 88
+#define GCC_CAMSS_JPEG_AXI_CLK 89
+#define GCC_CAMSS_MCLK0_CLK 90
+#define GCC_CAMSS_MCLK1_CLK 91
+#define GCC_CAMSS_MICRO_AHB_CLK 92
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 93
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 94
+#define GCC_CAMSS_AHB_CLK 95
+#define GCC_CAMSS_TOP_AHB_CLK 96
+#define GCC_CAMSS_CPP_AHB_CLK 97
+#define GCC_CAMSS_CPP_CLK 98
+#define GCC_CAMSS_VFE0_CLK 99
+#define GCC_CAMSS_VFE_AHB_CLK 100
+#define GCC_CAMSS_VFE_AXI_CLK 101
+#define GCC_CRYPTO_AHB_CLK 102
+#define GCC_CRYPTO_AXI_CLK 103
+#define GCC_CRYPTO_CLK 104
+#define GCC_OXILI_GMEM_CLK 105
+#define GCC_GP1_CLK 106
+#define GCC_GP2_CLK 107
+#define GCC_GP3_CLK 108
+#define GCC_MDSS_AHB_CLK 109
+#define GCC_MDSS_AXI_CLK 110
+#define GCC_MDSS_BYTE0_CLK 111
+#define GCC_MDSS_ESC0_CLK 112
+#define GCC_MDSS_MDP_CLK 113
+#define GCC_MDSS_PCLK0_CLK 114
+#define GCC_MDSS_VSYNC_CLK 115
+#define GCC_MSS_CFG_AHB_CLK 116
+#define GCC_OXILI_AHB_CLK 117
+#define GCC_OXILI_GFX3D_CLK 118
+#define GCC_PDM2_CLK 119
+#define GCC_PDM_AHB_CLK 120
+#define GCC_PRNG_AHB_CLK 121
+#define GCC_SDCC1_AHB_CLK 122
+#define GCC_SDCC1_APPS_CLK 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_GTCU_AHB_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_SMMU_CFG_CLK 129
+#define GCC_VENUS_TBU_CLK 130
+#define GCC_VFE_TBU_CLK 131
+#define GCC_USB2A_PHY_SLEEP_CLK 132
+#define GCC_USB_HS_AHB_CLK 133
+#define GCC_USB_HS_SYSTEM_CLK 134
+#define GCC_VENUS0_AHB_CLK 135
+#define GCC_VENUS0_AXI_CLK 136
+#define GCC_VENUS0_VCODEC0_CLK 137
+
+#endif
diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h
new file mode 100644
index 000000000000..9a4b4c9ca44a
--- /dev/null
+++ b/include/dt-bindings/clock/r8a73a4-clock.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A73A4_H__
+#define __DT_BINDINGS_CLOCK_R8A73A4_H__
+
+/* CPG */
+#define R8A73A4_CLK_MAIN 0
+#define R8A73A4_CLK_PLL0 1
+#define R8A73A4_CLK_PLL1 2
+#define R8A73A4_CLK_PLL2 3
+#define R8A73A4_CLK_PLL2S 4
+#define R8A73A4_CLK_PLL2H 5
+#define R8A73A4_CLK_Z 6
+#define R8A73A4_CLK_Z2 7
+#define R8A73A4_CLK_I 8
+#define R8A73A4_CLK_M3 9
+#define R8A73A4_CLK_B 10
+#define R8A73A4_CLK_M1 11
+#define R8A73A4_CLK_M2 12
+#define R8A73A4_CLK_ZX 13
+#define R8A73A4_CLK_ZS 14
+#define R8A73A4_CLK_HP 15
+
+/* MSTP2 */
+#define R8A73A4_CLK_DMAC 18
+#define R8A73A4_CLK_SCIFB3 17
+#define R8A73A4_CLK_SCIFB2 16
+#define R8A73A4_CLK_SCIFB1 7
+#define R8A73A4_CLK_SCIFB0 6
+#define R8A73A4_CLK_SCIFA0 4
+#define R8A73A4_CLK_SCIFA1 3
+
+/* MSTP3 */
+#define R8A73A4_CLK_CMT1 29
+#define R8A73A4_CLK_IIC1 23
+#define R8A73A4_CLK_IIC0 18
+#define R8A73A4_CLK_IIC7 17
+#define R8A73A4_CLK_IIC6 16
+#define R8A73A4_CLK_MMCIF0 15
+#define R8A73A4_CLK_SDHI0 14
+#define R8A73A4_CLK_SDHI1 13
+#define R8A73A4_CLK_SDHI2 12
+#define R8A73A4_CLK_MMCIF1 5
+#define R8A73A4_CLK_IIC2 0
+
+/* MSTP4 */
+#define R8A73A4_CLK_IIC3 11
+#define R8A73A4_CLK_IIC4 10
+#define R8A73A4_CLK_IIC5 9
+
+/* MSTP5 */
+#define R8A73A4_CLK_THERMAL 22
+#define R8A73A4_CLK_IIC8 15
+
+#endif /* __DT_BINDINGS_CLOCK_R8A73A4_H__ */
diff --git a/include/dt-bindings/clock/r8a7778-clock.h b/include/dt-bindings/clock/r8a7778-clock.h
new file mode 100644
index 000000000000..f6b07c5399de
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7778-clock.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7778_H__
+#define __DT_BINDINGS_CLOCK_R8A7778_H__
+
+/* CPG */
+#define R8A7778_CLK_PLLA 0
+#define R8A7778_CLK_PLLB 1
+#define R8A7778_CLK_B 2
+#define R8A7778_CLK_OUT 3
+#define R8A7778_CLK_P 4
+#define R8A7778_CLK_S 5
+#define R8A7778_CLK_S1 6
+
+/* MSTP0 */
+#define R8A7778_CLK_I2C0 30
+#define R8A7778_CLK_I2C1 29
+#define R8A7778_CLK_I2C2 28
+#define R8A7778_CLK_I2C3 27
+#define R8A7778_CLK_SCIF0 26
+#define R8A7778_CLK_SCIF1 25
+#define R8A7778_CLK_SCIF2 24
+#define R8A7778_CLK_SCIF3 23
+#define R8A7778_CLK_SCIF4 22
+#define R8A7778_CLK_SCIF5 21
+#define R8A7778_CLK_TMU0 16
+#define R8A7778_CLK_TMU1 15
+#define R8A7778_CLK_TMU2 14
+#define R8A7778_CLK_SSI0 12
+#define R8A7778_CLK_SSI1 11
+#define R8A7778_CLK_SSI2 10
+#define R8A7778_CLK_SSI3 9
+#define R8A7778_CLK_SRU 8
+#define R8A7778_CLK_HSPI 7
+
+/* MSTP1 */
+#define R8A7778_CLK_ETHER 14
+#define R8A7778_CLK_VIN0 10
+#define R8A7778_CLK_VIN1 9
+#define R8A7778_CLK_USB 0
+
+/* MSTP3 */
+#define R8A7778_CLK_MMC 31
+#define R8A7778_CLK_SDHI0 23
+#define R8A7778_CLK_SDHI1 22
+#define R8A7778_CLK_SDHI2 21
+#define R8A7778_CLK_SSI4 11
+#define R8A7778_CLK_SSI5 10
+#define R8A7778_CLK_SSI6 9
+#define R8A7778_CLK_SSI7 8
+#define R8A7778_CLK_SSI8 7
+
+/* MSTP5 */
+#define R8A7778_CLK_SRU_SRC0 31
+#define R8A7778_CLK_SRU_SRC1 30
+#define R8A7778_CLK_SRU_SRC2 29
+#define R8A7778_CLK_SRU_SRC3 28
+#define R8A7778_CLK_SRU_SRC4 27
+#define R8A7778_CLK_SRU_SRC5 26
+#define R8A7778_CLK_SRU_SRC6 25
+#define R8A7778_CLK_SRU_SRC7 24
+#define R8A7778_CLK_SRU_SRC8 23
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7778_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index 91940271cf83..3f2c6b198d4a 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -21,6 +21,8 @@
#define R8A7790_CLK_SD0 7
#define R8A7790_CLK_SD1 8
#define R8A7790_CLK_Z 9
+#define R8A7790_CLK_RCAN 10
+#define R8A7790_CLK_ADSP 11
/* MSTP0 */
#define R8A7790_CLK_MSIOF0 0
@@ -80,6 +82,7 @@
/* MSTP5 */
#define R8A7790_CLK_AUDIO_DMAC1 1
#define R8A7790_CLK_AUDIO_DMAC0 2
+#define R8A7790_CLK_ADSP_MOD 6
#define R8A7790_CLK_THERMAL 22
#define R8A7790_CLK_PWM 23
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index f096f3f6c16a..8fc5dc8faeea 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -20,6 +20,8 @@
#define R8A7791_CLK_SDH 6
#define R8A7791_CLK_SD0 7
#define R8A7791_CLK_Z 8
+#define R8A7791_CLK_RCAN 9
+#define R8A7791_CLK_ADSP 10
/* MSTP0 */
#define R8A7791_CLK_MSIOF0 0
@@ -71,6 +73,7 @@
/* MSTP5 */
#define R8A7791_CLK_AUDIO_DMAC1 1
#define R8A7791_CLK_AUDIO_DMAC0 2
+#define R8A7791_CLK_ADSP_MOD 6
#define R8A7791_CLK_THERMAL 22
#define R8A7791_CLK_PWM 23
diff --git a/include/dt-bindings/clock/sh73a0-clock.h b/include/dt-bindings/clock/sh73a0-clock.h
index 1dd3eb2b7d90..53369568c24c 100644
--- a/include/dt-bindings/clock/sh73a0-clock.h
+++ b/include/dt-bindings/clock/sh73a0-clock.h
@@ -76,4 +76,7 @@
#define SH73A0_CLK_IIC4 10
#define SH73A0_CLK_KEYSC 3
+/* MSTP5 */
+#define SH73A0_CLK_INTCA0 8
+
#endif
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
index ae2eb17a1658..a2156090563f 100644
--- a/include/dt-bindings/clock/tegra124-car-common.h
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -297,7 +297,7 @@
#define TEGRA124_CLK_PLL_C4 270
#define TEGRA124_CLK_PLL_DP 271
#define TEGRA124_CLK_PLL_E_MUX 272
-#define TEGRA124_CLK_PLLD_DSI 273
+#define TEGRA124_CLK_PLL_D_DSI_OUT 273
/* 274 */
/* 275 */
/* 276 */
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
new file mode 100644
index 000000000000..df017fdfb44e
--- /dev/null
+++ b/include/dt-bindings/dma/jz4780-dma.h
@@ -0,0 +1,49 @@
+#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
+#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
+
+/*
+ * Request type numbers for the JZ4780 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define JZ4780_DMA_I2S1_TX 0x4
+#define JZ4780_DMA_I2S1_RX 0x5
+#define JZ4780_DMA_I2S0_TX 0x6
+#define JZ4780_DMA_I2S0_RX 0x7
+#define JZ4780_DMA_AUTO 0x8
+#define JZ4780_DMA_SADC_RX 0x9
+#define JZ4780_DMA_UART4_TX 0xc
+#define JZ4780_DMA_UART4_RX 0xd
+#define JZ4780_DMA_UART3_TX 0xe
+#define JZ4780_DMA_UART3_RX 0xf
+#define JZ4780_DMA_UART2_TX 0x10
+#define JZ4780_DMA_UART2_RX 0x11
+#define JZ4780_DMA_UART1_TX 0x12
+#define JZ4780_DMA_UART1_RX 0x13
+#define JZ4780_DMA_UART0_TX 0x14
+#define JZ4780_DMA_UART0_RX 0x15
+#define JZ4780_DMA_SSI0_TX 0x16
+#define JZ4780_DMA_SSI0_RX 0x17
+#define JZ4780_DMA_SSI1_TX 0x18
+#define JZ4780_DMA_SSI1_RX 0x19
+#define JZ4780_DMA_MSC0_TX 0x1a
+#define JZ4780_DMA_MSC0_RX 0x1b
+#define JZ4780_DMA_MSC1_TX 0x1c
+#define JZ4780_DMA_MSC1_RX 0x1d
+#define JZ4780_DMA_MSC2_TX 0x1e
+#define JZ4780_DMA_MSC2_RX 0x1f
+#define JZ4780_DMA_PCM0_TX 0x20
+#define JZ4780_DMA_PCM0_RX 0x21
+#define JZ4780_DMA_SMB0_TX 0x24
+#define JZ4780_DMA_SMB0_RX 0x25
+#define JZ4780_DMA_SMB1_TX 0x26
+#define JZ4780_DMA_SMB1_RX 0x27
+#define JZ4780_DMA_SMB2_TX 0x28
+#define JZ4780_DMA_SMB2_RX 0x29
+#define JZ4780_DMA_SMB3_TX 0x2a
+#define JZ4780_DMA_SMB3_RX 0x2b
+#define JZ4780_DMA_SMB4_TX 0x2c
+#define JZ4780_DMA_SMB4_RX 0x2d
+#define JZ4780_DMA_DES_TX 0x2e
+#define JZ4780_DMA_DES_RX 0x2f
+
+#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
diff --git a/include/dt-bindings/gpio/meson8b-gpio.h b/include/dt-bindings/gpio/meson8b-gpio.h
new file mode 100644
index 000000000000..c38cb20d7182
--- /dev/null
+++ b/include/dt-bindings/gpio/meson8b-gpio.h
@@ -0,0 +1,32 @@
+/*
+ * GPIO definitions for Amlogic Meson8b SoCs
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON8B_GPIO_H
+#define _DT_BINDINGS_MESON8B_GPIO_H
+
+#include <dt-bindings/gpio/meson8-gpio.h>
+
+/* GPIO Bank DIF */
+#define DIF_0_P 120
+#define DIF_0_N 121
+#define DIF_1_P 122
+#define DIF_1_N 123
+#define DIF_2_P 124
+#define DIF_2_N 125
+#define DIF_3_P 126
+#define DIF_3_N 127
+#define DIF_4_P 128
+#define DIF_4_N 129
+
+#endif /* _DT_BINDINGS_MESON8B_GPIO_H */
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
new file mode 100644
index 000000000000..79fcef72ef57
--- /dev/null
+++ b/include/dt-bindings/leds/common.h
@@ -0,0 +1,21 @@
+/*
+ * This header provides macros for the common LEDs device tree bindings.
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_LEDS_H__
+#define __DT_BINDINGS_LEDS_H__
+
+/* External trigger type */
+#define LEDS_TRIG_TYPE_EDGE 0
+#define LEDS_TRIG_TYPE_LEVEL 1
+
+/* Boost modes */
+#define LEDS_BOOST_OFF 0
+#define LEDS_BOOST_ADAPTIVE 1
+#define LEDS_BOOST_FIXED 2
+
+#endif /* __DT_BINDINGS_LEDS_H__ */
diff --git a/include/dt-bindings/media/omap3-isp.h b/include/dt-bindings/media/omap3-isp.h
new file mode 100644
index 000000000000..b18c60e468c7
--- /dev/null
+++ b/include/dt-bindings/media/omap3-isp.h
@@ -0,0 +1,22 @@
+/*
+ * include/dt-bindings/media/omap3-isp.h
+ *
+ * Copyright (C) 2015 Sakari Ailus
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __DT_BINDINGS_OMAP3_ISP_H__
+#define __DT_BINDINGS_OMAP3_ISP_H__
+
+#define OMAP3ISP_PHY_TYPE_COMPLEX_IO 0
+#define OMAP3ISP_PHY_TYPE_CSIPHY 1
+
+#endif /* __DT_BINDINGS_OMAP3_ISP_H__ */
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
new file mode 100644
index 000000000000..6298fec00685
--- /dev/null
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx Video IP Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__
+#define __DT_BINDINGS_MEDIA_XILINX_VIP_H__
+
+/*
+ * Video format codes as defined in "AXI4-Stream Video IP and System Design
+ * Guide".
+ */
+#define XVIP_VF_YUV_422 0
+#define XVIP_VF_YUV_444 1
+#define XVIP_VF_RBG 2
+#define XVIP_VF_YUV_420 3
+#define XVIP_VF_YUVA_422 4
+#define XVIP_VF_YUVA_444 5
+#define XVIP_VF_RGBA 6
+#define XVIP_VF_YUVA_420 7
+#define XVIP_VF_YUVD_422 8
+#define XVIP_VF_YUVD_444 9
+#define XVIP_VF_RGBD 10
+#define XVIP_VF_YUVD_420 11
+#define XVIP_VF_MONO_SENSOR 12
+#define XVIP_VF_CUSTOM2 13
+#define XVIP_VF_CUSTOM3 14
+#define XVIP_VF_CUSTOM4 15
+
+#endif /* __DT_BINDINGS_MEDIA_XILINX_VIP_H__ */
diff --git a/include/linux/mfd/arizona/gpio.h b/include/dt-bindings/mfd/arizona.h
index d2146bb74f89..555609910acb 100644
--- a/include/linux/mfd/arizona/gpio.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -1,7 +1,7 @@
/*
- * GPIO configuration for Arizona devices
+ * Device Tree defines for Arizona devices
*
- * Copyright 2013 Wolfson Microelectronics. PLC.
+ * Copyright 2015 Cirrus Logic Inc.
*
* Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
*
@@ -10,9 +10,10 @@
* published by the Free Software Foundation.
*/
-#ifndef _ARIZONA_GPIO_H
-#define _ARIZONA_GPIO_H
+#ifndef _DT_BINDINGS_MFD_ARIZONA_H
+#define _DT_BINDINGS_MFD_ARIZONA_H
+/* GPIO Function Definitions */
#define ARIZONA_GP_FN_TXLRCLK 0x00
#define ARIZONA_GP_FN_GPIO 0x01
#define ARIZONA_GP_FN_IRQ1 0x02
@@ -61,36 +62,46 @@
#define ARIZONA_GP_FN_SYSCLK_ENA_STATUS 0x4B
#define ARIZONA_GP_FN_ASYNCCLK_ENA_STATUS 0x4C
-#define ARIZONA_GPN_DIR 0x8000 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
-#define ARIZONA_GPN_PU 0x4000 /* GPN_PU */
-#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
-#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
-#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
-#define ARIZONA_GPN_PD 0x2000 /* GPN_PD */
-#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
-#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
-#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
-#define ARIZONA_GPN_LVL 0x0800 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
-#define ARIZONA_GPN_POL 0x0400 /* GPN_POL */
-#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
-#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
-#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
-#define ARIZONA_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
-#define ARIZONA_GPN_DB 0x0100 /* GPN_DB */
-#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
-#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
-#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
-#define ARIZONA_GPN_FN_MASK 0x007F /* GPN_DB */
-#define ARIZONA_GPN_FN_SHIFT 0 /* GPN_DB */
-#define ARIZONA_GPN_FN_WIDTH 7 /* GPN_DB */
+/* GPIO Configuration Bits */
+#define ARIZONA_GPN_DIR 0x8000
+#define ARIZONA_GPN_PU 0x4000
+#define ARIZONA_GPN_PD 0x2000
+#define ARIZONA_GPN_LVL 0x0800
+#define ARIZONA_GPN_POL 0x0400
+#define ARIZONA_GPN_OP_CFG 0x0200
+#define ARIZONA_GPN_DB 0x0100
+
+/* Provide some defines for the most common configs */
+#define ARIZONA_GP_DEFAULT 0xffffffff
+#define ARIZONA_GP_OUTPUT (ARIZONA_GP_FN_GPIO)
+#define ARIZONA_GP_INPUT (ARIZONA_GP_FN_GPIO | \
+ ARIZONA_GPN_DIR)
+
+#define ARIZONA_32KZ_MCLK1 1
+#define ARIZONA_32KZ_MCLK2 2
+#define ARIZONA_32KZ_NONE 3
+
+#define ARIZONA_DMIC_MICVDD 0
+#define ARIZONA_DMIC_MICBIAS1 1
+#define ARIZONA_DMIC_MICBIAS2 2
+#define ARIZONA_DMIC_MICBIAS3 3
+
+#define ARIZONA_INMODE_DIFF 0
+#define ARIZONA_INMODE_SE 1
+#define ARIZONA_INMODE_DMIC 2
+
+#define ARIZONA_MICD_TIME_CONTINUOUS 0
+#define ARIZONA_MICD_TIME_250US 1
+#define ARIZONA_MICD_TIME_500US 2
+#define ARIZONA_MICD_TIME_1MS 3
+#define ARIZONA_MICD_TIME_2MS 4
+#define ARIZONA_MICD_TIME_4MS 5
+#define ARIZONA_MICD_TIME_8MS 6
+#define ARIZONA_MICD_TIME_16MS 7
+#define ARIZONA_MICD_TIME_32MS 8
+#define ARIZONA_MICD_TIME_64MS 9
+#define ARIZONA_MICD_TIME_128MS 10
+#define ARIZONA_MICD_TIME_256MS 11
+#define ARIZONA_MICD_TIME_512MS 12
#endif
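The Arizona header above builds a pin configuration by ORing one GP_FN function code with the GPN_* flag bits, exactly as ARIZONA_GP_INPUT does. The following is a minimal, self-contained sketch of that composition; the macro values are copied from the header in this hunk, and the example configuration (a debounced, pulled-up GPIO input) is only an illustration, not a value mandated by the binding.

/* Minimal sketch: values mirror dt-bindings/mfd/arizona.h above. */
#include <stdio.h>

#define ARIZONA_GP_FN_GPIO  0x01    /* pin acts as a plain GPIO */
#define ARIZONA_GPN_DIR     0x8000  /* direction flag (input) */
#define ARIZONA_GPN_PU      0x4000  /* pull-up enable */
#define ARIZONA_GPN_DB      0x0100  /* debounce enable */

/* Same composition style as ARIZONA_GP_INPUT in the header. */
#define EXAMPLE_GP_INPUT_PU_DB \
	(ARIZONA_GP_FN_GPIO | ARIZONA_GPN_DIR | ARIZONA_GPN_PU | ARIZONA_GPN_DB)

int main(void)
{
	/* 0x0001 | 0x8000 | 0x4000 | 0x0100 = 0xc101 */
	printf("config word: 0x%04x\n", EXAMPLE_GP_INPUT_PU_DB);
	return 0;
}

In a device tree, a word like this would normally be placed in the device's GPIO default-configuration property; the exact property name depends on the Arizona binding and is not asserted here.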
diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h
index 388a6f3d6165..13a9d4bf2662 100644
--- a/include/dt-bindings/mfd/qcom-rpm.h
+++ b/include/dt-bindings/mfd/qcom-rpm.h
@@ -141,6 +141,12 @@
#define QCOM_RPM_SYS_FABRIC_MODE 131
#define QCOM_RPM_USB_OTG_SWITCH 132
#define QCOM_RPM_VDDMIN_GPIO 133
+#define QCOM_RPM_NSS_FABRIC_0_CLK 134
+#define QCOM_RPM_NSS_FABRIC_1_CLK 135
+#define QCOM_RPM_SMB208_S1a 136
+#define QCOM_RPM_SMB208_S1b 137
+#define QCOM_RPM_SMB208_S2a 138
+#define QCOM_RPM_SMB208_S2b 139
/*
* Constants used to select force mode for regulators.
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
new file mode 100644
index 000000000000..e3e6c75d8822
--- /dev/null
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -0,0 +1,15 @@
+/*
+ * This header provides shared DT/Driver defines for ST's LPC device
+ *
+ * Copyright (C) 2014 STMicroelectronics -- All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for STMicroelectronics
+ */
+
+#ifndef __DT_BINDINGS_ST_LPC_H__
+#define __DT_BINDINGS_ST_LPC_H__
+
+#define ST_LPC_MODE_RTC 0
+#define ST_LPC_MODE_WDT 1
+
+#endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
new file mode 100644
index 000000000000..172744a72eb7
--- /dev/null
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -0,0 +1,45 @@
+/*
+ * Device Tree constants for the Texas Instruments DP83867 PHY
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2015 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_TI_DP83867_H
+#define _DT_BINDINGS_TI_DP83867_H
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB 0x00
+#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB 0x01
+#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB 0x02
+#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB 0x03
+
+/* RGMIIDCTL internal delay for rx and tx */
+#define DP83867_RGMIIDCTL_250_PS 0x0
+#define DP83867_RGMIIDCTL_500_PS 0x1
+#define DP83867_RGMIIDCTL_750_PS 0x2
+#define DP83867_RGMIIDCTL_1_NS 0x3
+#define DP83867_RGMIIDCTL_1_25_NS 0x4
+#define DP83867_RGMIIDCTL_1_50_NS 0x5
+#define DP83867_RGMIIDCTL_1_75_NS 0x6
+#define DP83867_RGMIIDCTL_2_00_NS 0x7
+#define DP83867_RGMIIDCTL_2_25_NS 0x8
+#define DP83867_RGMIIDCTL_2_50_NS 0x9
+#define DP83867_RGMIIDCTL_2_75_NS 0xa
+#define DP83867_RGMIIDCTL_3_00_NS 0xb
+#define DP83867_RGMIIDCTL_3_25_NS 0xc
+#define DP83867_RGMIIDCTL_3_50_NS 0xd
+#define DP83867_RGMIIDCTL_3_75_NS 0xe
+#define DP83867_RGMIIDCTL_4_00_NS 0xf
+
+#endif
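The DP83867_RGMIIDCTL_* codes above form a linear scale: 0x0 selects 250 ps and each successive code adds another 250 ps, up to 4.00 ns at 0xf. The sketch below maps a requested delay in picoseconds onto that scale; the helper name, clamping, and rounding policy are illustrative assumptions and are not part of the PHY driver or binding.

/* Illustrative only: maps a delay in picoseconds onto the
 * DP83867_RGMIIDCTL_* scale (250 ps steps, codes 0x0..0xf).
 */
#include <stdio.h>

static unsigned int dp83867_ps_to_rgmiidctl(unsigned int ps)
{
	if (ps < 250)
		ps = 250;	/* clamp to the minimum delay */
	if (ps > 4000)
		ps = 4000;	/* clamp to the maximum delay */

	/* Round to the nearest 250 ps step, then shift to a 0-based code. */
	return (ps + 125) / 250 - 1;
}

int main(void)
{
	/* 2000 ps -> 0x7, matching DP83867_RGMIIDCTL_2_00_NS above. */
	printf("0x%x\n", dp83867_ps_to_rgmiidctl(2000));
	return 0;
}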
diff --git a/include/dt-bindings/pinctrl/mt6397-pinfunc.h b/include/dt-bindings/pinctrl/mt6397-pinfunc.h
new file mode 100644
index 000000000000..85739b308c2f
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6397-pinfunc.h
@@ -0,0 +1,256 @@
+#ifndef __DTS_MT6397_PINFUNC_H
+#define __DTS_MT6397_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT6397_PIN_0_INT__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT6397_PIN_0_INT__FUNC_INT (MTK_PIN_NO(0) | 1)
+
+#define MT6397_PIN_1_SRCVOLTEN__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT6397_PIN_1_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(1) | 1)
+#define MT6397_PIN_1_SRCVOLTEN__FUNC_TEST_CK1 (MTK_PIN_NO(1) | 6)
+
+#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_SRCLKEN_PERI (MTK_PIN_NO(2) | 1)
+#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_TEST_CK2 (MTK_PIN_NO(2) | 6)
+
+#define MT6397_PIN_3_RTC_32K1V8__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT6397_PIN_3_RTC_32K1V8__FUNC_RTC_32K1V8 (MTK_PIN_NO(3) | 1)
+#define MT6397_PIN_3_RTC_32K1V8__FUNC_TEST_CK3 (MTK_PIN_NO(3) | 6)
+
+#define MT6397_PIN_4_WRAP_EVENT__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT6397_PIN_4_WRAP_EVENT__FUNC_WRAP_EVENT (MTK_PIN_NO(4) | 1)
+
+#define MT6397_PIN_5_SPI_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT6397_PIN_5_SPI_CLK__FUNC_SPI_CLK (MTK_PIN_NO(5) | 1)
+
+#define MT6397_PIN_6_SPI_CSN__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT6397_PIN_6_SPI_CSN__FUNC_SPI_CSN (MTK_PIN_NO(6) | 1)
+
+#define MT6397_PIN_7_SPI_MOSI__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT6397_PIN_7_SPI_MOSI__FUNC_SPI_MOSI (MTK_PIN_NO(7) | 1)
+
+#define MT6397_PIN_8_SPI_MISO__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT6397_PIN_8_SPI_MISO__FUNC_SPI_MISO (MTK_PIN_NO(8) | 1)
+
+#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(9) | 1)
+#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_IN0 (MTK_PIN_NO(9) | 6)
+#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_OUT0 (MTK_PIN_NO(9) | 7)
+
+#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_AUD_MISO (MTK_PIN_NO(10) | 1)
+#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_IN1 (MTK_PIN_NO(10) | 6)
+#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_OUT1 (MTK_PIN_NO(10) | 7)
+
+#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_AUD_MOSI (MTK_PIN_NO(11) | 1)
+#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_IN2 (MTK_PIN_NO(11) | 6)
+#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_OUT2 (MTK_PIN_NO(11) | 7)
+
+#define MT6397_PIN_12_COL0__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT6397_PIN_12_COL0__FUNC_COL0_USBDL (MTK_PIN_NO(12) | 1)
+#define MT6397_PIN_12_COL0__FUNC_EINT10_1X (MTK_PIN_NO(12) | 2)
+#define MT6397_PIN_12_COL0__FUNC_PWM1_3X (MTK_PIN_NO(12) | 3)
+#define MT6397_PIN_12_COL0__FUNC_TEST_IN3 (MTK_PIN_NO(12) | 6)
+#define MT6397_PIN_12_COL0__FUNC_TEST_OUT3 (MTK_PIN_NO(12) | 7)
+
+#define MT6397_PIN_13_COL1__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT6397_PIN_13_COL1__FUNC_COL1 (MTK_PIN_NO(13) | 1)
+#define MT6397_PIN_13_COL1__FUNC_EINT11_1X (MTK_PIN_NO(13) | 2)
+#define MT6397_PIN_13_COL1__FUNC_SCL0_2X (MTK_PIN_NO(13) | 3)
+#define MT6397_PIN_13_COL1__FUNC_TEST_IN4 (MTK_PIN_NO(13) | 6)
+#define MT6397_PIN_13_COL1__FUNC_TEST_OUT4 (MTK_PIN_NO(13) | 7)
+
+#define MT6397_PIN_14_COL2__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT6397_PIN_14_COL2__FUNC_COL2 (MTK_PIN_NO(14) | 1)
+#define MT6397_PIN_14_COL2__FUNC_EINT12_1X (MTK_PIN_NO(14) | 2)
+#define MT6397_PIN_14_COL2__FUNC_SDA0_2X (MTK_PIN_NO(14) | 3)
+#define MT6397_PIN_14_COL2__FUNC_TEST_IN5 (MTK_PIN_NO(14) | 6)
+#define MT6397_PIN_14_COL2__FUNC_TEST_OUT5 (MTK_PIN_NO(14) | 7)
+
+#define MT6397_PIN_15_COL3__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT6397_PIN_15_COL3__FUNC_COL3 (MTK_PIN_NO(15) | 1)
+#define MT6397_PIN_15_COL3__FUNC_EINT13_1X (MTK_PIN_NO(15) | 2)
+#define MT6397_PIN_15_COL3__FUNC_SCL1_2X (MTK_PIN_NO(15) | 3)
+#define MT6397_PIN_15_COL3__FUNC_TEST_IN6 (MTK_PIN_NO(15) | 6)
+#define MT6397_PIN_15_COL3__FUNC_TEST_OUT6 (MTK_PIN_NO(15) | 7)
+
+#define MT6397_PIN_16_COL4__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT6397_PIN_16_COL4__FUNC_COL4 (MTK_PIN_NO(16) | 1)
+#define MT6397_PIN_16_COL4__FUNC_EINT14_1X (MTK_PIN_NO(16) | 2)
+#define MT6397_PIN_16_COL4__FUNC_SDA1_2X (MTK_PIN_NO(16) | 3)
+#define MT6397_PIN_16_COL4__FUNC_TEST_IN7 (MTK_PIN_NO(16) | 6)
+#define MT6397_PIN_16_COL4__FUNC_TEST_OUT7 (MTK_PIN_NO(16) | 7)
+
+#define MT6397_PIN_17_COL5__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT6397_PIN_17_COL5__FUNC_COL5 (MTK_PIN_NO(17) | 1)
+#define MT6397_PIN_17_COL5__FUNC_EINT15_1X (MTK_PIN_NO(17) | 2)
+#define MT6397_PIN_17_COL5__FUNC_SCL2_2X (MTK_PIN_NO(17) | 3)
+#define MT6397_PIN_17_COL5__FUNC_TEST_IN8 (MTK_PIN_NO(17) | 6)
+#define MT6397_PIN_17_COL5__FUNC_TEST_OUT8 (MTK_PIN_NO(17) | 7)
+
+#define MT6397_PIN_18_COL6__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT6397_PIN_18_COL6__FUNC_COL6 (MTK_PIN_NO(18) | 1)
+#define MT6397_PIN_18_COL6__FUNC_EINT16_1X (MTK_PIN_NO(18) | 2)
+#define MT6397_PIN_18_COL6__FUNC_SDA2_2X (MTK_PIN_NO(18) | 3)
+#define MT6397_PIN_18_COL6__FUNC_GPIO32K_0 (MTK_PIN_NO(18) | 4)
+#define MT6397_PIN_18_COL6__FUNC_GPIO26M_0 (MTK_PIN_NO(18) | 5)
+#define MT6397_PIN_18_COL6__FUNC_TEST_IN9 (MTK_PIN_NO(18) | 6)
+#define MT6397_PIN_18_COL6__FUNC_TEST_OUT9 (MTK_PIN_NO(18) | 7)
+
+#define MT6397_PIN_19_COL7__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT6397_PIN_19_COL7__FUNC_COL7 (MTK_PIN_NO(19) | 1)
+#define MT6397_PIN_19_COL7__FUNC_EINT17_1X (MTK_PIN_NO(19) | 2)
+#define MT6397_PIN_19_COL7__FUNC_PWM2_3X (MTK_PIN_NO(19) | 3)
+#define MT6397_PIN_19_COL7__FUNC_GPIO32K_1 (MTK_PIN_NO(19) | 4)
+#define MT6397_PIN_19_COL7__FUNC_GPIO26M_1 (MTK_PIN_NO(19) | 5)
+#define MT6397_PIN_19_COL7__FUNC_TEST_IN10 (MTK_PIN_NO(19) | 6)
+#define MT6397_PIN_19_COL7__FUNC_TEST_OUT10 (MTK_PIN_NO(19) | 7)
+
+#define MT6397_PIN_20_ROW0__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT6397_PIN_20_ROW0__FUNC_ROW0 (MTK_PIN_NO(20) | 1)
+#define MT6397_PIN_20_ROW0__FUNC_EINT18_1X (MTK_PIN_NO(20) | 2)
+#define MT6397_PIN_20_ROW0__FUNC_SCL0_3X (MTK_PIN_NO(20) | 3)
+#define MT6397_PIN_20_ROW0__FUNC_TEST_IN11 (MTK_PIN_NO(20) | 6)
+#define MT6397_PIN_20_ROW0__FUNC_TEST_OUT11 (MTK_PIN_NO(20) | 7)
+
+#define MT6397_PIN_21_ROW1__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT6397_PIN_21_ROW1__FUNC_ROW1 (MTK_PIN_NO(21) | 1)
+#define MT6397_PIN_21_ROW1__FUNC_EINT19_1X (MTK_PIN_NO(21) | 2)
+#define MT6397_PIN_21_ROW1__FUNC_SDA0_3X (MTK_PIN_NO(21) | 3)
+#define MT6397_PIN_21_ROW1__FUNC_AUD_TSTCK (MTK_PIN_NO(21) | 4)
+#define MT6397_PIN_21_ROW1__FUNC_TEST_IN12 (MTK_PIN_NO(21) | 6)
+#define MT6397_PIN_21_ROW1__FUNC_TEST_OUT12 (MTK_PIN_NO(21) | 7)
+
+#define MT6397_PIN_22_ROW2__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT6397_PIN_22_ROW2__FUNC_ROW2 (MTK_PIN_NO(22) | 1)
+#define MT6397_PIN_22_ROW2__FUNC_EINT20_1X (MTK_PIN_NO(22) | 2)
+#define MT6397_PIN_22_ROW2__FUNC_SCL1_3X (MTK_PIN_NO(22) | 3)
+#define MT6397_PIN_22_ROW2__FUNC_TEST_IN13 (MTK_PIN_NO(22) | 6)
+#define MT6397_PIN_22_ROW2__FUNC_TEST_OUT13 (MTK_PIN_NO(22) | 7)
+
+#define MT6397_PIN_23_ROW3__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT6397_PIN_23_ROW3__FUNC_ROW3 (MTK_PIN_NO(23) | 1)
+#define MT6397_PIN_23_ROW3__FUNC_EINT21_1X (MTK_PIN_NO(23) | 2)
+#define MT6397_PIN_23_ROW3__FUNC_SDA1_3X (MTK_PIN_NO(23) | 3)
+#define MT6397_PIN_23_ROW3__FUNC_TEST_IN14 (MTK_PIN_NO(23) | 6)
+#define MT6397_PIN_23_ROW3__FUNC_TEST_OUT14 (MTK_PIN_NO(23) | 7)
+
+#define MT6397_PIN_24_ROW4__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT6397_PIN_24_ROW4__FUNC_ROW4 (MTK_PIN_NO(24) | 1)
+#define MT6397_PIN_24_ROW4__FUNC_EINT22_1X (MTK_PIN_NO(24) | 2)
+#define MT6397_PIN_24_ROW4__FUNC_SCL2_3X (MTK_PIN_NO(24) | 3)
+#define MT6397_PIN_24_ROW4__FUNC_TEST_IN15 (MTK_PIN_NO(24) | 6)
+#define MT6397_PIN_24_ROW4__FUNC_TEST_OUT15 (MTK_PIN_NO(24) | 7)
+
+#define MT6397_PIN_25_ROW5__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT6397_PIN_25_ROW5__FUNC_ROW5 (MTK_PIN_NO(25) | 1)
+#define MT6397_PIN_25_ROW5__FUNC_EINT23_1X (MTK_PIN_NO(25) | 2)
+#define MT6397_PIN_25_ROW5__FUNC_SDA2_3X (MTK_PIN_NO(25) | 3)
+#define MT6397_PIN_25_ROW5__FUNC_TEST_IN16 (MTK_PIN_NO(25) | 6)
+#define MT6397_PIN_25_ROW5__FUNC_TEST_OUT16 (MTK_PIN_NO(25) | 7)
+
+#define MT6397_PIN_26_ROW6__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT6397_PIN_26_ROW6__FUNC_ROW6 (MTK_PIN_NO(26) | 1)
+#define MT6397_PIN_26_ROW6__FUNC_EINT24_1X (MTK_PIN_NO(26) | 2)
+#define MT6397_PIN_26_ROW6__FUNC_PWM3_3X (MTK_PIN_NO(26) | 3)
+#define MT6397_PIN_26_ROW6__FUNC_GPIO32K_2 (MTK_PIN_NO(26) | 4)
+#define MT6397_PIN_26_ROW6__FUNC_GPIO26M_2 (MTK_PIN_NO(26) | 5)
+#define MT6397_PIN_26_ROW6__FUNC_TEST_IN17 (MTK_PIN_NO(26) | 6)
+#define MT6397_PIN_26_ROW6__FUNC_TEST_OUT17 (MTK_PIN_NO(26) | 7)
+
+#define MT6397_PIN_27_ROW7__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT6397_PIN_27_ROW7__FUNC_ROW7 (MTK_PIN_NO(27) | 1)
+#define MT6397_PIN_27_ROW7__FUNC_EINT3_1X (MTK_PIN_NO(27) | 2)
+#define MT6397_PIN_27_ROW7__FUNC_CBUS (MTK_PIN_NO(27) | 3)
+#define MT6397_PIN_27_ROW7__FUNC_GPIO32K_3 (MTK_PIN_NO(27) | 4)
+#define MT6397_PIN_27_ROW7__FUNC_GPIO26M_3 (MTK_PIN_NO(27) | 5)
+#define MT6397_PIN_27_ROW7__FUNC_TEST_IN18 (MTK_PIN_NO(27) | 6)
+#define MT6397_PIN_27_ROW7__FUNC_TEST_OUT18 (MTK_PIN_NO(27) | 7)
+
+#define MT6397_PIN_28_PWM1__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT6397_PIN_28_PWM1__FUNC_PWM1 (MTK_PIN_NO(28) | 1)
+#define MT6397_PIN_28_PWM1__FUNC_EINT4_1X (MTK_PIN_NO(28) | 2)
+#define MT6397_PIN_28_PWM1__FUNC_GPIO32K_4 (MTK_PIN_NO(28) | 4)
+#define MT6397_PIN_28_PWM1__FUNC_GPIO26M_4 (MTK_PIN_NO(28) | 5)
+#define MT6397_PIN_28_PWM1__FUNC_TEST_IN19 (MTK_PIN_NO(28) | 6)
+#define MT6397_PIN_28_PWM1__FUNC_TEST_OUT19 (MTK_PIN_NO(28) | 7)
+
+#define MT6397_PIN_29_PWM2__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT6397_PIN_29_PWM2__FUNC_PWM2 (MTK_PIN_NO(29) | 1)
+#define MT6397_PIN_29_PWM2__FUNC_EINT5_1X (MTK_PIN_NO(29) | 2)
+#define MT6397_PIN_29_PWM2__FUNC_GPIO32K_5 (MTK_PIN_NO(29) | 4)
+#define MT6397_PIN_29_PWM2__FUNC_GPIO26M_5 (MTK_PIN_NO(29) | 5)
+#define MT6397_PIN_29_PWM2__FUNC_TEST_IN20 (MTK_PIN_NO(29) | 6)
+#define MT6397_PIN_29_PWM2__FUNC_TEST_OUT20 (MTK_PIN_NO(29) | 7)
+
+#define MT6397_PIN_30_PWM3__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT6397_PIN_30_PWM3__FUNC_PWM3 (MTK_PIN_NO(30) | 1)
+#define MT6397_PIN_30_PWM3__FUNC_EINT6_1X (MTK_PIN_NO(30) | 2)
+#define MT6397_PIN_30_PWM3__FUNC_COL0 (MTK_PIN_NO(30) | 3)
+#define MT6397_PIN_30_PWM3__FUNC_GPIO32K_6 (MTK_PIN_NO(30) | 4)
+#define MT6397_PIN_30_PWM3__FUNC_GPIO26M_6 (MTK_PIN_NO(30) | 5)
+#define MT6397_PIN_30_PWM3__FUNC_TEST_IN21 (MTK_PIN_NO(30) | 6)
+#define MT6397_PIN_30_PWM3__FUNC_TEST_OUT21 (MTK_PIN_NO(30) | 7)
+
+#define MT6397_PIN_31_SCL0__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT6397_PIN_31_SCL0__FUNC_SCL0 (MTK_PIN_NO(31) | 1)
+#define MT6397_PIN_31_SCL0__FUNC_EINT7_1X (MTK_PIN_NO(31) | 2)
+#define MT6397_PIN_31_SCL0__FUNC_PWM1_2X (MTK_PIN_NO(31) | 3)
+#define MT6397_PIN_31_SCL0__FUNC_TEST_IN22 (MTK_PIN_NO(31) | 6)
+#define MT6397_PIN_31_SCL0__FUNC_TEST_OUT22 (MTK_PIN_NO(31) | 7)
+
+#define MT6397_PIN_32_SDA0__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT6397_PIN_32_SDA0__FUNC_SDA0 (MTK_PIN_NO(32) | 1)
+#define MT6397_PIN_32_SDA0__FUNC_EINT8_1X (MTK_PIN_NO(32) | 2)
+#define MT6397_PIN_32_SDA0__FUNC_TEST_IN23 (MTK_PIN_NO(32) | 6)
+#define MT6397_PIN_32_SDA0__FUNC_TEST_OUT23 (MTK_PIN_NO(32) | 7)
+
+#define MT6397_PIN_33_SCL1__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT6397_PIN_33_SCL1__FUNC_SCL1 (MTK_PIN_NO(33) | 1)
+#define MT6397_PIN_33_SCL1__FUNC_EINT9_1X (MTK_PIN_NO(33) | 2)
+#define MT6397_PIN_33_SCL1__FUNC_PWM2_2X (MTK_PIN_NO(33) | 3)
+#define MT6397_PIN_33_SCL1__FUNC_TEST_IN24 (MTK_PIN_NO(33) | 6)
+#define MT6397_PIN_33_SCL1__FUNC_TEST_OUT24 (MTK_PIN_NO(33) | 7)
+
+#define MT6397_PIN_34_SDA1__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT6397_PIN_34_SDA1__FUNC_SDA1 (MTK_PIN_NO(34) | 1)
+#define MT6397_PIN_34_SDA1__FUNC_EINT0_1X (MTK_PIN_NO(34) | 2)
+#define MT6397_PIN_34_SDA1__FUNC_TEST_IN25 (MTK_PIN_NO(34) | 6)
+#define MT6397_PIN_34_SDA1__FUNC_TEST_OUT25 (MTK_PIN_NO(34) | 7)
+
+#define MT6397_PIN_35_SCL2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT6397_PIN_35_SCL2__FUNC_SCL2 (MTK_PIN_NO(35) | 1)
+#define MT6397_PIN_35_SCL2__FUNC_EINT1_1X (MTK_PIN_NO(35) | 2)
+#define MT6397_PIN_35_SCL2__FUNC_PWM3_2X (MTK_PIN_NO(35) | 3)
+#define MT6397_PIN_35_SCL2__FUNC_TEST_IN26 (MTK_PIN_NO(35) | 6)
+#define MT6397_PIN_35_SCL2__FUNC_TEST_OUT26 (MTK_PIN_NO(35) | 7)
+
+#define MT6397_PIN_36_SDA2__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT6397_PIN_36_SDA2__FUNC_SDA2 (MTK_PIN_NO(36) | 1)
+#define MT6397_PIN_36_SDA2__FUNC_EINT2_1X (MTK_PIN_NO(36) | 2)
+#define MT6397_PIN_36_SDA2__FUNC_TEST_IN27 (MTK_PIN_NO(36) | 6)
+#define MT6397_PIN_36_SDA2__FUNC_TEST_OUT27 (MTK_PIN_NO(36) | 7)
+
+#define MT6397_PIN_37_HDMISD__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT6397_PIN_37_HDMISD__FUNC_HDMISD (MTK_PIN_NO(37) | 1)
+#define MT6397_PIN_37_HDMISD__FUNC_TEST_IN28 (MTK_PIN_NO(37) | 6)
+#define MT6397_PIN_37_HDMISD__FUNC_TEST_OUT28 (MTK_PIN_NO(37) | 7)
+
+#define MT6397_PIN_38_HDMISCK__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT6397_PIN_38_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(38) | 1)
+#define MT6397_PIN_38_HDMISCK__FUNC_TEST_IN29 (MTK_PIN_NO(38) | 6)
+#define MT6397_PIN_38_HDMISCK__FUNC_TEST_OUT29 (MTK_PIN_NO(38) | 7)
+
+#define MT6397_PIN_39_HTPLG__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT6397_PIN_39_HTPLG__FUNC_HTPLG (MTK_PIN_NO(39) | 1)
+#define MT6397_PIN_39_HTPLG__FUNC_TEST_IN30 (MTK_PIN_NO(39) | 6)
+#define MT6397_PIN_39_HTPLG__FUNC_TEST_OUT30 (MTK_PIN_NO(39) | 7)
+
+#define MT6397_PIN_40_CEC__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT6397_PIN_40_CEC__FUNC_CEC (MTK_PIN_NO(40) | 1)
+#define MT6397_PIN_40_CEC__FUNC_TEST_IN31 (MTK_PIN_NO(40) | 6)
+#define MT6397_PIN_40_CEC__FUNC_TEST_OUT31 (MTK_PIN_NO(40) | 7)
+
+#endif /* __DTS_MT6397_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h
new file mode 100644
index 000000000000..1198f4541327
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt65xx.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_MT65XX_H
+#define _DT_BINDINGS_PINCTRL_MT65XX_H
+
+#define MTK_PIN_NO(x) ((x) << 8)
+#define MTK_GET_PIN_NO(x) ((x) >> 8)
+#define MTK_GET_PIN_FUNC(x) ((x) & 0xf)
+
+#define MTK_PUPD_SET_R1R0_00 100
+#define MTK_PUPD_SET_R1R0_01 101
+#define MTK_PUPD_SET_R1R0_10 102
+#define MTK_PUPD_SET_R1R0_11 103
+
+#define MTK_DRIVE_2mA 2
+#define MTK_DRIVE_4mA 4
+#define MTK_DRIVE_6mA 6
+#define MTK_DRIVE_8mA 8
+#define MTK_DRIVE_10mA 10
+#define MTK_DRIVE_12mA 12
+#define MTK_DRIVE_14mA 14
+#define MTK_DRIVE_16mA 16
+#define MTK_DRIVE_20mA 20
+#define MTK_DRIVE_24mA 24
+#define MTK_DRIVE_28mA 28
+#define MTK_DRIVE_32mA 32
+
+#endif /* _DT_BINDINGS_PINCTRL_MT65XX_H */
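Every MT6397 pinfunc value earlier in this diff is built as MTK_PIN_NO(pin) | function, and the mt65xx header just above supplies the matching decode macros. The snippet below is a small self-contained round-trip check of that encoding; the macro definitions are copied from the header and the chosen pin/function pair is only an example.

/* Round-trip check for the mt65xx pin/function encoding shown above. */
#include <assert.h>
#include <stdio.h>

#define MTK_PIN_NO(x)       ((x) << 8)
#define MTK_GET_PIN_NO(x)   ((x) >> 8)
#define MTK_GET_PIN_FUNC(x) ((x) & 0xf)

/* Same construction as MT6397_PIN_31_SCL0__FUNC_PWM1_2X in the diff. */
#define PIN_31_FUNC_3 (MTK_PIN_NO(31) | 3)

int main(void)
{
	assert(MTK_GET_PIN_NO(PIN_31_FUNC_3) == 31);
	assert(MTK_GET_PIN_FUNC(PIN_31_FUNC_3) == 3);
	printf("pin %d, function %d\n",
	       MTK_GET_PIN_NO(PIN_31_FUNC_3),
	       MTK_GET_PIN_FUNC(PIN_31_FUNC_3));
	return 0;
}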
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index fa74d7cc960c..aafa76cb569d 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -48,6 +48,14 @@
#define PM8058_GPIO_L5 6
#define PM8058_GPIO_L2 7
+/*
+ * Note: PM8916 GPIO1 and GPIO2 support only the
+ * L2 (1.15V) and L5 (1.8V) options.
+ */
+#define PM8916_GPIO_VPH 0
+#define PM8916_GPIO_L2 2
+#define PM8916_GPIO_L5 3
+
#define PM8917_GPIO_VPH 0
#define PM8917_GPIO_S4 2
#define PM8917_GPIO_L15 3
@@ -115,6 +123,13 @@
#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1
#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO1_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO1_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+#define PM8916_GPIO2_DIV_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO2_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2
+#define PM8916_GPIO3_KEYP_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO4_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+
#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1
#define PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index d2c7dabe3223..c10205491f8d 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -10,6 +10,10 @@
#define PM8841_MPP_VPH 0
#define PM8841_MPP_S3 2
+#define PM8916_MPP_VPH 0
+#define PM8916_MPP_L2 2
+#define PM8916_MPP_L5 3
+
#define PM8941_MPP_VPH 0
#define PM8941_MPP_L1 1
#define PM8941_MPP_S3 2
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
index 0ad5ef930b5d..de9c8140931a 100644
--- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -129,4 +129,47 @@
#define USB30_1_PHY_RESET 112
#define NSSFB0_RESET 113
#define NSSFB1_RESET 114
+#define UBI32_CORE1_CLKRST_CLAMP_RESET 115
+#define UBI32_CORE1_CLAMP_RESET 116
+#define UBI32_CORE1_AHB_RESET 117
+#define UBI32_CORE1_AXI_RESET 118
+#define UBI32_CORE2_CLKRST_CLAMP_RESET 119
+#define UBI32_CORE2_CLAMP_RESET 120
+#define UBI32_CORE2_AHB_RESET 121
+#define UBI32_CORE2_AXI_RESET 122
+#define GMAC_CORE1_RESET 123
+#define GMAC_CORE2_RESET 124
+#define GMAC_CORE3_RESET 125
+#define GMAC_CORE4_RESET 126
+#define GMAC_AHB_RESET 127
+#define NSS_CH0_RST_RX_CLK_N_RESET 128
+#define NSS_CH0_RST_TX_CLK_N_RESET 129
+#define NSS_CH0_RST_RX_125M_N_RESET 130
+#define NSS_CH0_HW_RST_RX_125M_N_RESET 131
+#define NSS_CH0_RST_TX_125M_N_RESET 132
+#define NSS_CH1_RST_RX_CLK_N_RESET 133
+#define NSS_CH1_RST_TX_CLK_N_RESET 134
+#define NSS_CH1_RST_RX_125M_N_RESET 135
+#define NSS_CH1_HW_RST_RX_125M_N_RESET 136
+#define NSS_CH1_RST_TX_125M_N_RESET 137
+#define NSS_CH2_RST_RX_CLK_N_RESET 138
+#define NSS_CH2_RST_TX_CLK_N_RESET 139
+#define NSS_CH2_RST_RX_125M_N_RESET 140
+#define NSS_CH2_HW_RST_RX_125M_N_RESET 141
+#define NSS_CH2_RST_TX_125M_N_RESET 142
+#define NSS_CH3_RST_RX_CLK_N_RESET 143
+#define NSS_CH3_RST_TX_CLK_N_RESET 144
+#define NSS_CH3_RST_RX_125M_N_RESET 145
+#define NSS_CH3_HW_RST_RX_125M_N_RESET 146
+#define NSS_CH3_RST_TX_125M_N_RESET 147
+#define NSS_RST_RX_250M_125M_N_RESET 148
+#define NSS_RST_TX_250M_125M_N_RESET 149
+#define NSS_QSGMII_TXPI_RST_N_RESET 150
+#define NSS_QSGMII_CDR_RST_N_RESET 151
+#define NSS_SGMII2_CDR_RST_N_RESET 152
+#define NSS_SGMII3_CDR_RST_N_RESET 153
+#define NSS_CAL_PRBS_RST_N_RESET 154
+#define NSS_LCKDT_RST_N_RESET 155
+#define NSS_SRDS_N_RESET 156
+
#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8916.h b/include/dt-bindings/reset/qcom,gcc-msm8916.h
new file mode 100644
index 000000000000..3d90410f09c7
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8916.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8916_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8916_H
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_IMEM_BCR 9
+#define GCC_SMMU_BCR 10
+#define GCC_APSS_TCU_BCR 11
+#define GCC_SMMU_XPU_BCR 12
+#define GCC_PCNOC_TBU_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_CRYPTO_BCR 16
+#define GCC_SEC_CTRL_BCR 17
+#define GCC_AUDIO_CORE_BCR 18
+#define GCC_ULT_AUDIO_BCR 19
+#define GCC_DEHR_BCR 20
+#define GCC_SYSTEM_NOC_BCR 21
+#define GCC_PCNOC_BCR 22
+#define GCC_TCSR_BCR 23
+#define GCC_QDSS_BCR 24
+#define GCC_DCD_BCR 25
+#define GCC_MSG_RAM_BCR 26
+#define GCC_MPM_BCR 27
+#define GCC_SPMI_BCR 28
+#define GCC_SPDM_BCR 29
+#define GCC_MM_SPDM_BCR 30
+#define GCC_BIMC_BCR 31
+#define GCC_RBCPR_BCR 32
+#define GCC_TLMM_BCR 33
+#define GCC_USB_HS_BCR 34
+#define GCC_USB2A_PHY_BCR 35
+#define GCC_SDCC1_BCR 36
+#define GCC_SDCC2_BCR 37
+#define GCC_PDM_BCR 38
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 39
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49
+#define GCC_MMSS_BCR 50
+#define GCC_VENUS0_BCR 51
+#define GCC_MDSS_BCR 52
+#define GCC_CAMSS_PHY0_BCR 53
+#define GCC_CAMSS_CSI0_BCR 54
+#define GCC_CAMSS_CSI0PHY_BCR 55
+#define GCC_CAMSS_CSI0RDI_BCR 56
+#define GCC_CAMSS_CSI0PIX_BCR 57
+#define GCC_CAMSS_PHY1_BCR 58
+#define GCC_CAMSS_CSI1_BCR 59
+#define GCC_CAMSS_CSI1PHY_BCR 60
+#define GCC_CAMSS_CSI1RDI_BCR 61
+#define GCC_CAMSS_CSI1PIX_BCR 62
+#define GCC_CAMSS_ISPIF_BCR 63
+#define GCC_CAMSS_CCI_BCR 64
+#define GCC_CAMSS_MCLK0_BCR 65
+#define GCC_CAMSS_MCLK1_BCR 66
+#define GCC_CAMSS_GP0_BCR 67
+#define GCC_CAMSS_GP1_BCR 68
+#define GCC_CAMSS_TOP_BCR 69
+#define GCC_CAMSS_MICRO_BCR 70
+#define GCC_CAMSS_JPEG_BCR 71
+#define GCC_CAMSS_VFE_BCR 72
+#define GCC_CAMSS_CSI_VFE0_BCR 73
+#define GCC_OXILI_BCR 74
+#define GCC_GMEM_BCR 75
+#define GCC_CAMSS_AHB_BCR 76
+#define GCC_MDP_TBU_BCR 77
+#define GCC_GFX_TBU_BCR 78
+#define GCC_GFX_TCU_BCR 79
+#define GCC_MSS_TBU_AXI_BCR 80
+#define GCC_MSS_TBU_GSS_AXI_BCR 81
+#define GCC_MSS_TBU_Q6_AXI_BCR 82
+#define GCC_GTCU_AHB_BCR 83
+#define GCC_SMMU_CFG_BCR 84
+#define GCC_VFE_TBU_BCR 85
+#define GCC_VENUS_TBU_BCR 86
+#define GCC_JPEG_TBU_BCR 87
+#define GCC_PRONTO_TBU_BCR 88
+#define GCC_SMMU_CATS_BCR 89
+
+#endif
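These BCR indices are consumed from a board's device tree; on the driver side the corresponding lines are then driven through the generic reset framework. A rough consumer-side sketch, not taken from this patch set (the "core" reset name, hold time and function name are invented for illustration):

#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Illustrative only: assumes the DT node carries a "core" reset phandle
 * that points at one of the GCC_*_BCR indices defined above. */
static int foo_reset_block(struct platform_device *pdev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(&pdev->dev, "core");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* put the block into reset */
	udelay(10);			/* hold time is hardware specific */
	return reset_control_deassert(rst);
}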
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
index 220f14338895..ee884168989f 100644
--- a/include/linux/a.out.h
+++ b/include/linux/a.out.h
@@ -4,44 +4,6 @@
#include <uapi/linux/a.out.h>
#ifndef __ASSEMBLY__
-#if defined (M_OLDSUN2)
-#else
-#endif
-#if defined (M_68010)
-#else
-#endif
-#if defined (M_68020)
-#else
-#endif
-#if defined (M_SPARC)
-#else
-#endif
-#if !defined (N_MAGIC)
-#endif
-#if !defined (N_BADMAG)
-#endif
-#if !defined (N_TXTOFF)
-#endif
-#if !defined (N_DATOFF)
-#endif
-#if !defined (N_TRELOFF)
-#endif
-#if !defined (N_DRELOFF)
-#endif
-#if !defined (N_SYMOFF)
-#endif
-#if !defined (N_STROFF)
-#endif
-#if !defined (N_TXTADDR)
-#endif
-#if defined(vax) || defined(hp300) || defined(pyr)
-#endif
-#ifdef sony
-#endif /* Sony. */
-#ifdef is68k
-#endif
-#if defined(m68k) && defined(PORTAR)
-#endif
#ifdef linux
#include <asm/page.h>
#if defined(__i386__) || defined(__mc68000__)
@@ -51,34 +13,5 @@
#endif
#endif
#endif
-#ifndef N_DATADDR
-#endif
-#if !defined (N_BSSADDR)
-#endif
-#if !defined (N_NLIST_DECLARED)
-#endif /* no N_NLIST_DECLARED. */
-#if !defined (N_UNDF)
-#endif
-#if !defined (N_ABS)
-#endif
-#if !defined (N_TEXT)
-#endif
-#if !defined (N_DATA)
-#endif
-#if !defined (N_BSS)
-#endif
-#if !defined (N_FN)
-#endif
-#if !defined (N_EXT)
-#endif
-#if !defined (N_TYPE)
-#endif
-#if !defined (N_STAB)
-#endif
-#if !defined (N_RELOCATION_INFO_DECLARED)
-#ifdef NS32K
-#else
-#endif
-#endif /* no N_RELOCATION_INFO_DECLARED. */
#endif /*__ASSEMBLY__ */
#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 24c7aa8b1d20..c187817471fb 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -53,10 +53,16 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
return adev ? adev->handle : NULL;
}
-#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion)
-#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev)
+#define ACPI_COMPANION(dev) acpi_node((dev)->fwnode)
+#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
+ acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+static inline bool has_acpi_companion(struct device *dev)
+{
+ return is_acpi_node(dev->fwnode);
+}
+
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
@@ -73,6 +79,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOAPIC,
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
+ ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_COUNT
};
@@ -146,9 +153,24 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
+#ifndef PHYS_CPUID_INVALID
+typedef u32 phys_cpuid_t;
+#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
+#endif
+
+static inline bool invalid_logical_cpuid(u32 cpuid)
+{
+ return (int)cpuid < 0;
+}
+
+static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
+{
+ return phys_id == PHYS_CPUID_INVALID;
+}
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
@@ -231,50 +253,12 @@ extern bool wmi_has_guid(const char *guid);
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
-#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-
-extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
-extern void acpi_video_dmi_promote_vendor(void);
-extern void acpi_video_dmi_demote_vendor(void);
-extern int acpi_video_backlight_support(void);
-extern int acpi_video_display_switch_support(void);
-
-#else
-
-static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
-{
- return 0;
-}
-
-static inline long acpi_is_video_device(acpi_handle handle)
-{
- return 0;
-}
-
-static inline void acpi_video_dmi_promote_vendor(void)
-{
-}
-
-static inline void acpi_video_dmi_demote_vendor(void)
-{
-}
-
-static inline int acpi_video_backlight_support(void)
-{
- return 0;
-}
-
-static inline int acpi_video_display_switch_support(void)
-{
- return 0;
-}
-
-#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
-
extern int acpi_blacklisted(void);
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
extern void acpi_osi_setup(char *str);
+extern bool acpi_osi_is_win8(void);
#ifdef CONFIG_ACPI_NUMA
int acpi_get_node(acpi_handle handle);
@@ -320,6 +304,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_resources_are_enforced(void);
+int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
+ unsigned long flags, char *desc);
+
#ifdef CONFIG_HIBERNATION
void __init acpi_no_s4_hw_signature(void);
#endif
@@ -428,6 +415,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
extern void acpi_early_init(void);
+extern void acpi_subsystem_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
@@ -471,12 +459,18 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
return NULL;
}
+static inline bool has_acpi_companion(struct device *dev)
+{
+ return false;
+}
+
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
return NULL;
}
static inline void acpi_early_init(void) { }
+static inline void acpi_subsystem_init(void) { }
static inline int early_acpi_boot_init(void)
{
@@ -508,6 +502,13 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
return 0;
}
+static inline int acpi_reserve_region(u64 start, unsigned int length,
+ u8 space_id, unsigned long flags,
+ char *desc)
+{
+ return -ENXIO;
+}
+
struct acpi_table_header;
static inline int acpi_table_parse(char *id,
int (*handler)(struct acpi_table_header *))
@@ -552,6 +553,11 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+ return false;
+}
+
#define ACPI_PTR(_ptr) (NULL)
#endif /* !CONFIG_ACPI */
@@ -704,6 +710,8 @@ static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
if (adev)
adev->driver_gpios = NULL;
}
+
+int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
#else
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
@@ -711,6 +719,11 @@ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
return -ENXIO;
}
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+
+static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+{
+ return -ENXIO;
+}
#endif
/* Device properties */
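As a rough illustration of the two additions above, has_acpi_companion() and acpi_dev_gpio_irq_get(), an ACPI-enumerated driver might fetch its interrupt from a GpioInt resource along these lines (the foo_probe() helper and the use of index 0 are assumptions, not part of the patch):

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/errno.h>

static int foo_probe(struct device *dev)
{
	int irq;

	if (!has_acpi_companion(dev))
		return -ENODEV;		/* not enumerated from ACPI */

	/* Translate GpioInt resource 0 of the ACPI companion into a Linux IRQ. */
	irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
	if (irq < 0)
		return irq;

	dev_info(dev, "using GpioInt 0 as irq %d\n", irq);
	return 0;
}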
diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h
new file mode 100644
index 000000000000..f10c87265855
--- /dev/null
+++ b/include/linux/acpi_irq.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_ACPI_IRQ_H
+#define _LINUX_ACPI_IRQ_H
+
+#include <linux/irq.h>
+
+#ifndef acpi_irq_init
+static inline void acpi_irq_init(void) { }
+#endif
+
+#endif /* _LINUX_ACPI_IRQ_H */
diff --git a/include/linux/aio.h b/include/linux/aio.h
index d9c92daa3944..9eb42dbc5582 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -1,86 +1,23 @@
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H
-#include <linux/list.h>
-#include <linux/workqueue.h>
#include <linux/aio_abi.h>
-#include <linux/uio.h>
-#include <linux/rcupdate.h>
-
-#include <linux/atomic.h>
struct kioctx;
struct kiocb;
+struct mm_struct;
#define KIOCB_KEY 0
-/*
- * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
- * cancelled or completed (this makes a certain amount of sense because
- * successful cancellation - io_cancel() - does deliver the completion to
- * userspace).
- *
- * And since most things don't implement kiocb cancellation and we'd really like
- * kiocb completion to be lockless when possible, we use ki_cancel to
- * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
- * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
- */
-#define KIOCB_CANCELLED ((void *) (~0ULL))
-
typedef int (kiocb_cancel_fn)(struct kiocb *);
-struct kiocb {
- struct file *ki_filp;
- struct kioctx *ki_ctx; /* NULL for sync ops */
- kiocb_cancel_fn *ki_cancel;
- void *private;
-
- union {
- void __user *user;
- struct task_struct *tsk;
- } ki_obj;
-
- __u64 ki_user_data; /* user's data for completion */
- loff_t ki_pos;
- size_t ki_nbytes; /* copy of iocb->aio_nbytes */
-
- struct list_head ki_list; /* the aio core uses this
- * for cancellation */
-
- /*
- * If the aio_resfd field of the userspace iocb is not zero,
- * this is the underlying eventfd context to deliver events to.
- */
- struct eventfd_ctx *ki_eventfd;
-};
-
-static inline bool is_sync_kiocb(struct kiocb *kiocb)
-{
- return kiocb->ki_ctx == NULL;
-}
-
-static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
-{
- *kiocb = (struct kiocb) {
- .ki_ctx = NULL,
- .ki_filp = filp,
- .ki_obj.tsk = current,
- };
-}
-
/* prototypes */
#ifdef CONFIG_AIO
-extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern void aio_complete(struct kiocb *iocb, long res, long res2);
-struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
-static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
-struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user * __user *iocbpp,
@@ -89,11 +26,6 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-static inline struct kiocb *list_kiocb(struct list_head *h)
-{
- return list_entry(h, struct kiocb, ki_list);
-}
-
/* for sysctl: */
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index a899402a5a0e..52f3b7da4f2d 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -43,8 +43,8 @@ struct alarm {
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
-int alarm_start(struct alarm *alarm, ktime_t start);
-int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
int alarm_try_to_cancel(struct alarm *alarm);
int alarm_cancel(struct alarm *alarm);
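With alarm_start() and alarm_start_relative() now returning void, callers no longer have a return value to check. A minimal one-shot user would look roughly like this (the two-second period and all foo_* names are illustrative):

#include <linux/alarmtimer.h>
#include <linux/ktime.h>

static enum alarmtimer_restart foo_alarm_fn(struct alarm *alarm, ktime_t now)
{
	/* one-shot: do the work and don't re-arm */
	return ALARMTIMER_NORESTART;
}

static struct alarm foo_alarm;

static void foo_arm(void)
{
	alarm_init(&foo_alarm, ALARM_REALTIME, foo_alarm_fn);
	/* fire roughly two seconds from now; nothing to check anymore */
	alarm_start_relative(&foo_alarm, ktime_set(2, 0));
}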
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index 79d6edf446d5..521ec1f2e6bc 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -24,16 +24,22 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <asm/arm-cci.h>
+
struct device_node;
#ifdef CONFIG_ARM_CCI
extern bool cci_probed(void);
+#else
+static inline bool cci_probed(void) { return false; }
+#endif
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
extern int cci_ace_get_port(struct device_node *dn);
extern int cci_disable_port_by_cpu(u64 mpidr);
extern int __cci_control_port_by_device(struct device_node *dn, bool enable);
extern int __cci_control_port_by_index(u32 port, bool enable);
#else
-static inline bool cci_probed(void) { return false; }
static inline int cci_ace_get_port(struct device_node *dn)
{
return -ENODEV;
@@ -49,6 +55,7 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
return -ENODEV;
}
#endif
+
#define cci_disable_port_by_device(dev) \
__cci_control_port_by_device(dev, false)
#define cci_enable_port_by_device(dev) \
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 179b38ffd351..388574ea38ed 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -60,12 +60,15 @@ struct dma_chan_ref {
* dependency chain
* @ASYNC_TX_FENCE: specify that the next operation in the dependency
* chain uses this operation's result as an input
+ * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
+ * input data. Required for rmw case.
*/
enum async_tx_flags {
ASYNC_TX_XOR_ZERO_DST = (1 << 0),
ASYNC_TX_XOR_DROP_DST = (1 << 1),
ASYNC_TX_ACK = (1 << 2),
ASYNC_TX_FENCE = (1 << 3),
+ ASYNC_TX_PQ_XOR_DST = (1 << 4),
};
/**
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
new file mode 100644
index 000000000000..a48d90e3bcbb
--- /dev/null
+++ b/include/linux/backing-dev-defs.h
@@ -0,0 +1,255 @@
+#ifndef __LINUX_BACKING_DEV_DEFS_H
+#define __LINUX_BACKING_DEV_DEFS_H
+
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
+#include <linux/flex_proportions.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+struct page;
+struct device;
+struct dentry;
+
+/*
+ * Bits in bdi_writeback.state
+ */
+enum wb_state {
+ WB_registered, /* bdi_register() was done */
+ WB_writeback_running, /* Writeback is in progress */
+ WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
+};
+
+enum wb_congested_state {
+ WB_async_congested, /* The async (write) queue is getting full */
+ WB_sync_congested, /* The sync queue is getting full */
+};
+
+typedef int (congested_fn)(void *, int);
+
+enum wb_stat_item {
+ WB_RECLAIMABLE,
+ WB_WRITEBACK,
+ WB_DIRTIED,
+ WB_WRITTEN,
+ NR_WB_STAT_ITEMS
+};
+
+#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+
+/*
+ * For cgroup writeback, multiple wb's may map to the same blkcg. Those
+ * wb's can operate mostly independently but should share the congested
+ * state. To facilitate such sharing, the congested state is tracked using
+ * the following struct which is created on demand, indexed by blkcg ID on
+ * its bdi, and refcounted.
+ */
+struct bdi_writeback_congested {
+ unsigned long state; /* WB_[a]sync_congested flags */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct backing_dev_info *bdi; /* the associated bdi */
+ atomic_t refcnt; /* nr of attached wb's and blkg */
+ int blkcg_id; /* ID of the associated blkcg */
+ struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
+#endif
+};
+
+/*
+ * Each wb (bdi_writeback) can perform writeback operations, is measured
+ * and throttled, independently. Without cgroup writeback, each bdi
+ * (bdi_writeback) is served by its embedded bdi->wb.
+ *
+ * On the default hierarchy, blkcg implicitly enables memcg. This allows
+ * using memcg's page ownership for attributing writeback IOs, and every
+ * memcg - blkcg combination can be served by its own wb by assigning a
+ * dedicated wb to each memcg, which enables isolation across different
+ * cgroups and propagation of IO back pressure down from the IO layer upto
+ * the tasks which are generating the dirty pages to be written back.
+ *
+ * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
+ * refcounted with the number of inodes attached to it, and pins the memcg
+ * and the corresponding blkcg. As the corresponding blkcg for a memcg may
+ * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
+ * is tested for blkcg after lookup and removed from index on mismatch so
+ * that a new wb for the combination can be created.
+ */
+struct bdi_writeback {
+ struct backing_dev_info *bdi; /* our parent bdi */
+
+ unsigned long state; /* Always use atomic bitops on this */
+ unsigned long last_old_flush; /* last old data flush */
+
+ struct list_head b_dirty; /* dirty inodes */
+ struct list_head b_io; /* parked for writeback */
+ struct list_head b_more_io; /* parked for more writeback */
+ struct list_head b_dirty_time; /* time stamps are dirty */
+ spinlock_t list_lock; /* protects the b_* lists */
+
+ struct percpu_counter stat[NR_WB_STAT_ITEMS];
+
+ struct bdi_writeback_congested *congested;
+
+ unsigned long bw_time_stamp; /* last time write bw is updated */
+ unsigned long dirtied_stamp;
+ unsigned long written_stamp; /* pages written at bw_time_stamp */
+ unsigned long write_bandwidth; /* the estimated write bandwidth */
+ unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */
+
+ /*
+ * The base dirty throttle rate, re-calculated on every 200ms.
+ * All the bdi tasks' dirty rate will be curbed under it.
+ * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
+ * in small steps and is much more smooth/stable than the latter.
+ */
+ unsigned long dirty_ratelimit;
+ unsigned long balanced_dirty_ratelimit;
+
+ struct fprop_local_percpu completions;
+ int dirty_exceeded;
+
+ spinlock_t work_lock; /* protects work_list & dwork scheduling */
+ struct list_head work_list;
+ struct delayed_work dwork; /* work item used for writeback */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct percpu_ref refcnt; /* used only for !root wb's */
+ struct fprop_local_percpu memcg_completions;
+ struct cgroup_subsys_state *memcg_css; /* the associated memcg */
+ struct cgroup_subsys_state *blkcg_css; /* and blkcg */
+ struct list_head memcg_node; /* anchored at memcg->cgwb_list */
+ struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+
+ union {
+ struct work_struct release_work;
+ struct rcu_head rcu;
+ };
+#endif
+};
+
+struct backing_dev_info {
+ struct list_head bdi_list;
+ unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+ unsigned int capabilities; /* Device capabilities */
+ congested_fn *congested_fn; /* Function pointer if device is md/dm */
+ void *congested_data; /* Pointer to aux data for congested func */
+
+ char *name;
+
+ unsigned int min_ratio;
+ unsigned int max_ratio, max_prop_frac;
+
+ /*
+ * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are
+ * any dirty wbs, which is depended upon by bdi_has_dirty_io().
+ */
+ atomic_long_t tot_write_bandwidth;
+
+ struct bdi_writeback wb; /* the root writeback info for this bdi */
+ struct bdi_writeback_congested wb_congested; /* its congested state */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
+ struct rb_root cgwb_congested_tree; /* their congested states */
+ atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
+#endif
+ wait_queue_head_t wb_waitq;
+
+ struct device *dev;
+
+ struct timer_list laptop_mode_wb_timer;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_dir;
+ struct dentry *debug_stats;
+#endif
+};
+
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
+void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
+void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
+
+static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
+{
+ clear_wb_congested(bdi->wb.congested, sync);
+}
+
+static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
+{
+ set_wb_congested(bdi->wb.congested, sync);
+}
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * wb_tryget - try to increment a wb's refcount
+ * @wb: bdi_writeback to get
+ */
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ return percpu_ref_tryget(&wb->refcnt);
+ return true;
+}
+
+/**
+ * wb_get - increment a wb's refcount
+ * @wb: bdi_writeback to get
+ */
+static inline void wb_get(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ percpu_ref_get(&wb->refcnt);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ percpu_ref_put(&wb->refcnt);
+}
+
+/**
+ * wb_dying - is a wb dying?
+ * @wb: bdi_writeback of interest
+ *
+ * Returns whether @wb is unlinked and being drained.
+ */
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+ return percpu_ref_is_dying(&wb->refcnt);
+}
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+ return true;
+}
+
+static inline void wb_get(struct bdi_writeback *wb)
+{
+}
+
+static inline void wb_put(struct bdi_writeback *wb)
+{
+}
+
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+ return false;
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
+#endif /* __LINUX_BACKING_DEV_DEFS_H */
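A short sketch of the refcounting helpers defined above: wb_tryget() is the safe way to pin a non-root wb found through an RCU-protected lookup, while the root bdi->wb is exempt from refcounting altogether. The function name below is hypothetical.

#include <linux/backing-dev-defs.h>

static void foo_use_wb(struct bdi_writeback *wb)
{
	/* Fails only for a cgroup wb that is already being torn down;
	 * the embedded bdi->wb always succeeds. */
	if (!wb_tryget(wb))
		return;

	/* ... wb may now be dereferenced, even after leaving the RCU
	 * read-side section that produced it ... */

	wb_put(wb);
}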
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index aff923ae8c4b..0e6d4828a77a 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -8,106 +8,13 @@
#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H
-#include <linux/percpu_counter.h>
-#include <linux/log2.h>
-#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/timer.h>
+#include <linux/blkdev.h>
#include <linux/writeback.h>
-#include <linux/atomic.h>
-#include <linux/sysctl.h>
-#include <linux/workqueue.h>
-
-struct page;
-struct device;
-struct dentry;
-
-/*
- * Bits in backing_dev_info.state
- */
-enum bdi_state {
- BDI_async_congested, /* The async (write) queue is getting full */
- BDI_sync_congested, /* The sync queue is getting full */
- BDI_registered, /* bdi_register() was done */
- BDI_writeback_running, /* Writeback is in progress */
-};
-
-typedef int (congested_fn)(void *, int);
-
-enum bdi_stat_item {
- BDI_RECLAIMABLE,
- BDI_WRITEBACK,
- BDI_DIRTIED,
- BDI_WRITTEN,
- NR_BDI_STAT_ITEMS
-};
-
-#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
-
-struct bdi_writeback {
- struct backing_dev_info *bdi; /* our parent bdi */
-
- unsigned long last_old_flush; /* last old data flush */
-
- struct delayed_work dwork; /* work item used for writeback */
- struct list_head b_dirty; /* dirty inodes */
- struct list_head b_io; /* parked for writeback */
- struct list_head b_more_io; /* parked for more writeback */
- struct list_head b_dirty_time; /* time stamps are dirty */
- spinlock_t list_lock; /* protects the b_* lists */
-};
-
-struct backing_dev_info {
- struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
- unsigned long state; /* Always use atomic bitops on this */
- unsigned int capabilities; /* Device capabilities */
- congested_fn *congested_fn; /* Function pointer if device is md/dm */
- void *congested_data; /* Pointer to aux data for congested func */
-
- char *name;
-
- struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
-
- unsigned long bw_time_stamp; /* last time write bw is updated */
- unsigned long dirtied_stamp;
- unsigned long written_stamp; /* pages written at bw_time_stamp */
- unsigned long write_bandwidth; /* the estimated write bandwidth */
- unsigned long avg_write_bandwidth; /* further smoothed write bw */
-
- /*
- * The base dirty throttle rate, re-calculated on every 200ms.
- * All the bdi tasks' dirty rate will be curbed under it.
- * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
- * in small steps and is much more smooth/stable than the latter.
- */
- unsigned long dirty_ratelimit;
- unsigned long balanced_dirty_ratelimit;
-
- struct fprop_local_percpu completions;
- int dirty_exceeded;
-
- unsigned int min_ratio;
- unsigned int max_ratio, max_prop_frac;
-
- struct bdi_writeback wb; /* default writeback info for this bdi */
- spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
-
- struct list_head work_list;
-
- struct device *dev;
-
- struct timer_list laptop_mode_wb_timer;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debug_dir;
- struct dentry *debug_stats;
-#endif
-};
-
-struct backing_dev_info *inode_to_bdi(struct inode *inode);
+#include <linux/blk-cgroup.h>
+#include <linux/backing-dev-defs.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);
@@ -116,99 +23,100 @@ __printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
-void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
- enum wb_reason reason);
-void bdi_start_background_writeback(struct backing_dev_info *bdi);
-void bdi_writeback_workfn(struct work_struct *work);
-int bdi_has_dirty_io(struct backing_dev_info *bdi);
-void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+ bool range_cyclic, enum wb_reason reason);
+void wb_start_background_writeback(struct bdi_writeback *wb);
+void wb_workfn(struct work_struct *work);
+void wb_wakeup_delayed(struct bdi_writeback *wb);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
-static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
- return !list_empty(&wb->b_dirty) ||
- !list_empty(&wb->b_io) ||
- !list_empty(&wb->b_more_io);
+ return test_bit(WB_has_dirty_io, &wb->state);
+}
+
+static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
+{
+ /*
+ * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
+ * any dirty wbs. See wb_update_write_bandwidth().
+ */
+ return atomic_long_read(&bdi->tot_write_bandwidth);
}
-static inline void __add_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item, s64 amount)
+static inline void __add_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item, s64 amount)
{
- __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
+ __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void __inc_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- __add_bdi_stat(bdi, item, 1);
+ __add_wb_stat(wb, item, 1);
}
-static inline void inc_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
- __inc_bdi_stat(bdi, item);
+ __inc_wb_stat(wb, item);
local_irq_restore(flags);
}
-static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void __dec_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- __add_bdi_stat(bdi, item, -1);
+ __add_wb_stat(wb, item, -1);
}
-static inline void dec_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
- __dec_bdi_stat(bdi, item);
+ __dec_wb_stat(wb, item);
local_irq_restore(flags);
}
-static inline s64 bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- return percpu_counter_read_positive(&bdi->bdi_stat[item]);
+ return percpu_counter_read_positive(&wb->stat[item]);
}
-static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
+ return percpu_counter_sum_positive(&wb->stat[item]);
}
-static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
s64 sum;
unsigned long flags;
local_irq_save(flags);
- sum = __bdi_stat_sum(bdi, item);
+ sum = __wb_stat_sum(wb, item);
local_irq_restore(flags);
return sum;
}
-extern void bdi_writeout_inc(struct backing_dev_info *bdi);
+extern void wb_writeout_inc(struct bdi_writeback *wb);
/*
* maximal error of a stat counter.
*/
-static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
+static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
- return nr_cpu_ids * BDI_STAT_BATCH;
+ return nr_cpu_ids * WB_STAT_BATCH;
#else
return 1;
#endif
@@ -232,50 +140,57 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_NO_WRITEBACK: Don't write pages back
* BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
+ *
+ * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
#define BDI_CAP_NO_ACCT_WB 0x00000004
#define BDI_CAP_STABLE_WRITES 0x00000008
#define BDI_CAP_STRICTLIMIT 0x00000010
+#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
extern struct backing_dev_info noop_backing_dev_info;
-int writeback_in_progress(struct backing_dev_info *bdi);
-
-static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
+/**
+ * writeback_in_progress - determine whether there is writeback in progress
+ * @wb: bdi_writeback of interest
+ *
+ * Determine whether there is writeback waiting to be handled against a
+ * bdi_writeback.
+ */
+static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
- if (bdi->congested_fn)
- return bdi->congested_fn(bdi->congested_data, bdi_bits);
- return (bdi->state & bdi_bits);
+ return test_bit(WB_writeback_running, &wb->state);
}
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
- return bdi_congested(bdi, 1 << BDI_sync_congested);
-}
+ struct super_block *sb;
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << BDI_async_congested);
+ if (!inode)
+ return &noop_backing_dev_info;
+
+ sb = inode->i_sb;
+#ifdef CONFIG_BLOCK
+ if (sb_is_blkdev_sb(sb))
+ return blk_get_backing_dev_info(I_BDEV(inode));
+#endif
+ return sb->s_bdi;
}
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
- return bdi_congested(bdi, (1 << BDI_sync_congested) |
- (1 << BDI_async_congested));
-}
+ struct backing_dev_info *bdi = wb->bdi;
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
-};
+ if (bdi->congested_fn)
+ return bdi->congested_fn(bdi->congested_data, cong_bits);
+ return wb->congested->state & cong_bits;
+}
-void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
-void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
@@ -319,4 +234,333 @@ static inline int bdi_sched_wait(void *word)
return 0;
}
-#endif /* _LINUX_BACKING_DEV_H */
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+struct bdi_writeback_congested *
+wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
+void wb_congested_put(struct bdi_writeback_congested *congested);
+struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
+ struct cgroup_subsys_state *memcg_css,
+ gfp_t gfp);
+void wb_memcg_offline(struct mem_cgroup *memcg);
+void wb_blkcg_offline(struct blkcg *blkcg);
+int inode_congested(struct inode *inode, int cong_bits);
+
+/**
+ * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
+ * @inode: inode of interest
+ *
+ * cgroup writeback requires support from both the bdi and filesystem.
+ * Test whether @inode has both.
+ */
+static inline bool inode_cgwb_enabled(struct inode *inode)
+{
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ return bdi_cap_account_dirty(bdi) &&
+ (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
+ (inode->i_sb->s_iflags & SB_I_CGROUPWB);
+}
+
+/**
+ * wb_find_current - find wb for %current on a bdi
+ * @bdi: bdi of interest
+ *
+ * Find the wb of @bdi which matches both the memcg and blkcg of %current.
+ * Must be called under rcu_read_lock() which protects the returned wb.
+ * NULL if not found.
+ */
+static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
+{
+ struct cgroup_subsys_state *memcg_css;
+ struct bdi_writeback *wb;
+
+ memcg_css = task_css(current, memory_cgrp_id);
+ if (!memcg_css->parent)
+ return &bdi->wb;
+
+ wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
+
+ /*
+ * %current's blkcg equals the effective blkcg of its memcg. No
+ * need to use the relatively expensive cgroup_get_e_css().
+ */
+ if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
+ return wb;
+ return NULL;
+}
+
+/**
+ * wb_get_create_current - get or create wb for %current on a bdi
+ * @bdi: bdi of interest
+ * @gfp: allocation mask
+ *
+ * Equivalent to wb_get_create() on %current's memcg. This function is
+ * called from a relatively hot path and optimizes the common cases using
+ * wb_find_current().
+ */
+static inline struct bdi_writeback *
+wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
+{
+ struct bdi_writeback *wb;
+
+ rcu_read_lock();
+ wb = wb_find_current(bdi);
+ if (wb && unlikely(!wb_tryget(wb)))
+ wb = NULL;
+ rcu_read_unlock();
+
+ if (unlikely(!wb)) {
+ struct cgroup_subsys_state *memcg_css;
+
+ memcg_css = task_get_css(current, memory_cgrp_id);
+ wb = wb_get_create(bdi, memcg_css, gfp);
+ css_put(memcg_css);
+ }
+ return wb;
+}
+
+/**
+ * inode_to_wb_is_valid - test whether an inode has a wb associated
+ * @inode: inode of interest
+ *
+ * Returns %true if @inode has a wb associated. May be called without any
+ * locking.
+ */
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+ return inode->i_wb;
+}
+
+/**
+ * inode_to_wb - determine the wb of an inode
+ * @inode: inode of interest
+ *
+ * Returns the wb @inode is currently associated with. The caller must be
+ * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
+ * associated wb's list_lock.
+ */
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(debug_locks &&
+ (!lockdep_is_held(&inode->i_lock) &&
+ !lockdep_is_held(&inode->i_mapping->tree_lock) &&
+ !lockdep_is_held(&inode->i_wb->list_lock)));
+#endif
+ return inode->i_wb;
+}
+
+/**
+ * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
+ * @inode: target inode
+ * @lockedp: temp bool output param, to be passed to the end function
+ *
+ * The caller wants to access the wb associated with @inode but isn't
+ * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
+ * function determines the wb associated with @inode and ensures that the
+ * association doesn't change until the transaction is finished with
+ * unlocked_inode_to_wb_end().
+ *
+ * The caller must call unlocked_inode_to_wb_end() with *@lockedp
+ * afterwards and can't sleep during transaction. IRQ may or may not be
+ * disabled on return.
+ */
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+{
+ rcu_read_lock();
+
+ /*
+ * Paired with store_release in inode_switch_wb_work_fn() and
+ * ensures that we see the new wb if we see cleared I_WB_SWITCH.
+ */
+ *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+
+ if (unlikely(*lockedp))
+ spin_lock_irq(&inode->i_mapping->tree_lock);
+
+ /*
+ * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
+ * inode_to_wb() will bark. Deref directly.
+ */
+ return inode->i_wb;
+}
+
+/**
+ * unlocked_inode_to_wb_end - end inode wb access transaction
+ * @inode: target inode
+ * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ */
+static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+{
+ if (unlikely(locked))
+ spin_unlock_irq(&inode->i_mapping->tree_lock);
+
+ rcu_read_unlock();
+}
+
+struct wb_iter {
+ int start_blkcg_id;
+ struct radix_tree_iter tree_iter;
+ void **slot;
+};
+
+static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
+ struct backing_dev_info *bdi)
+{
+ struct radix_tree_iter *titer = &iter->tree_iter;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (iter->start_blkcg_id >= 0) {
+ iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
+ iter->start_blkcg_id = -1;
+ } else {
+ iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
+ }
+
+ if (!iter->slot)
+ iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
+ if (iter->slot)
+ return *iter->slot;
+ return NULL;
+}
+
+static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
+ struct backing_dev_info *bdi,
+ int start_blkcg_id)
+{
+ iter->start_blkcg_id = start_blkcg_id;
+
+ if (start_blkcg_id)
+ return __wb_iter_next(iter, bdi);
+ else
+ return &bdi->wb;
+}
+
+/**
+ * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
+ * @wb_cur: cursor struct bdi_writeback pointer
+ * @bdi: bdi to walk wb's of
+ * @iter: pointer to struct wb_iter to be used as iteration buffer
+ * @start_blkcg_id: blkcg ID to start iteration from
+ *
+ * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
+ * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter
+ * to be used as temp storage during iteration. rcu_read_lock() must be
+ * held throughout iteration.
+ */
+#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
+ for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \
+ (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool inode_cgwb_enabled(struct inode *inode)
+{
+ return false;
+}
+
+static inline struct bdi_writeback_congested *
+wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
+{
+ return bdi->wb.congested;
+}
+
+static inline void wb_congested_put(struct bdi_writeback_congested *congested)
+{
+}
+
+static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
+{
+ return &bdi->wb;
+}
+
+static inline struct bdi_writeback *
+wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
+{
+ return &bdi->wb;
+}
+
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+ return true;
+}
+
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+{
+ return &inode_to_bdi(inode)->wb;
+}
+
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+{
+ return inode_to_wb(inode);
+}
+
+static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+{
+}
+
+static inline void wb_memcg_offline(struct mem_cgroup *memcg)
+{
+}
+
+static inline void wb_blkcg_offline(struct blkcg *blkcg)
+{
+}
+
+struct wb_iter {
+ int next_id;
+};
+
+#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
+ for ((iter)->next_id = (start_blkcg_id); \
+ ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
+
+static inline int inode_congested(struct inode *inode, int cong_bits)
+{
+ return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
+static inline int inode_read_congested(struct inode *inode)
+{
+ return inode_congested(inode, 1 << WB_sync_congested);
+}
+
+static inline int inode_write_congested(struct inode *inode)
+{
+ return inode_congested(inode, 1 << WB_async_congested);
+}
+
+static inline int inode_rw_congested(struct inode *inode)
+{
+ return inode_congested(inode, (1 << WB_sync_congested) |
+ (1 << WB_async_congested));
+}
+
+static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
+{
+ return wb_congested(&bdi->wb, cong_bits);
+}
+
+static inline int bdi_read_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, 1 << WB_sync_congested);
+}
+
+static inline int bdi_write_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, 1 << WB_async_congested);
+}
+
+static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, (1 << WB_sync_congested) |
+ (1 << WB_async_congested));
+}
+
+#endif /* _LINUX_BACKING_DEV_H */
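The bdi_*_congested() helpers survive as thin wrappers, but per-inode checks now go through inode_congested() and friends so that, with cgroup writeback enabled, congestion is judged against the wb actually serving the inode. A hedged example of the intended call pattern (the helper name is invented):

#include <linux/backing-dev.h>
#include <linux/fs.h>

/* Skip purely opportunistic read-ahead when the inode's writeback
 * domain is already backed up. */
static bool foo_should_readahead(struct inode *inode)
{
	return !inode_read_congested(inode);
}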
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index adb14a8616df..1e7a69adbe6f 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -117,12 +117,16 @@ struct backlight_device {
int use_count;
};
-static inline void backlight_update_status(struct backlight_device *bd)
+static inline int backlight_update_status(struct backlight_device *bd)
{
+ int ret = -ENOENT;
+
mutex_lock(&bd->update_lock);
if (bd->ops && bd->ops->update_status)
- bd->ops->update_status(bd);
+ ret = bd->ops->update_status(bd);
mutex_unlock(&bd->update_lock);
+
+ return ret;
}
extern struct backlight_device *backlight_device_register(const char *name,
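Since backlight_update_status() now reports the result of the update_status hook (or -ENOENT when none is registered), callers can propagate failures instead of silently ignoring them. A sketch, assuming the usual props.brightness field; foo_set_brightness() is not an existing API:

#include <linux/backlight.h>

static int foo_set_brightness(struct backlight_device *bd, int level)
{
	bd->props.brightness = level;
	return backlight_update_status(bd);	/* -ENOENT if no update_status op */
}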
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 0e97856b2cff..14eea946e640 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -74,5 +74,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
+#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
#endif /* __BASIC_MMIO_GPIO_H */
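The new BGPIOF_READ_OUTPUT_REG_SET flag tells bgpio that the value last written to the set register can be read back, so output state should be read from there rather than from the data register. A sketch of how a driver might pass it; the register offsets and 4-byte width are made up, and the bgpio_init() argument order is assumed from this kernel's prototype:

#include <linux/basic_mmio_gpio.h>

static int foo_gpio_setup(struct bgpio_chip *bgc, struct device *dev,
			  void __iomem *base)
{
	return bgpio_init(bgc, dev, 4,
			  base + 0x00,	/* dat: input value */
			  base + 0x04,	/* set: also reflects output value */
			  NULL,		/* no separate clear register */
			  base + 0x08,	/* dirout */
			  NULL,		/* no dirin register */
			  BGPIOF_READ_OUTPUT_REG_SET);
}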
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
new file mode 100644
index 000000000000..b12b07e75929
--- /dev/null
+++ b/include/linux/bcm47xx_nvram.h
@@ -0,0 +1,34 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BCM47XX_NVRAM_H
+#define __BCM47XX_NVRAM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_BCM47XX
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
+int bcm47xx_nvram_gpio_pin(const char *name);
+#else
+static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
+{
+ return -ENOTSUPP;
+};
+static inline int bcm47xx_nvram_getenv(const char *name, char *val,
+ size_t val_len)
+{
+ return -ENOTSUPP;
+};
+static inline int bcm47xx_nvram_gpio_pin(const char *name)
+{
+ return -ENOTSUPP;
+};
+#endif
+
+#endif /* __BCM47XX_NVRAM_H */
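A minimal consumer of the new header, hedged: the "boardtype" variable name is only an example, and bcm47xx_nvram_getenv() is assumed to return a negative errno on failure (including -ENOTSUPP on non-BCM47XX configs, as the stubs above show) and a non-negative length on success.

#include <linux/bcm47xx_nvram.h>

static int foo_read_boardtype(char *buf, size_t len)
{
	int ret = bcm47xx_nvram_getenv("boardtype", buf, len);

	return ret < 0 ? ret : 0;
}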
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 994739da827f..2ff4a9961e1d 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
extern void bcma_driver_unregister(struct bcma_driver *drv);
+/* module_bcma_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_bcma_driver(__bcma_driver) \
+ module_driver(__bcma_driver, bcma_driver_register, \
+ bcma_driver_unregister)
+
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
extern int bcma_arch_register_fallback_sprom(
@@ -434,6 +443,27 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
return bcma_find_core_unit(bus, coreid, 0);
}
+#ifdef CONFIG_BCMA_HOST_PCI
+extern void bcma_host_pci_up(struct bcma_bus *bus);
+extern void bcma_host_pci_down(struct bcma_bus *bus);
+extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+ struct bcma_device *core, bool enable);
+#else
+static inline void bcma_host_pci_up(struct bcma_bus *bus)
+{
+}
+static inline void bcma_host_pci_down(struct bcma_bus *bus)
+{
+}
+static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+ struct bcma_device *core, bool enable)
+{
+ if (bus->hosttype == BCMA_HOSTTYPE_PCI)
+ return -ENOTSUPP;
+ return 0;
+}
+#endif
+
extern bool bcma_core_is_enabled(struct bcma_device *core);
extern void bcma_core_disable(struct bcma_device *core, u32 flags);
extern int bcma_core_enable(struct bcma_device *core, u32 flags);
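With module_bcma_driver() available, a trivial bcma driver can drop its module_init()/module_exit() pair entirely. A skeleton, assuming the usual bcma ID-table macros; everything named foo_* is hypothetical:

#include <linux/bcma/bcma.h>
#include <linux/module.h>

static const struct bcma_device_id foo_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, BCMA_ANY_REV, BCMA_ANY_CLASS),
	{},
};
MODULE_DEVICE_TABLE(bcma, foo_bcma_tbl);

static int foo_bcma_probe(struct bcma_device *core)
{
	return 0;	/* claim the core */
}

static void foo_bcma_remove(struct bcma_device *core)
{
}

static struct bcma_driver foo_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= foo_bcma_tbl,
	.probe		= foo_bcma_probe,
	.remove		= foo_bcma_remove,
};
module_bcma_driver(foo_bcma_driver);

MODULE_LICENSE("GPL");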
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index db6fa217f98b..6cceedf65ca2 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -663,14 +663,6 @@ struct bcma_drv_cc_b {
#define bcma_cc_maskset32(cc, offset, mask, set) \
bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
-extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
-extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
-
-extern void bcma_chipco_suspend(struct bcma_drv_cc *cc);
-extern void bcma_chipco_resume(struct bcma_drv_cc *cc);
-
-void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
-
extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
@@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
/* PMU support */
-extern void bcma_pmu_init(struct bcma_drv_cc *cc);
-extern void bcma_pmu_early_init(struct bcma_drv_cc *cc);
-
extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
u32 value);
extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset,
diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h
index 4dd1f33e36a2..4354d4ea6713 100644
--- a/include/linux/bcma/bcma_driver_gmac_cmn.h
+++ b/include/linux/bcma/bcma_driver_gmac_cmn.h
@@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn {
#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val)
#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val)
-#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
-extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
-#else
-static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
-#endif
-
#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index 0b3b32aeeb8a..8eea7f9e33b4 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -39,21 +39,6 @@ struct bcma_drv_mips {
u8 early_setup_done:1;
};
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
-extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
-
-extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
-#else
-static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
-static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
-
-static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
-{
- return 0;
-}
-#endif
-
extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 3f809ae372c4..9657f11d48a7 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -238,15 +238,26 @@ struct bcma_drv_pci {
#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
-extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
-extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
-extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
- struct bcma_device *core, bool enable);
-extern void bcma_core_pci_up(struct bcma_bus *bus);
-extern void bcma_core_pci_down(struct bcma_bus *bus);
+#ifdef CONFIG_BCMA_DRIVER_PCI
extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
+#else
+static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+}
+#endif
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
+#else
+static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
+{
+ return -ENOTSUPP;
+}
+static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* LINUX_BCMA_DRIVER_PCI_H_ */
diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h
index 5988b05781c3..31e6d17ab798 100644
--- a/include/linux/bcma/bcma_driver_pcie2.h
+++ b/include/linux/bcma/bcma_driver_pcie2.h
@@ -143,6 +143,8 @@
struct bcma_drv_pcie2 {
struct bcma_device *core;
+
+ u16 reqsize;
};
#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset)
@@ -153,6 +155,4 @@ struct bcma_drv_pcie2 {
#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set)
#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask)
-void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
-
#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index da3a127c9958..5e963a6d7c14 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -290,7 +290,21 @@ static inline unsigned bio_segments(struct bio *bio)
* returns. and then bio would be freed memory when if (bio->bi_flags ...)
* runs
*/
-#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+static inline void bio_get(struct bio *bio)
+{
+ bio->bi_flags |= (1 << BIO_REFFED);
+ smp_mb__before_atomic();
+ atomic_inc(&bio->__bi_cnt);
+}
+
+static inline void bio_cnt_set(struct bio *bio, unsigned int count)
+{
+ if (count != 1) {
+ bio->bi_flags |= (1 << BIO_REFFED);
+ smp_mb__before_atomic();
+ }
+ atomic_set(&bio->__bi_cnt, count);
+}
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
@@ -413,7 +427,6 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
}
extern void bio_endio(struct bio *, int);
-extern void bio_endio_nodec(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -469,9 +482,12 @@ extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
#ifdef CONFIG_BLK_CGROUP
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
+static inline int bio_associate_blkcg(struct bio *bio,
+ struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif /* CONFIG_BLK_CGROUP */
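bio_get() keeps its long-standing contract, taking an extra reference so the bio outlives its completion handler; what changes is that it now sets BIO_REFFED so bio_put() knows the reference count is live. The usual caller pattern is unchanged, sketched below against this kernel's submit_bio(int rw, struct bio *) prototype; the completion wiring is an assumption about the caller's bi_end_io.

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/kernel.h>

/* Caller's bi_end_io is assumed to do complete(bio->bi_private). */
static void foo_submit_and_wait(struct bio *bio, struct completion *done)
{
	bio->bi_private = done;
	bio_get(bio);			/* bio stays valid after bi_end_io runs */
	submit_bio(READ, bio);
	wait_for_completion(done);
	/* safe to look at bio here thanks to the extra reference */
	bio_put(bio);
}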
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index dbfbf4990005..ea17cca9e685 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -172,12 +172,8 @@ extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-#define BITMAP_LAST_WORD_MASK(nbits) \
-( \
- ((nbits) % BITS_PER_LONG) ? \
- (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
-)
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
@@ -287,16 +283,16 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_empty(src, nbits);
+
+ return find_first_bit(src, nbits) == nbits;
}
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_full(src, nbits);
+
+ return find_first_zero_bit(src, nbits) == nbits;
}
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
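The reworked BITMAP_LAST_WORD_MASK() drops the conditional by letting the negated-and-masked shift count absorb the case where nbits is a multiple of the word size. Spelling the arithmetic out, assuming a 64-bit long; the helper name is invented:

#include <linux/bitmap.h>

/*
 *   nbits = 5 :  -5 & 63 = 59  ->  ~0UL >> 59 = 0x1f   (low five bits)
 *   nbits = 64: -64 & 63 = 0   ->  ~0UL >> 0  = ~0UL   (the whole word)
 */
static inline bool foo_last_word_mask_checks_out(void)
{
	return BITMAP_LAST_WORD_MASK(5) == 0x1fUL &&
	       BITMAP_LAST_WORD_MASK(BITS_PER_LONG) == ~0UL;
}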
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 5d858e02997f..297f5bda4fdf 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -218,9 +218,9 @@ static inline unsigned long __ffs64(u64 word)
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
- * @size: The maximum size to search
+ * @size: The number of bits to search
*
- * Returns the bit number of the first set bit, or size.
+ * Returns the bit number of the last set bit, or size.
*/
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
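The corrected kernel-doc matches how find_last_bit() is actually used: the return value equals @size when no bit is set, so callers test for that rather than for an error code. For example (the helper is illustrative):

#include <linux/bitops.h>
#include <linux/errno.h>

static int foo_highest_set_bit(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit = find_last_bit(map, nbits);

	return bit == nbits ? -ENOENT : (int)bit;	/* "== size" means empty */
}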
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
new file mode 100644
index 000000000000..58cfab80dd70
--- /dev/null
+++ b/include/linux/blk-cgroup.h
@@ -0,0 +1,655 @@
+#ifndef _BLK_CGROUP_H
+#define _BLK_CGROUP_H
+/*
+ * Common Block IO controller cgroup interface
+ *
+ * Based on ideas and code from CFQ, CFS and BFQ:
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
+ *
+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
+ * Paolo Valente <paolo.valente@unimore.it>
+ *
+ * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
+ * Nauman Rafique <nauman@google.com>
+ */
+
+#include <linux/cgroup.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
+#include <linux/blkdev.h>
+#include <linux/atomic.h>
+
+/* Max limits for throttle policy */
+#define THROTL_IOPS_MAX UINT_MAX
+
+#ifdef CONFIG_BLK_CGROUP
+
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
+
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+};
+
+struct blkcg_gq;
+
+struct blkcg {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
+
+ struct radix_tree_root blkg_tree;
+ struct blkcg_gq *blkg_hint;
+ struct hlist_head blkg_list;
+
+ struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct list_head cgwb_list;
+#endif
+};
+
+struct blkg_stat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt;
+};
+
+struct blkg_rwstat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt[BLKG_RWSTAT_NR];
+};
+
+/*
+ * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
+ * request_queue (q). This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size. blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning and pd_size can't be smaller than pd.
+ */
+struct blkg_policy_data {
+ /* the blkg and policy id this per-policy data belongs to */
+ struct blkcg_gq *blkg;
+ int plid;
+
+ /* used during policy activation */
+ struct list_head alloc_node;
+};
+
+/*
+ * Policies that need to keep per-blkcg data independent of any
+ * request_queue must specify the size of that data with the cpd_size
+ * field of the blkcg_policy structure and embed a blkcg_policy_data
+ * in it. blkcg core allocates these policy-specific per-blkcg
+ * structures lazily, the first time they are actually needed, and
+ * handles them together with blkgs. cpd_init() is invoked to let
+ * each policy initialize its per-blkcg data.
+ */
+struct blkcg_policy_data {
+ /* the policy id this per-policy data belongs to */
+ int plid;
+
+ /* used during policy activation */
+ struct list_head alloc_node;
+};
+
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+ /* Pointer to the associated request_queue */
+ struct request_queue *q;
+ struct list_head q_node;
+ struct hlist_node blkcg_node;
+ struct blkcg *blkcg;
+
+ /*
+ * Each blkg gets congested separately and the congestion state is
+ * propagated to the matching bdi_writeback_congested.
+ */
+ struct bdi_writeback_congested *wb_congested;
+
+ /* all non-root blkcg_gq's are guaranteed to have access to parent */
+ struct blkcg_gq *parent;
+
+ /* request allocation list for this blkcg-q pair */
+ struct request_list rl;
+
+ /* reference count */
+ atomic_t refcnt;
+
+ /* is this blkg online? protected by both blkcg and q locks */
+ bool online;
+
+ struct blkg_policy_data *pd[BLKCG_MAX_POLS];
+
+ struct rcu_head rcu_head;
+};
+
+typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+ int plid;
+ /* policy specific private data size */
+ size_t pd_size;
+ /* policy specific per-blkcg data size */
+ size_t cpd_size;
+ /* cgroup files for the policy */
+ struct cftype *cftypes;
+
+ /* operations */
+ blkcg_pol_init_cpd_fn *cpd_init_fn;
+ blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_online_pd_fn *pd_online_fn;
+ blkcg_pol_offline_pd_fn *pd_offline_fn;
+ blkcg_pol_exit_pd_fn *pd_exit_fn;
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
+};
+
+extern struct blkcg blkcg_root;
+extern struct cgroup_subsys_state * const blkcg_root_css;
+
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
+
+/* Blkio controller policy registration */
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+ int off);
+
+struct blkg_conf_ctx {
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ u64 v;
+};
+
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+
+
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct blkcg, css) : NULL;
+}
+
+static inline struct blkcg *task_blkcg(struct task_struct *tsk)
+{
+ return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
+}
+
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_css)
+ return css_to_blkcg(bio->bi_css);
+ return task_blkcg(current);
+}
+
+static inline struct cgroup_subsys_state *
+task_get_blkcg_css(struct task_struct *task)
+{
+ return task_get_css(task, blkio_cgrp_id);
+}
+
+/**
+ * blkcg_parent - get the parent of a blkcg
+ * @blkcg: blkcg of interest
+ *
+ * Return the parent blkcg of @blkcg. Can be called anytime.
+ */
+static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
+{
+ return css_to_blkcg(blkcg->css.parent);
+}
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol)
+{
+ return blkg ? blkg->pd[pol->plid] : NULL;
+}
+
+static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
+ struct blkcg_policy *pol)
+{
+ return blkcg ? blkcg->pd[pol->plid] : NULL;
+}
+
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data. Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+ return pd ? pd->blkg : NULL;
+}
+
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+ char *p;
+
+ p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ if (!p) {
+ strncpy(buf, "<unavailable>", buflen);
+ return -ENAMETOOLONG;
+ }
+
+ memmove(buf, p, buf + buflen - p);
+ return 0;
+}
+
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
+{
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ atomic_inc(&blkg->refcnt);
+}
+
+void __blkg_release_rcu(struct rcu_head *rcu);
+
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ if (atomic_dec_and_test(&blkg->refcnt))
+ call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+}
+
+struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
+ bool update_hint);
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @d_blkg through the descendants of @p_blkg. Must be used with the
+ * RCU read lock held. If called under either the blkcg or queue lock, the
+ * iteration is guaranteed to include all and only online blkgs. The
+ * caller may update @pos_css by calling css_rightmost_descendant() to
+ * skip a subtree. @p_blkg is included in the iteration and is the first
+ * node to be visited.
+ */
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Similar to blkg_for_each_descendant_pre() but performs post-order
+ * traversal instead. Synchronization rules are the same. @p_blkg is
+ * included in the iteration and the last node to be visited.
+ */
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blk_get_rl - get request_list to use
+ * @q: request_queue of interest
+ * @bio: bio which will be attached to the allocated request (may be %NULL)
+ *
+ * The caller wants to allocate a request from @q to use for @bio. Find
+ * the request_list to use and obtain a reference on it. Should be called
+ * under queue_lock. This function is guaranteed to return a non-%NULL
+ * request_list.
+ */
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio)
+{
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+
+ /* bypass blkg lookup and use @q->root_rl directly for root */
+ if (blkcg == &blkcg_root)
+ goto root_rl;
+
+ /*
+ * Try to use blkg->rl. blkg lookup may fail under memory pressure
+ * or if either the blkcg or queue is going away. Fall back to
+ * root_rl in such cases.
+ */
+ blkg = blkg_lookup_create(blkcg, q);
+ if (unlikely(IS_ERR(blkg)))
+ goto root_rl;
+
+ blkg_get(blkg);
+ rcu_read_unlock();
+ return &blkg->rl;
+root_rl:
+ rcu_read_unlock();
+ return &q->root_rl;
+}
+
+/**
+ * blk_put_rl - put request_list
+ * @rl: request_list to put
+ *
+ * Put the reference acquired by blk_get_rl(). Should be called under
+ * queue_lock.
+ */
+static inline void blk_put_rl(struct request_list *rl)
+{
+ /* root_rl may not have blkg set */
+ if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+ blkg_put(rl->blkg);
+}
+
+/**
+ * blk_rq_set_rl - associate a request with a request_list
+ * @rq: request of interest
+ * @rl: target request_list
+ *
+ * Associate @rq with @rl so that accounting and freeing can know the
+ * request_list @rq came from.
+ */
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
+{
+ rq->rl = rl;
+}
+
+/**
+ * blk_rq_rl - return the request_list a request came from
+ * @rq: request of interest
+ *
+ * Return the request_list @rq is allocated from.
+ */
+static inline struct request_list *blk_rq_rl(struct request *rq)
+{
+ return rq->rl;
+}
+
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+ struct request_queue *q);
+/**
+ * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
+ * @rl: request_list cursor
+ * @q: request_queue of interest
+ *
+ * Should be used under queue_lock.
+ */
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
+
+static inline void blkg_stat_init(struct blkg_stat *stat)
+{
+ u64_stats_init(&stat->syncp);
+}
+
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+ u64_stats_update_begin(&stat->syncp);
+ stat->cnt += val;
+ u64_stats_update_end(&stat->syncp);
+}
+
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat. This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+ unsigned int start;
+ uint64_t v;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stat->syncp);
+ v = stat->cnt;
+ } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
+
+ return v;
+}
+
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+ stat->cnt = 0;
+}
+
+/**
+ * blkg_stat_merge - merge a blkg_stat into another
+ * @to: the destination blkg_stat
+ * @from: the source
+ *
+ * Add @from's count to @to.
+ */
+static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
+{
+ blkg_stat_add(to, blkg_stat_read(from));
+}
+
+static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
+{
+ u64_stats_init(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @rw. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ int rw, uint64_t val)
+{
+ u64_stats_update_begin(&rwstat->syncp);
+
+ if (rw & REQ_WRITE)
+ rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_READ] += val;
+ if (rw & REQ_SYNC)
+ rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+
+ u64_stats_update_end(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read the current snapshot of @rwstat and return it.
+ * This function can be called without synchronization and takes care of
+ * u64 atomicity.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+ unsigned int start;
+ struct blkg_rwstat tmp;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rwstat->syncp);
+ tmp = *rwstat;
+ } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
+
+ return tmp;
+}
+
+/**
+ * blkg_rwstat_total - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+ return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+}
+
+/**
+ * blkg_rwstat_merge - merge a blkg_rwstat into another
+ * @to: the destination blkg_rwstat
+ * @from: the source
+ *
+ * Add @from's counts to @to.
+ */
+static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
+{
+ struct blkg_rwstat v = blkg_rwstat_read(from);
+ int i;
+
+ u64_stats_update_begin(&to->syncp);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ to->cnt[i] += v.cnt[i];
+ u64_stats_update_end(&to->syncp);
+}
+
+#else /* CONFIG_BLK_CGROUP */
+
+struct blkcg {
+};
+
+struct blkg_policy_data {
+};
+
+struct blkcg_policy_data {
+};
+
+struct blkcg_gq {
+};
+
+struct blkcg_policy {
+};
+
+#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+
+static inline struct cgroup_subsys_state *
+task_get_blkcg_css(struct task_struct *task)
+{
+ return NULL;
+}
+
+#ifdef CONFIG_BLOCK
+
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { }
+
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio) { return &q->root_rl; }
+static inline void blk_put_rl(struct request_list *rl) { }
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
+static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+
+#endif /* CONFIG_BLOCK */
+#endif /* CONFIG_BLK_CGROUP */
+#endif /* _BLK_CGROUP_H */
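As a usage illustration only, with hypothetical names that are not part of the patch: a policy whose per-blkg private data embeds a blkg_rwstat could account bios and report totals with the helpers declared above.

    struct foo_blkg {
            struct blkg_policy_data pd;     /* must be embedded first */
            struct blkg_rwstat serviced;    /* bytes serviced, by direction */
    };

    static inline struct foo_blkg *pd_to_foo(struct blkg_policy_data *pd)
    {
            return container_of(pd, struct foo_blkg, pd);
    }

    static void foo_account_bio(struct foo_blkg *fg, struct bio *bio)
    {
            blkg_rwstat_add(&fg->serviced, bio->bi_rw, bio->bi_iter.bi_size);
    }

    static u64 foo_serviced_bytes(struct foo_blkg *fg)
    {
            return blkg_rwstat_total(&fg->serviced);
    }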
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 7aec86127335..37d1602c4f7a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,7 +13,7 @@ struct blk_mq_cpu_notifier {
};
struct blk_mq_ctxmap {
- unsigned int map_size;
+ unsigned int size;
unsigned int bits_per_word;
struct blk_align_bitmap *map;
};
@@ -96,6 +96,7 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
+typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
struct blk_mq_ops {
/*
@@ -164,6 +165,8 @@ enum {
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
@@ -180,6 +183,7 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -218,16 +222,19 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
void *priv);
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
+ void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
/*
* Driver command data is immediately after the request. So subtract request
- * size to get back to the original request.
+ * size to get back to the original request, add request size to get the PDU.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
@@ -235,7 +242,7 @@ static inline struct request *blk_mq_rq_from_pdu(void *pdu)
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
- return (void *) rq + sizeof(*rq);
+ return rq + 1;
}
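A hedged driver-side sketch (struct foo_cmd and the handlers are illustrative): since the per-command PDU sits immediately after struct request, both conversions are plain pointer arithmetic and can be used from either direction.

    struct foo_cmd {                        /* sized via tag_set->cmd_size */
            int status;
    };

    static void foo_complete(struct foo_cmd *cmd)
    {
            struct request *rq = blk_mq_rq_from_pdu(cmd);

            blk_mq_end_request(rq, cmd->status);
    }

    static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
    {
            struct foo_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

            cmd->status = 0;
            /* ... issue bd->rq to the hardware ... */
            return BLK_MQ_RQ_QUEUE_OK;
    }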
#define queue_for_each_hw_ctx(q, hctx, i) \
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a1b25e35ea5f..6ab9d12d1f17 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -65,7 +65,7 @@ struct bio {
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
- atomic_t bi_remaining;
+ atomic_t __bi_remaining;
bio_end_io_t *bi_end_io;
@@ -92,7 +92,7 @@ struct bio {
unsigned short bi_max_vecs; /* max bvl_vecs we can hold */
- atomic_t bi_cnt; /* pin count */
+ atomic_t __bi_cnt; /* pin count */
struct bio_vec *bi_io_vec; /* the actual vec list */
@@ -112,16 +112,15 @@ struct bio {
* bio flags
*/
#define BIO_UPTODATE 0 /* ok after I/O completion */
-#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
-#define BIO_EOF 2 /* out-of-bounds error */
-#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
-#define BIO_CLONED 4 /* doesn't own data */
-#define BIO_BOUNCED 5 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 6 /* contains user pages */
-#define BIO_EOPNOTSUPP 7 /* not supported */
-#define BIO_NULL_MAPPED 8 /* contains invalid user pages */
-#define BIO_QUIET 9 /* Make BIO Quiet */
-#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
+#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
+#define BIO_CLONED 2 /* doesn't own data */
+#define BIO_BOUNCED 3 /* bio is a bounce bio */
+#define BIO_USER_MAPPED 4 /* contains user pages */
+#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
+#define BIO_QUIET 6 /* Make BIO Quiet */
+#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */
+#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
/*
* Flags starting here get preserved by bio_reset() - this includes
@@ -193,6 +192,7 @@ enum rq_flag_bits {
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
__REQ_NO_TIMEOUT, /* requests may never expire */
+ __REQ_CLONE, /* cloned bios */
__REQ_NR_BITS, /* stops here */
};
@@ -220,7 +220,7 @@ enum rq_flag_bits {
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
@@ -247,5 +247,6 @@ enum rq_flag_bits {
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
+#define REQ_CLONE (1ULL << __REQ_CLONE)
#endif /* __LINUX_BLK_TYPES_H */
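Callers are expected to test these bits through helpers such as bio_flagged() rather than hard-coding values, so the renumbering stays contained; a minimal hedged illustration (helper name hypothetical):

    /* true if the bio owns its data pages, i.e. it is not a clone */
    static inline bool bio_owns_data(struct bio *bio)
    {
            return !bio_flagged(bio, BIO_CLONED);
    }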
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7f9a516f24de..7f2f54b4587f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -12,7 +12,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
-#include <linux/backing-dev.h>
+#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
@@ -22,15 +22,13 @@
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
struct module;
struct scsi_ioctl_command;
struct request_queue;
struct elevator_queue;
-struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
@@ -75,18 +73,7 @@ struct request_list {
enum rq_cmd_type_bits {
REQ_TYPE_FS = 1, /* fs request */
REQ_TYPE_BLOCK_PC, /* scsi command */
- REQ_TYPE_SENSE, /* sense request */
- REQ_TYPE_PM_SUSPEND, /* suspend request */
- REQ_TYPE_PM_RESUME, /* resume request */
- REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
- REQ_TYPE_SPECIAL, /* driver defined type */
- /*
- * for ATA/ATAPI devices. this really doesn't belong here, ide should
- * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
- * private REQ_LB opcodes to differentiate what type of request this is
- */
- REQ_TYPE_ATA_TASKFILE,
- REQ_TYPE_ATA_PC,
+ REQ_TYPE_DRV_PRIV, /* driver defined types from here */
};
#define BLK_MAX_CDB 16
@@ -108,7 +95,7 @@ struct request {
struct blk_mq_ctx *mq_ctx;
u64 cmd_flags;
- enum rq_cmd_type_bits cmd_type;
+ unsigned cmd_type;
unsigned long atomic_flags;
int cpu;
@@ -216,19 +203,6 @@ static inline unsigned short req_get_ioprio(struct request *req)
return req->ioprio;
}
-/*
- * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
- * requests. Some step values could eventually be made generic.
- */
-struct request_pm_state
-{
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
-
#include <linux/elevator.h>
struct blk_queue_ctx;
@@ -469,7 +443,7 @@ struct request_queue {
struct mutex sysfs_lock;
int bypass_depth;
- int mq_freeze_depth;
+ atomic_t mq_freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
@@ -610,10 +584,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
(((rq)->cmd_flags & REQ_STARTED) && \
((rq)->cmd_type == REQ_TYPE_FS))
-#define blk_pm_request(rq) \
- ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
- (rq)->cmd_type == REQ_TYPE_PM_RESUME)
-
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
@@ -804,11 +774,7 @@ extern void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
+extern void blk_rq_prep_clone(struct request *rq, struct request *rq_src);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
@@ -821,32 +787,12 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
-extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
-
-/*
- * A queue has just exited congestion. Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
-{
- clear_bdi_congested(&q->backing_dev_info, sync);
-}
-
-/*
- * A queue has just entered congestion. Flag that in the queue's VM-visible
- * state flags and increment the global counter of congested queues.
- */
-static inline void blk_set_queue_congested(struct request_queue *q, int sync)
-{
- set_bdi_congested(&q->backing_dev_info, sync);
-}
-
extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
+extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
@@ -935,7 +881,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
return q->limits.max_hw_sectors;
- if (!q->limits.chunk_sectors)
+ if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
return blk_queue_get_max_sectors(q, rq->cmd_flags);
return min(blk_max_size_offset(q, blk_rq_pos(rq)),
@@ -1056,6 +1002,7 @@ bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
+extern void blk_set_queue_dying(struct request_queue *);
/*
* block layer runtime pm functions
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 0995c2de8162..f589222bfa87 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -357,12 +357,12 @@ extern void *alloc_large_system_hash(const char *tablename,
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
*/
-#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
-#define HASHDIST_DEFAULT 1
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
#else
-#define HASHDIST_DEFAULT 0
+#define hashdist (0)
#endif
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
#endif /* _LINUX_BOOTMEM_H */
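The !CONFIG_NUMA side can be a bare macro because every user tests hashdist at runtime; with hashdist defined to (0) the test is constant and the NUMA path is discarded at compile time. A hedged sketch of the pattern, loosely modelled on alloc_large_system_hash() with hypothetical names:

    static void *alloc_hash_table(unsigned long bytes)
    {
            if (hashdist)                   /* compile-time 0 when !CONFIG_NUMA */
                    return vmalloc(bytes);          /* spread across nodes */
            return kzalloc(bytes, GFP_KERNEL);      /* single-node allocation */
    }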
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 86c12c93e3cf..8fdcb783197d 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
#define _LINUX_BH_H
#include <linux/preempt.h>
-#include <linux/preempt_mask.h>
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bbfceb756452..4383476a0d48 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -32,23 +32,19 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
- struct bpf_map_ops *ops;
+ const struct bpf_map_ops *ops;
struct work_struct work;
};
struct bpf_map_type_list {
struct list_head list_node;
- struct bpf_map_ops *ops;
+ const struct bpf_map_ops *ops;
enum bpf_map_type type;
};
-void bpf_register_map_type(struct bpf_map_type_list *tl);
-void bpf_map_put(struct bpf_map *map);
-struct bpf_map *bpf_map_get(struct fd f);
-
/* function argument constraints */
enum bpf_arg_type {
- ARG_ANYTHING = 0, /* any argument is ok */
+ ARG_DONTCARE = 0, /* unused argument in helper function */
/* the following constraints used to prototype
* bpf_map_lookup/update/delete_elem() functions
@@ -62,6 +58,9 @@ enum bpf_arg_type {
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+
+ ARG_PTR_TO_CTX, /* pointer to context */
+ ARG_ANYTHING, /* any (initialized) argument is ok */
};
/* type of values returned from helper functions */
@@ -105,41 +104,93 @@ struct bpf_verifier_ops {
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+
+ u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn);
};
struct bpf_prog_type_list {
struct list_head list_node;
- struct bpf_verifier_ops *ops;
+ const struct bpf_verifier_ops *ops;
enum bpf_prog_type type;
};
-void bpf_register_prog_type(struct bpf_prog_type_list *tl);
-
struct bpf_prog;
struct bpf_prog_aux {
atomic_t refcnt;
- bool is_gpl_compatible;
- enum bpf_prog_type prog_type;
- struct bpf_verifier_ops *ops;
- struct bpf_map **used_maps;
u32 used_map_cnt;
+ const struct bpf_verifier_ops *ops;
+ struct bpf_map **used_maps;
struct bpf_prog *prog;
- struct work_struct work;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
+};
+
+struct bpf_array {
+ struct bpf_map map;
+ u32 elem_size;
+ /* 'ownership' of prog_array is claimed by the first program that
+ * is going to use this map or by the first program whose FD is stored
+ * in the map to make sure that all callers and callees have the same
+ * prog_type and JITed flag
+ */
+ enum bpf_prog_type owner_prog_type;
+ bool owner_jited;
+ union {
+ char value[0] __aligned(8);
+ struct bpf_prog *prog[0] __aligned(8);
+ };
};
+#define MAX_TAIL_CALL_CNT 32
+
+u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+void bpf_prog_array_map_clear(struct bpf_map *map);
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
#ifdef CONFIG_BPF_SYSCALL
-void bpf_prog_put(struct bpf_prog *prog);
-#else
-static inline void bpf_prog_put(struct bpf_prog *prog) {}
-#endif
+void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+void bpf_register_map_type(struct bpf_map_type_list *tl);
+
struct bpf_prog *bpf_prog_get(u32 ufd);
+void bpf_prog_put(struct bpf_prog *prog);
+void bpf_prog_put_rcu(struct bpf_prog *prog);
+
+struct bpf_map *bpf_map_get(struct fd f);
+void bpf_map_put(struct bpf_map *map);
+
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+#else
+static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
+{
+}
+
+static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_prog_put(struct bpf_prog *prog)
+{
+}
+#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
-extern struct bpf_func_proto bpf_map_lookup_elem_proto;
-extern struct bpf_func_proto bpf_map_update_elem_proto;
-extern struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
+extern const struct bpf_func_proto bpf_map_update_elem_proto;
+extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+
+extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
+extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_tail_call_proto;
+extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
+extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
+extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
+extern const struct bpf_func_proto bpf_get_current_comm_proto;
#endif /* _LINUX_BPF_H */
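For context, a hedged registration sketch: with the ops pointers const-qualified, a program type's tables can live in rodata. foo_is_valid_access() and the check it performs are illustrative only; bpf_register_prog_type() and the struct layouts come from this header.

    static bool foo_is_valid_access(int off, int size, enum bpf_access_type type)
    {
            return off >= 0 && size == sizeof(u64);         /* illustrative check */
    }

    static const struct bpf_verifier_ops foo_verifier_ops = {
            .is_valid_access = foo_is_valid_access,
    };

    static struct bpf_prog_type_list foo_type __read_mostly = {
            .ops  = &foo_verifier_ops,
            .type = BPF_PROG_TYPE_KPROBE,
    };

    static int __init foo_register_bpf(void)
    {
            bpf_register_prog_type(&foo_type);
            return 0;
    }
    late_initcall(foo_register_bpf);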
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 7ccd928cc1f2..697ca7795bd9 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
#ifndef _LINUX_BRCMPHY_H
#define _LINUX_BRCMPHY_H
+#include <linux/phy.h>
+
+/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used
+ * to configure the switch internal registers via MDIO accesses.
+ */
+#define BRCM_PSEUDO_PHY_ADDR 30
+
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
#define PHY_ID_BCM5241 0x0143bc30
@@ -11,14 +18,16 @@
#define PHY_ID_BCM5421 0x002060e0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
+#define PHY_ID_BCM54616S 0x03625d10
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
-#define PHY_ID_BCM7425 0x03625e60
+#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7439 0x600d8480
+#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
#define PHY_BCM_OUI_MASK 0xfffffc00
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index c05ff0f9f9a5..c3a9c8fc60fa 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -61,6 +61,8 @@ struct can_priv {
char tx_led_trig_name[CAN_LED_NAME_SZ];
struct led_trigger *rx_led_trig;
char rx_led_trig_name[CAN_LED_NAME_SZ];
+ struct led_trigger *rxtx_led_trig;
+ char rxtx_led_trig_name[CAN_LED_NAME_SZ];
#endif
};
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
index e0475c5cbb92..146de4506d21 100644
--- a/include/linux/can/led.h
+++ b/include/linux/can/led.h
@@ -21,8 +21,10 @@ enum can_led_event {
#ifdef CONFIG_CAN_LEDS
-/* keep space for interface name + "-tx"/"-rx" suffix and null terminator */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 4)
+/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
+ * suffix and null terminator
+ */
+#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
void can_led_event(struct net_device *netdev, enum can_led_event event);
void devm_can_led_init(struct net_device *netdev);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index cc00d15c6107..b6a52a4b457a 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -44,16 +44,11 @@ static inline void can_skb_reserve(struct sk_buff *skb)
skb_reserve(skb, sizeof(struct can_skb_priv));
}
-static inline void can_skb_destructor(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
if (sk) {
sock_hold(sk);
- skb->destructor = can_skb_destructor;
+ skb->destructor = sock_efree;
skb->sk = sk;
}
}
diff --git a/include/linux/capability.h b/include/linux/capability.h
index aa93e5ef594c..af9f0b9e80e6 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -205,6 +205,7 @@ static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
cap_intersect(permitted, __cap_nfsd_set));
}
+#ifdef CONFIG_MULTIUSER
extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
@@ -213,6 +214,34 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
+#else
+static inline bool has_capability(struct task_struct *t, int cap)
+{
+ return true;
+}
+static inline bool has_ns_capability(struct task_struct *t,
+ struct user_namespace *ns, int cap)
+{
+ return true;
+}
+static inline bool has_capability_noaudit(struct task_struct *t, int cap)
+{
+ return true;
+}
+static inline bool has_ns_capability_noaudit(struct task_struct *t,
+ struct user_namespace *ns, int cap)
+{
+ return true;
+}
+static inline bool capable(int cap)
+{
+ return true;
+}
+static inline bool ns_capable(struct user_namespace *ns, int cap)
+{
+ return true;
+}
+#endif /* CONFIG_MULTIUSER */
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 71e05bbf8ceb..4763ad64e832 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -50,6 +50,19 @@
#define CEPH_FEATURE_MDS_INLINE_DATA (1ULL<<40)
#define CEPH_FEATURE_CRUSH_TUNABLES3 (1ULL<<41)
#define CEPH_FEATURE_OSD_PRIMARY_AFFINITY (1ULL<<41) /* overlap w/ tunables3 */
+#define CEPH_FEATURE_MSGR_KEEPALIVE2 (1ULL<<42)
+#define CEPH_FEATURE_OSD_POOLRESEND (1ULL<<43)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 (1ULL<<44)
+#define CEPH_FEATURE_OSD_SET_ALLOC_HINT (1ULL<<45)
+#define CEPH_FEATURE_OSD_FADVISE_FLAGS (1ULL<<46)
+#define CEPH_FEATURE_OSD_REPOP (1ULL<<46) /* overlap with fadvise */
+#define CEPH_FEATURE_OSD_OBJECT_DIGEST (1ULL<<46) /* overlap with fadvise */
+#define CEPH_FEATURE_OSD_TRANSACTION_MAY_LAYOUT (1ULL<<46) /* overlap w/ fadvise */
+#define CEPH_FEATURE_MDS_QUOTA (1ULL<<47)
+#define CEPH_FEATURE_CRUSH_V4 (1ULL<<48) /* straw2 buckets */
+#define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
+// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
+#define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -93,7 +106,8 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
- CEPH_FEATURE_OSD_PRIMARY_AFFINITY)
+ CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
+ CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 31eb03d0c766..d7d072a25c27 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -323,6 +323,7 @@ enum {
CEPH_MDS_OP_MKSNAP = 0x01400,
CEPH_MDS_OP_RMSNAP = 0x01401,
CEPH_MDS_OP_LSSNAP = 0x00402,
+ CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
extern const char *ceph_mds_op_name(int op);
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
index 1df086d7882d..29cf897cc5cd 100644
--- a/include/linux/ceph/debugfs.h
+++ b/include/linux/ceph/debugfs.h
@@ -7,13 +7,7 @@
#define CEPH_DEFINE_SHOW_FUNC(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
- struct seq_file *sf; \
- int ret; \
- \
- ret = single_open(file, name, NULL); \
- sf = file->private_data; \
- sf->private = inode->i_private; \
- return ret; \
+ return single_open(file, name, inode->i_private); \
} \
\
static const struct file_operations name##_fops = { \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 16fff9608848..30f92cefaa72 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -135,6 +135,7 @@ struct ceph_client {
struct dentry *debugfs_dir;
struct dentry *debugfs_monmap;
struct dentry *debugfs_osdmap;
+ struct dentry *debugfs_options;
#endif
};
@@ -191,6 +192,7 @@ extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
void *private);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 561ea896c657..e55c08bc3a96 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -175,13 +175,12 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
__u8 version;
if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) {
- pr_warning("incomplete pg encoding");
-
+ pr_warn("incomplete pg encoding\n");
return -EINVAL;
}
version = ceph_decode_8(p);
if (version > 1) {
- pr_warning("do not understand pg encoding %d > 1",
+ pr_warn("do not understand pg encoding %d > 1\n",
(int)version);
return -EINVAL;
}
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b9cb94c3102a..e7da0aa65b2d 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -774,6 +774,31 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
}
/**
+ * task_get_css - find and get the css for (task, subsys)
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Find the css for the (@task, @subsys_id) combination, increment a
+ * reference on and return it. This function is guaranteed to return a
+ * valid css.
+ */
+static inline struct cgroup_subsys_state *
+task_get_css(struct task_struct *task, int subsys_id)
+{
+ struct cgroup_subsys_state *css;
+
+ rcu_read_lock();
+ while (true) {
+ css = task_css(task, subsys_id);
+ if (likely(css_tryget_online(css)))
+ break;
+ cpu_relax();
+ }
+ rcu_read_unlock();
+ return css;
+}
+
+/**
* task_css_is_root - test whether a task belongs to the root css
* @task: the target task
* @subsys_id: the target subsystem ID
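A hedged usage sketch of task_get_css() (the helper name is made up): every successful call must be balanced with a css_put() once the caller is done with the css.

    static bool task_in_root_blkcg(struct task_struct *tsk)
    {
            struct cgroup_subsys_state *css = task_get_css(tsk, blkio_cgrp_id);
            bool is_root = !css->parent;            /* the root css has no parent */

            css_put(css);
            return is_root;
    }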
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index 4ce9056b31a8..bda5ec0b4b4d 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -5,6 +5,10 @@
#include <linux/exportfs.h>
#include <linux/mm.h>
+#define CLEANCACHE_NO_POOL -1
+#define CLEANCACHE_NO_BACKEND -2
+#define CLEANCACHE_NO_BACKEND_SHARED -3
+
#define CLEANCACHE_KEY_MAX 6
/*
@@ -33,10 +37,9 @@ struct cleancache_ops {
void (*invalidate_fs)(int);
};
-extern struct cleancache_ops *
- cleancache_register_ops(struct cleancache_ops *ops);
+extern int cleancache_register_ops(struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(char *, struct super_block *);
+extern void __cleancache_init_shared_fs(struct super_block *);
extern int __cleancache_get_page(struct page *);
extern void __cleancache_put_page(struct page *);
extern void __cleancache_invalidate_page(struct address_space *, struct page *);
@@ -78,10 +81,10 @@ static inline void cleancache_init_fs(struct super_block *sb)
__cleancache_init_fs(sb);
}
-static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb)
+static inline void cleancache_init_shared_fs(struct super_block *sb)
{
if (cleancache_enabled)
- __cleancache_init_shared_fs(uuid, sb);
+ __cleancache_init_shared_fs(sb);
}
static inline int cleancache_get_page(struct page *page)
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 5591ea71a8d1..df695313f975 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -541,7 +541,7 @@ struct clk_gpio {
extern const struct clk_ops clk_gpio_gate_ops;
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, struct gpio_desc *gpio,
+ const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags);
void of_gpio_clk_gate_setup(struct device_node *node);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 68c16a6bedb3..0df4a51e1a78 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -306,6 +306,20 @@ void devm_clk_put(struct device *dev, struct clk *clk);
* @clk: clock source
* @rate: desired clock rate in Hz
*
+ * This answers the question "if I were to pass @rate to clk_set_rate(),
+ * what clock rate would I end up with?" without changing the hardware
+ * in any way. In other words:
+ *
+ * rate = clk_round_rate(clk, r);
+ *
+ * and:
+ *
+ * clk_set_rate(clk, r);
+ * rate = clk_get_rate(clk);
+ *
+ * are equivalent except the former does not modify the clock hardware
+ * in any way.
+ *
* Returns rounded clock rate in Hz, or negative errno.
*/
long clk_round_rate(struct clk *clk, unsigned long rate);
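A hedged consumer-side sketch of the pattern the comment describes (the function name and the 48 MHz target are illustrative): query the achievable rate first, decide, and only then program the hardware.

    static int foo_setup_clk(struct clk *clk)
    {
            long rounded = clk_round_rate(clk, 48000000);   /* no HW change yet */

            if (rounded < 0)
                    return rounded;                 /* propagate the errno */
            if (rounded < 40000000)
                    return -EINVAL;                 /* too far off, bail out */
            return clk_set_rate(clk, rounded);      /* now program the hardware */
    }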
@@ -471,19 +485,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-/**
- * clk_add_alias - add a new clock alias
- * @alias: name for clock alias
- * @alias_dev_name: device name
- * @id: platform specific clock name
- * @dev: device
- *
- * Allows using generic clock names for drivers by adding a new alias.
- * Assumes clkdev, see clkdev.h for more info.
- */
-int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
- struct device *dev);
-
struct device_node;
struct of_phandle_args;
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index c8e3b3d1eded..7669f7618f39 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -20,10 +20,10 @@
extern void __iomem *at91_pmc_base;
#define at91_pmc_read(field) \
- __raw_readl(at91_pmc_base + field)
+ readl_relaxed(at91_pmc_base + field)
#define at91_pmc_write(field, value) \
- __raw_writel(value, at91_pmc_base + field)
+ writel_relaxed(value, at91_pmc_base + field)
#else
.extern at91_pmc_base
#endif
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
index 9f8a14041dd5..63a8159c4e64 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/shmobile.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
+void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 67844003493d..79b76e13d904 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -215,14 +215,14 @@ struct ti_dt_clk {
.node_name = name, \
}
-/* Maximum number of clock memmaps */
-#define CLK_MAX_MEMMAPS 4
-
/* Static memmap indices */
enum {
TI_CLKM_CM = 0,
+ TI_CLKM_CM2,
TI_CLKM_PRM,
TI_CLKM_SCRM,
+ TI_CLKM_CTRL,
+ CLK_MAX_MEMMAPS
};
typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 94bad77eeb4a..a240b18e86fa 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -22,6 +22,7 @@ struct clk_lookup {
const char *dev_id;
const char *con_id;
struct clk *clk;
+ struct clk_hw *clk_hw;
};
#define CLKDEV_INIT(d, n, c) \
@@ -37,8 +38,11 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
+struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
+ const char *dev_fmt, ...);
+
void clkdev_add_table(struct clk_lookup *, size_t);
-int clk_add_alias(const char *, const char *, char *, struct device *);
+int clk_add_alias(const char *, const char *, const char *, struct device *);
int clk_register_clkdev(struct clk *, const char *, const char *, ...);
int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 96c280b2c263..597a1e836f22 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -37,12 +37,15 @@ enum clock_event_mode {
* reached from DETACHED or SHUTDOWN.
* ONESHOT: Device is programmed to generate event only once. Can be reached
* from DETACHED or SHUTDOWN.
+ * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
+ * stopped.
*/
enum clock_event_state {
CLOCK_EVT_STATE_DETACHED,
CLOCK_EVT_STATE_SHUTDOWN,
CLOCK_EVT_STATE_PERIODIC,
CLOCK_EVT_STATE_ONESHOT,
+ CLOCK_EVT_STATE_ONESHOT_STOPPED,
};
/*
@@ -84,12 +87,13 @@ enum clock_event_state {
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
* @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
- * @state: current state of the device, assigned by the core code
+ * @state_use_accessors:current state of the device, assigned by the core code
* @features: features
* @retries: number of forced programming retries
* @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
* @set_state_periodic: switch state to periodic, if !set_mode
* @set_state_oneshot: switch state to oneshot, if !set_mode
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
* @set_state_shutdown: switch state to shutdown, if !set_mode
* @tick_resume: resume clkevt device, if !set_mode
* @broadcast: function to broadcast events
@@ -113,7 +117,7 @@ struct clock_event_device {
u32 mult;
u32 shift;
enum clock_event_mode mode;
- enum clock_event_state state;
+ enum clock_event_state state_use_accessors;
unsigned int features;
unsigned long retries;
@@ -121,11 +125,12 @@ struct clock_event_device {
* State transition callback(s): Only one of the two groups should be
* defined:
* - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
- * - set_state_{shutdown|periodic|oneshot}(), tick_resume().
+ * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
*/
void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
int (*set_state_periodic)(struct clock_event_device *);
int (*set_state_oneshot)(struct clock_event_device *);
+ int (*set_state_oneshot_stopped)(struct clock_event_device *);
int (*set_state_shutdown)(struct clock_event_device *);
int (*tick_resume)(struct clock_event_device *);
@@ -144,6 +149,32 @@ struct clock_event_device {
struct module *owner;
} ____cacheline_aligned;
+/* Helpers to verify state of a clockevent device */
+static inline bool clockevent_state_detached(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
+}
+
+static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
+}
+
+static inline bool clockevent_state_periodic(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
+}
+
+static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
+}
+
+static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
+}
+
/*
* Calculate a multiplication factor for scaled math, which is used to convert
* nanoseconds based values to clock ticks:
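A hedged driver-side sketch (the foo_timer_* hardware helpers are hypothetical): with the field renamed to state_use_accessors, drivers are expected to go through the helpers above, for example to decide whether to re-arm from the interrupt handler.

    static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
    {
            struct clock_event_device *evt = dev_id;

            foo_timer_ack_irq();                    /* hypothetical register poke */
            if (clockevent_state_periodic(evt))
                    foo_timer_rearm();              /* hypothetical re-arm */

            evt->event_handler(evt);
            return IRQ_HANDLED;
    }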
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 135509821c39..278dd279a7a8 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -181,7 +181,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
-extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
@@ -253,4 +252,10 @@ extern void clocksource_of_init(void);
static inline void clocksource_of_init(void) {}
#endif
+#ifdef CONFIG_ACPI
+void acpi_generic_timer_init(void);
+#else
+static inline void acpi_generic_timer_init(void) { }
+#endif
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 9384ba66e975..f7ef093ec49a 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -16,16 +16,16 @@
struct cma;
extern unsigned long totalcma_pages;
-extern phys_addr_t cma_get_base(struct cma *cma);
-extern unsigned long cma_get_size(struct cma *cma);
+extern phys_addr_t cma_get_base(const struct cma *cma);
+extern unsigned long cma_get_size(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma);
-extern int cma_init_reserved_mem(phys_addr_t base,
- phys_addr_t size, int order_per_bit,
+extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ unsigned int order_per_bit,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
-extern bool cma_release(struct cma *cma, struct page *pages, int count);
+extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index a014559e4a49..aa8f61cf3a19 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -34,6 +34,7 @@ extern int sysctl_compaction_handler(struct ctl_table *table, int write,
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_compact_unevictable_allowed;
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cdf13ca7cac3..371e560d13cf 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -9,10 +9,24 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-
/* Optimization barrier */
+
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
+/*
+ * This version is used e.g. to prevent dead-store elimination on @ptr,
+ * where gcc and llvm may behave differently when a plain barrier() is
+ * used: gcc gets along with a normal barrier(), but llvm needs an
+ * explicit input variable to be assumed clobbered. The issue is as
+ * follows: while the inline asm might access any memory it wants, the
+ * compiler could have fit all of @ptr into registers instead, and
+ * since @ptr never escaped from there, it proved to itself that the
+ * inline asm wasn't touching any of it. This version works well with
+ * both compilers, i.e. we're telling the compiler that the inline asm
+ * absolutely may see the contents of @ptr.
+ * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
/*
* This macro obfuscates arithmetic on a variable address so that gcc
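A hedged sketch of the intended use of barrier_data(), along the lines of memzero_explicit() (the helper below is illustrative): the barrier makes the compiler assume the cleared bytes are observed, so the memset() cannot be treated as a dead store.

    static void wipe_key(u8 *key, size_t len)
    {
            memset(key, 0, len);
            barrier_data(key);      /* the asm "reads" *key, so the memset stays */
    }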
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index ba147a1727e6..0c9a2f2c2802 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -13,9 +13,12 @@
/* Intel ECC compiler doesn't support gcc specific asm stmts.
* It uses intrinsics to do the equivalent things.
*/
+#undef barrier_data
#undef RELOC_HIDE
#undef OPTIMIZER_HIDE_VAR
+#define barrier_data(ptr) barrier()
+
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
__ptr = (unsigned long) (ptr); \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 0e41ca0e5927..05be2352fef8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define barrier() __memory_barrier()
#endif
+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
+
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
@@ -246,7 +250,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
#define WRITE_ONCE(x, val) \
- ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
+ ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+/**
+ * READ_ONCE_CTRL - Read a value heading a control dependency
+ * @x: The value to be read, heading the control dependency
+ *
+ * Control dependencies are tricky. See Documentation/memory-barriers.txt
+ * for important information on how to use them. Note that in many cases,
+ * use of smp_load_acquire() will be much simpler. Control dependencies
+ * should be avoided except on the hottest of hotpaths.
+ */
+#define READ_ONCE_CTRL(x) \
+({ \
+ typeof(x) __val = READ_ONCE(x); \
+ smp_read_barrier_depends(); /* Enforce control dependency. */ \
+ __val; \
+})
#endif /* __KERNEL__ */
@@ -446,7 +466,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
- * If possible use READ_ONCE/ASSIGN_ONCE instead.
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
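
A minimal sketch (not part of the patch) of READ_ONCE_CTRL() heading a control dependency, paired with WRITE_ONCE() as the kernel-doc above suggests; the two globals are placeholders.

#include <linux/compiler.h>

static int ready_flag;
static int payload;

static void consume_if_ready(void)
{
        /* The load of ready_flag heads the control dependency ... */
        if (READ_ONCE_CTRL(ready_flag)) {
                /* ... so this store cannot be hoisted above it. */
                WRITE_ONCE(payload, 42);
        }
}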
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 34025df61829..c9e5c57e4edf 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -71,7 +71,6 @@ static inline char *config_item_name(struct config_item * item)
return item->ci_name;
}
-extern void config_item_init(struct config_item *);
extern void config_item_init_type_name(struct config_item *item,
const char *name,
struct config_item_type *type);
diff --git a/include/linux/console.h b/include/linux/console.h
index 7571a16bd653..9f50fb413c11 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -123,7 +123,7 @@ struct console {
struct tty_driver *(*device)(struct console *, int *);
void (*unblank)(void);
int (*setup)(struct console *, char *);
- int (*early_setup)(void);
+ int (*match)(struct console *, char *name, int idx, char *options);
short flags;
short index;
int cflag;
@@ -141,7 +141,6 @@ extern int console_set_on_cmdline;
extern struct console *early_console;
extern int add_preferred_console(char *name, int idx, char *options);
-extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
extern struct console *console_drivers;
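
A sketch (not part of the patch) of the new ->match() hook that replaces ->early_setup(): a driver claims a console= entry by name and index. The "myuart" name and my_console_setup() are invented for illustration.

#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>

static int my_console_setup(struct console *co, char *options);        /* hypothetical */

static int my_console_match(struct console *co, char *name, int idx,
                            char *options)
{
        if (strcmp(name, "myuart"))
                return -ENODEV;         /* not ours, let other consoles try */

        co->index = idx;
        return my_console_setup(co, options);
}

static struct console my_console = {
        .name  = "myuart",
        .match = my_console_match,
        .flags = CON_PRINTBUFFER,
        .index = -1,
};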
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 37b81bd51ec0..b96bd299966f 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,10 +10,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);
+extern void context_tracking_enter(enum ctx_state state);
+extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
-extern void __context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next);
static inline void user_enter(void)
{
@@ -35,7 +35,8 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
- context_tracking_user_exit();
+ if (prev_ctx != CONTEXT_KERNEL)
+ context_tracking_exit(prev_ctx);
return prev_ctx;
}
@@ -43,24 +44,16 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
if (context_tracking_is_enabled()) {
- if (prev_ctx == IN_USER)
- context_tracking_user_enter();
+ if (prev_ctx != CONTEXT_KERNEL)
+ context_tracking_enter(prev_ctx);
}
}
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next)
-{
- if (context_tracking_is_enabled())
- __context_tracking_task_switch(prev, next);
-}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next) { }
#endif /* !CONFIG_CONTEXT_TRACKING */
@@ -78,10 +71,16 @@ static inline void guest_enter(void)
vtime_guest_enter(current);
else
current->flags |= PF_VCPU;
+
+ if (context_tracking_is_enabled())
+ context_tracking_enter(CONTEXT_GUEST);
}
static inline void guest_exit(void)
{
+ if (context_tracking_is_enabled())
+ context_tracking_exit(CONTEXT_GUEST);
+
if (vtime_accounting_enabled())
vtime_guest_exit(current);
else
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 97a81225d037..678ecdf90cf6 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -12,9 +12,11 @@ struct context_tracking {
* may be further optimized using static keys.
*/
bool active;
+ int recursion;
enum ctx_state {
- IN_KERNEL = 0,
- IN_USER,
+ CONTEXT_KERNEL = 0,
+ CONTEXT_USER,
+ CONTEXT_GUEST,
} state;
};
@@ -34,11 +36,13 @@ static inline bool context_tracking_cpu_is_enabled(void)
static inline bool context_tracking_in_user(void)
{
- return __this_cpu_read(context_tracking.state) == IN_USER;
+ return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
+static inline bool context_tracking_is_enabled(void) { return false; }
+static inline bool context_tracking_cpu_is_enabled(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
#endif
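
A rough sketch (not part of the patch) of how a hypervisor run loop brackets guest execution so the new CONTEXT_GUEST state is entered and left; enter_hw_guest_mode() stands in for the arch-specific VM entry.

#include <linux/context_tracking.h>

static void enter_hw_guest_mode(void);  /* hypothetical VM entry */

static void vcpu_run_once(void)
{
        guest_enter();          /* vtime accounting + CONTEXT_GUEST if tracking is on */
        enter_hw_guest_mode();
        guest_exit();           /* back to CONTEXT_KERNEL accounting */
}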
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4260e8594bd7..c0fb6b1b4712 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -73,6 +73,7 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
+ CPU_PRI_SMPBOOT = 9,
/* bring up workqueues before normal notifiers and down after */
CPU_PRI_WORKQUEUE_UP = 5,
CPU_PRI_WORKQUEUE_DOWN = -5,
@@ -95,6 +96,10 @@ enum {
* Called on the new cpu, just before
* enabling interrupts. Must not sleep,
* must not fail */
+#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
+ * idle loop. */
+#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
+ * perhaps due to preemption. */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
* operation in progress
@@ -161,6 +166,7 @@ static inline void __unregister_cpu_notifier(struct notifier_block *nb)
}
#endif
+void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
@@ -208,6 +214,10 @@ static inline void cpu_notifier_register_done(void)
{
}
+static inline void smpboot_thread_init(void)
+{
+}
+
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -271,4 +281,14 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
+DECLARE_PER_CPU(bool, cpu_dead_idle);
+
+int cpu_report_state(int cpu);
+int cpu_check_up_prepare(int cpu);
+void cpu_set_state_online(int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+bool cpu_wait_death(unsigned int cpu, int seconds);
+bool cpu_report_death(void);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
#endif /* _LINUX_CPU_H_ */
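
A sketch (not part of the patch) of how arch hotplug code might pair the new helpers: the outgoing CPU reports its death from the idle path while the controlling CPU waits for it. arch_cpu_park() is a made-up stand-in for the arch-specific part, and the code assumes CONFIG_HOTPLUG_CPU.

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/printk.h>

static void arch_cpu_park(void);        /* hypothetical low-power spin or power-off */

/* Runs on the CPU going down, from its idle/death path. */
static void report_own_death(void)
{
        if (!cpu_report_death())
                pr_err("CPU death reported after the timeout\n");
        arch_cpu_park();
}

/* Runs on the controlling CPU during teardown. */
static int wait_for_victim(unsigned int cpu)
{
        /* give the dying CPU up to five seconds to reach its idle loop */
        return cpu_wait_death(cpu, 5) ? 0 : -EBUSY;
}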
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 2ee4888c1f47..29ad97c34fd5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -65,7 +65,9 @@ struct cpufreq_policy {
unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */
- unsigned int cpu; /* cpu nr of CPU managing this policy */
+ unsigned int cpu; /* cpu managing this policy, must be online */
+ unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */
+
struct clk *clk;
struct cpufreq_cpuinfo cpuinfo;/* see above */
@@ -80,6 +82,7 @@ struct cpufreq_policy {
struct cpufreq_governor *governor; /* see below */
void *governor_data;
bool governor_enabled; /* governor start/stop flag */
+ char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e89254796..d075d34279df 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -151,10 +151,6 @@ extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
-extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
-extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
#else
@@ -190,16 +186,28 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
+static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
+ struct cpuidle_device *dev) {return NULL; }
+#endif
+
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
- struct cpuidle_device *dev) {return NULL; }
#endif
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
+
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 086549a665e2..59915ea5373c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -11,6 +11,7 @@
#include <linux/bitmap.h>
#include <linux/bug.h>
+/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
/**
@@ -150,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
return 1;
}
-static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
- set_bit(0, cpumask_bits(dstp));
-
return 0;
}
@@ -207,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
+unsigned int cpumask_local_spread(unsigned int i, int node);
/**
* for_each_cpu - iterate over every cpu in a mask
@@ -289,11 +288,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpumask: the cpumask pointer
*
* Returns 1 if @cpu is set in @cpumask, else returns 0
- *
- * No static inline type checking - see Subtlety (1) above.
*/
-#define cpumask_test_cpu(cpu, cpumask) \
- test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
+static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+{
+ return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
+}
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -609,9 +608,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*/
static inline size_t cpumask_size(void)
{
- /* FIXME: Once all cpumask assignments are eliminated, this
- * can be nr_cpumask_bits */
- return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
+ return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
/*
@@ -768,7 +765,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL \
{ \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#else /* NR_CPUS > BITS_PER_LONG */
@@ -776,7 +773,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#define CPU_BITS_ALL \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#endif /* NR_CPUS > BITS_PER_LONG */
@@ -797,32 +794,18 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
nr_cpu_ids);
}
-/*
- *
- * From here down, all obsolete. Use cpumask_ variants!
- *
- */
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
#if NR_CPUS <= BITS_PER_LONG
-
#define CPU_MASK_ALL \
(cpumask_t) { { \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
-
#else
-
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
-
-#endif
+#endif /* NR_CPUS > BITS_PER_LONG */
#define CPU_MASK_NONE \
(cpumask_t) { { \
@@ -834,143 +817,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
-#if NR_CPUS == 1
-#define first_cpu(src) ({ (void)(src); 0; })
-#define next_cpu(n, src) ({ (void)(src); 1; })
-#define any_online_cpu(mask) 0
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#else /* NR_CPUS > 1 */
-int __first_cpu(const cpumask_t *srcp);
-int __next_cpu(int n, const cpumask_t *srcp);
-
-#define first_cpu(src) __first_cpu(&(src))
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu((cpu), (mask)), \
- (cpu) < NR_CPUS; )
-#endif /* SMP */
-
-#if NR_CPUS <= 64
-
-#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
-
-#else /* NR_CPUS > 64 */
-
-int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define for_each_cpu_mask_nr(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = __next_cpu_nr((cpu), &(mask)), \
- (cpu) < nr_cpu_ids; )
-
-#endif /* NR_CPUS > 64 */
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
- set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
- clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits)
-{
- bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits)
-{
- bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
- return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
- __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
- const cpumask_t *src2p, unsigned int nbits)
-{
- return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits)
-{
- return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
-{
- return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
- __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-
#endif /* __LINUX_CPUMASK_H */
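
A sketch (not part of the patch) of the cpumask_* replacements for the obsolete helpers deleted above, with cpumask_local_spread() in the role previously played by cpumask_set_cpu_local_first(); spread_queues() and its parameters are invented.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int spread_queues(unsigned int nr_queues, int node)
{
        cpumask_var_t mask;
        unsigned int q;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        for (q = 0; q < nr_queues; q++) {
                /* q-th CPU, preferring CPUs local to 'node' */
                unsigned int cpu = cpumask_local_spread(q, node);

                if (!cpumask_test_cpu(cpu, mask))       /* now a static inline */
                        cpumask_set_cpu(cpu, mask);
        }

        free_cpumask_var(mask);
        return 0;
}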
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index 84920f3cc83e..a9953c762eee 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -3,7 +3,7 @@
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
* Init 0
*
* This source code is licensed under the GNU General Public License,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 2fb2ca2127ed..8b6c083e68a7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -62,9 +62,27 @@ do { \
groups_free(group_info); \
} while (0)
-extern struct group_info *groups_alloc(int);
extern struct group_info init_groups;
+#ifdef CONFIG_MULTIUSER
+extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
+
+extern int in_group_p(kgid_t);
+extern int in_egroup_p(kgid_t);
+#else
+static inline void groups_free(struct group_info *group_info)
+{
+}
+
+static inline int in_group_p(kgid_t grp)
+{
+ return 1;
+}
+static inline int in_egroup_p(kgid_t grp)
+{
+ return 1;
+}
+#endif
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
@@ -74,9 +92,6 @@ extern bool may_setgroups(void);
#define GROUP_AT(gi, i) \
((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
-extern int in_group_p(kgid_t);
-extern int in_egroup_p(kgid_t);
-
/*
* The security context of a task
*
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 4fad5f8ee01d..48a1a7d100f1 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -96,13 +96,15 @@ struct crush_rule {
* uniform O(1) poor poor
* list O(n) optimal poor
* tree O(log n) good good
- * straw O(n) optimal optimal
+ * straw O(n) better better
+ * straw2 O(n) optimal optimal
*/
enum {
CRUSH_BUCKET_UNIFORM = 1,
CRUSH_BUCKET_LIST = 2,
CRUSH_BUCKET_TREE = 3,
- CRUSH_BUCKET_STRAW = 4
+ CRUSH_BUCKET_STRAW = 4,
+ CRUSH_BUCKET_STRAW2 = 5,
};
extern const char *crush_bucket_alg_name(int alg);
@@ -149,6 +151,11 @@ struct crush_bucket_straw {
__u32 *straws; /* 16-bit fixed point */
};
+struct crush_bucket_straw2 {
+ struct crush_bucket h;
+ __u32 *item_weights; /* 16-bit fixed point */
+};
+
/*
@@ -189,6 +196,7 @@ extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
+extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy_rule(struct crush_rule *r);
extern void crush_destroy(struct crush_map *map);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index fb5ef16d6a12..81ef938b0a8e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -53,6 +53,7 @@
#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
@@ -95,6 +96,18 @@
#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
/*
+ * Mark a cipher as a service implementation only usable by another
+ * cipher and never by a normal user of the kernel crypto API
+ */
+#define CRYPTO_ALG_INTERNAL 0x00002000
+
+/*
+ * Temporary flag used to prevent legacy AEAD implementations from
+ * being used by user-space.
+ */
+#define CRYPTO_ALG_AEAD_NEW 0x00004000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -132,9 +145,9 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
-struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
+struct aead_request;
struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
@@ -169,32 +182,6 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/**
- * struct aead_request - AEAD request
- * @base: Common attributes for async crypto requests
- * @assoclen: Length in bytes of associated data for authentication
- * @cryptlen: Length of data to be encrypted or decrypted
- * @iv: Initialisation vector
- * @assoc: Associated data
- * @src: Source data
- * @dst: Destination data
- * @__ctx: Start of private context data
- */
-struct aead_request {
- struct crypto_async_request base;
-
- unsigned int assoclen;
- unsigned int cryptlen;
-
- u8 *iv;
-
- struct scatterlist *assoc;
- struct scatterlist *src;
- struct scatterlist *dst;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
struct blkcipher_desc {
struct crypto_blkcipher *tfm;
void *info;
@@ -288,7 +275,7 @@ struct ablkcipher_alg {
};
/**
- * struct aead_alg - AEAD cipher definition
+ * struct old_aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
* transformation. A transformation may support smaller tag sizes.
* As the authentication tag is a message digest to ensure the
@@ -313,7 +300,7 @@ struct ablkcipher_alg {
* All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
* mandatory and must be filled.
*/
-struct aead_alg {
+struct old_aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
@@ -420,40 +407,12 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
-/**
- * struct rng_alg - random number generator definition
- * @rng_make_random: The function defined by this variable obtains a random
- * number. The random number generator transform must generate
- * the random number out of the context provided with this
- * call.
- * @rng_reset: Reset of the random number generator by clearing the entire state.
- * With the invocation of this function call, the random number
- * generator shall completely reinitialize its state. If the random
- * number generator requires a seed for setting up a new state,
- * the seed must be provided by the consumer while invoking this
- * function. The required size of the seed is defined with
- * @seedsize .
- * @seedsize: The seed size required for a random number generator
- * initialization defined with this variable. Some random number
- * generators like the SP800-90A DRBG does not require a seed as the
- * seeding is implemented internally without the need of support by
- * the consumer. In this case, the seed size is set to zero.
- */
-struct rng_alg {
- int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-
- unsigned int seedsize;
-};
-
#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
-#define cra_rng cra_u.rng
/**
* struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -499,7 +458,7 @@ struct rng_alg {
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
- * trasnformation types. There are multiple options:
+ * transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
* &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
@@ -549,11 +508,10 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
- struct aead_alg aead;
+ struct old_aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
- struct rng_alg rng;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -561,7 +519,7 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
-};
+} CRYPTO_MINALIGN_ATTR;
/*
* Algorithm registration interface.
@@ -596,21 +554,6 @@ struct ablkcipher_tfm {
unsigned int reqsize;
};
-struct aead_tfm {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- struct crypto_aead *base;
-
- unsigned int ivsize;
- unsigned int authsize;
- unsigned int reqsize;
-};
-
struct blkcipher_tfm {
void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -649,19 +592,11 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
-struct rng_tfm {
- int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-};
-
#define crt_ablkcipher crt_u.ablkcipher
-#define crt_aead crt_u.aead
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
#define crt_compress crt_u.compress
-#define crt_rng crt_u.rng
struct crypto_tfm {
@@ -669,12 +604,10 @@ struct crypto_tfm {
union {
struct ablkcipher_tfm ablkcipher;
- struct aead_tfm aead;
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
struct compress_tfm compress;
- struct rng_tfm rng;
} crt_u;
void (*exit)(struct crypto_tfm *tfm);
@@ -688,10 +621,6 @@ struct crypto_ablkcipher {
struct crypto_tfm base;
};
-struct crypto_aead {
- struct crypto_tfm base;
-};
-
struct crypto_blkcipher {
struct crypto_tfm base;
};
@@ -708,10 +637,6 @@ struct crypto_hash {
struct crypto_tfm base;
};
-struct crypto_rng {
- struct crypto_tfm base;
-};
-
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -1188,400 +1113,6 @@ static inline void ablkcipher_request_set_crypt(
}
/**
- * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
- *
- * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
- * (listed as type "aead" in /proc/crypto)
- *
- * The most prominent examples for this type of encryption is GCM and CCM.
- * However, the kernel supports other types of AEAD ciphers which are defined
- * with the following cipher string:
- *
- * authenc(keyed message digest, block cipher)
- *
- * For example: authenc(hmac(sha256), cbc(aes))
- *
- * The example code provided for the asynchronous block cipher operation
- * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addtion, for the AEAD
- * operation, the aead_request_set_assoc function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
- * operation is that the caller should explicitly check for -EBADMSG of the
- * crypto_aead_decrypt. That error indicates an authentication error, i.e.
- * a breach in the integrity of the message. In essence, that -EBADMSG error
- * code is the key bonus an AEAD cipher has over "standard" block chaining
- * modes.
- */
-
-static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_aead *)tfm;
-}
-
-/**
- * crypto_alloc_aead() - allocate AEAD cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * AEAD cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for an AEAD. The returned struct
- * crypto_aead is the cipher handle that is required for any subsequent
- * API invocation for that AEAD.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
-
-static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_aead() - zeroize and free aead handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_aead(struct crypto_aead *tfm)
-{
- crypto_free_tfm(crypto_aead_tfm(tfm));
-}
-
-static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->crt_aead;
-}
-
-/**
- * crypto_aead_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the aead referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->ivsize;
-}
-
-/**
- * crypto_aead_authsize() - obtain maximum authentication data size
- * @tfm: cipher handle
- *
- * The maximum size of the authentication data for the AEAD cipher referenced
- * by the AEAD cipher handle is returned. The authentication data size may be
- * zero if the cipher implements a hard-coded maximum.
- *
- * The authentication data may also be known as "tag value".
- *
- * Return: authentication data size / tag size in bytes
- */
-static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->authsize;
-}
-
-/**
- * crypto_aead_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the AEAD referenced with the cipher handle is returned.
- * The caller may use that information to allocate appropriate memory for the
- * data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
-}
-
-static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
-}
-
-static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
-{
- return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
-}
-
-static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
-{
- crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
-}
-
-static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
-{
- crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
-}
-
-/**
- * crypto_aead_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the AEAD referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct aead_tfm *crt = crypto_aead_crt(tfm);
-
- return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_aead_setauthsize() - set authentication data size
- * @tfm: cipher handle
- * @authsize: size of the authentication data / tag in bytes
- *
- * Set the authentication data size / tag size. AEAD requires an authentication
- * tag (or MAC) in addition to the associated data.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
-
-static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
-{
- return __crypto_aead_cast(req->base.tfm);
-}
-
-/**
- * crypto_aead_encrypt() - encrypt plaintext
- * @req: reference to the aead_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Encrypt plaintext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The encryption operation creates the authentication data /
- * tag. That data is concatenated with the created ciphertext.
- * The ciphertext memory size is therefore the given number of
- * block cipher blocks + the size defined by the
- * crypto_aead_setauthsize invocation. The caller must ensure
- * that sufficient memory is available for the ciphertext and
- * the authentication tag.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_encrypt(struct aead_request *req)
-{
- return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
-}
-
-/**
- * crypto_aead_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
- * authentication data / tag. That authentication data / tag
- * must have the size defined by the crypto_aead_setauthsize
- * invocation.
- *
- *
- * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
- * cipher operation performs the authentication of the data during the
- * decryption operation. Therefore, the function returns this error if
- * the authentication of the ciphertext was unsuccessful (i.e. the
- * integrity of the ciphertext or the associated data was violated);
- * < 0 if an error occurred.
- */
-static inline int crypto_aead_decrypt(struct aead_request *req)
-{
- if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
- return -EINVAL;
-
- return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
-}
-
-/**
- * DOC: Asynchronous AEAD Request Handle
- *
- * The aead_request data structure contains all pointers to data required for
- * the AEAD cipher operation. This includes the cipher handle (which can be
- * used by multiple aead_request instances), pointer to plaintext and
- * ciphertext, asynchronous callback function, etc. It acts as a handle to the
- * aead_request_* API calls in a similar way as AEAD handle to the
- * crypto_aead_* API calls.
- */
-
-/**
- * crypto_aead_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->reqsize;
-}
-
-/**
- * aead_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing aead handle in the request
- * data structure with a different one.
- */
-static inline void aead_request_set_tfm(struct aead_request *req,
- struct crypto_aead *tfm)
-{
- req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
-}
-
-/**
- * aead_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the AEAD
- * encrypt and decrypt API calls. During the allocation, the provided aead
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
- gfp_t gfp)
-{
- struct aead_request *req;
-
- req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
-
- if (likely(req))
- aead_request_set_tfm(req, tfm);
-
- return req;
-}
-
-/**
- * aead_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void aead_request_free(struct aead_request *req)
-{
- kzfree(req);
-}
-
-/**
- * aead_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- * increase the wait queue beyond the initial maximum size;
- * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- * crypto API, but provided to the callback function for it to use. Here,
- * the caller can provide a reference to memory the callback function can
- * operate on. As the callback function is invoked asynchronously to the
- * related functionality, it may need to access data structures of the
- * related functionality which can be referenced using this pointer. The
- * callback function can access the memory via the "data" field in the
- * crypto_async_request data structure provided to the callback function.
- *
- * Setting the callback function that is triggered once the cipher operation
- * completes
- *
- * The callback function is registered with the aead_request handle and
- * must comply with the following template
- *
- * void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void aead_request_set_callback(struct aead_request *req,
- u32 flags,
- crypto_completion_t compl,
- void *data)
-{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
-}
-
-/**
- * aead_request_set_crypt - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @cryptlen: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_aead_ivsize()
- *
- * Setting the source data and destination data scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed - the source is the ciphertext and the destination is the plaintext.
- *
- * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
- * the caller must concatenate the ciphertext followed by the
- * authentication tag and provide the entire data stream to the
- * decryption operation (i.e. the data length used for the
- * initialization of the scatterlist and the data length for the
- * decryption operation is identical). For encryption, however,
- * the authentication tag is created while encrypting the data.
- * The destination buffer must hold sufficient space for the
- * ciphertext and the authentication tag while the encryption
- * invocation must only point to the plaintext data size. The
- * following code snippet illustrates the memory usage
- * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
- * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
- * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
- */
-static inline void aead_request_set_crypt(struct aead_request *req,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int cryptlen, u8 *iv)
-{
- req->src = src;
- req->dst = dst;
- req->cryptlen = cryptlen;
- req->iv = iv;
-}
-
-/**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * For encryption, the memory is filled with the associated data. For
- * decryption, the memory must point to the associated data.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
- struct scatterlist *assoc,
- unsigned int assoclen)
-{
- req->assoc = assoc;
- req->assoclen = assoclen;
-}
-
-/**
* DOC: Synchronous Block Cipher API
*
* The synchronous block cipher API is used with the ciphers of type
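
The inline AEAD helpers removed above now live in <crypto/aead.h>. A minimal encrypt sketch against that header follows (not part of the patch); it assumes the new-style request layout where the associated-data length is set with aead_request_set_ad() and the scatterlist carries AD followed by plaintext. gcm_encrypt_one() is an invented wrapper and asynchronous completion handling is omitted.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

static int gcm_encrypt_one(struct scatterlist *sg, unsigned int assoclen,
                           unsigned int ptlen, u8 *iv,
                           const u8 *key, unsigned int keylen)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        int err;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, keylen);
        if (err)
                goto out_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_tfm;
        }

        aead_request_set_ad(req, assoclen);
        /* src == dst: AD || plaintext in, AD || ciphertext || tag out */
        aead_request_set_crypt(req, sg, sg, ptlen, iv);
        err = crypto_aead_encrypt(req); /* may return -EINPROGRESS if async */

        aead_request_free(req);
out_tfm:
        crypto_free_aead(tfm);
        return err;
}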
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d8358799c594..df334cbacc6d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -404,26 +404,11 @@ static inline bool d_mountpoint(const struct dentry *dentry)
/*
* Directory cache entry type accessor functions.
*/
-static inline void __d_set_type(struct dentry *dentry, unsigned type)
-{
- dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
-}
-
-static inline void __d_clear_type(struct dentry *dentry)
-{
- __d_set_type(dentry, DCACHE_MISS_TYPE);
-}
-
-static inline void d_set_type(struct dentry *dentry, unsigned type)
-{
- spin_lock(&dentry->d_lock);
- __d_set_type(dentry, type);
- spin_unlock(&dentry->d_lock);
-}
-
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
- return dentry->d_flags & DCACHE_ENTRY_TYPE;
+ unsigned type = READ_ONCE(dentry->d_flags);
+ smp_rmb();
+ return type & DCACHE_ENTRY_TYPE;
}
static inline bool d_is_miss(const struct dentry *dentry)
@@ -482,6 +467,44 @@ static inline bool d_is_positive(const struct dentry *dentry)
return !d_is_negative(dentry);
}
+/**
+ * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents either an absent name or a name that
+ * doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent
+ * a true miss, a whiteout that isn't represented by a 0,0 chardev or a
+ * fallthrough marker in an opaque directory.
+ *
+ * Note! (1) This should be used *only* by a filesystem to examine its own
+ * dentries. It should not be used to look at some other filesystem's
+ * dentries. (2) It should also be used in combination with d_inode() to get
+ * the inode. (3) The dentry may have something attached to ->d_lower and the
+ * type field of the flags may be set to something other than miss or whiteout.
+ */
+static inline bool d_really_is_negative(const struct dentry *dentry)
+{
+ return dentry->d_inode == NULL;
+}
+
+/**
+ * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents a name that maps to an inode
+ * (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if
+ * that is represented on medium as a 0,0 chardev.
+ *
+ * Note! (1) This should be used *only* by a filesystem to examine its own
+ * dentries. It should not be used to look at some other filesystem's
+ * dentries. (2) It should also be used in combination with d_inode() to get
+ * the inode.
+ */
+static inline bool d_really_is_positive(const struct dentry *dentry)
+{
+ return dentry->d_inode != NULL;
+}
+
extern void d_set_fallthru(struct dentry *dentry);
static inline bool d_is_fallthru(const struct dentry *dentry)
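
A sketch (not part of the patch) of a filesystem using the new helper on its own dentries, combined with d_inode() as the kernel-doc above recommends; myfs_unlink_check() is invented.

#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/fs.h>

static int myfs_unlink_check(struct dentry *dentry)
{
        if (d_really_is_negative(dentry))       /* no inode behind the name */
                return -ENOENT;

        /* pair with d_inode() to reach the backing inode */
        return d_inode(dentry)->i_nlink ? 0 : -ENOENT;
}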
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 439ff698000a..221025423e6c 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -43,6 +43,7 @@ enum dccp_state {
DCCP_CLOSING = TCP_CLOSING,
DCCP_TIME_WAIT = TCP_TIME_WAIT,
DCCP_CLOSED = TCP_CLOSE,
+ DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
DCCP_PARTOPEN = TCP_MAX_STATES,
DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
DCCP_MAX_STATES
@@ -57,6 +58,7 @@ enum {
DCCPF_CLOSING = TCPF_CLOSING,
DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
DCCPF_CLOSED = TCPF_CLOSE,
+ DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
};
@@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk)
return NULL;
}
-extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+extern void dccp_syn_ack_timeout(const struct request_sock *req);
#endif /* _LINUX_DCCP_H */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index cb25af461054..420311bcee38 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,7 +45,6 @@ extern struct dentry *arch_debugfs_dir;
/* declared over in file.c */
extern const struct file_operations debugfs_file_operations;
-extern const struct inode_operations debugfs_link_operations;
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index 602fbbfcfeed..0a83a1e648b0 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -91,7 +91,7 @@ struct devfreq_event_desc {
const char *name;
void *driver_data;
- struct devfreq_event_ops *ops;
+ const struct devfreq_event_ops *ops;
};
#if defined(CONFIG_PM_DEVFREQ_EVENT)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index fd23978d93fe..51cc1deb7af3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -605,9 +605,4 @@ static inline unsigned long to_bytes(sector_t n)
return (n << SECTOR_SHIFT);
}
-/*-----------------------------------------------------------------
- * Helper for block layer and dm core operations
- *---------------------------------------------------------------*/
-int dm_underlying_device_busy(struct request_queue *q);
-
#endif /* _LINUX_DEVICE_MAPPER_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index f3f2c7e38060..6558af90c8fe 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -38,6 +38,7 @@ struct class;
struct subsys_private;
struct bus_type;
struct device_node;
+struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
@@ -650,14 +651,6 @@ struct device_dma_parameters {
unsigned long segment_boundary_mask;
};
-struct acpi_device;
-
-struct acpi_dev_node {
-#ifdef CONFIG_ACPI
- struct acpi_device *companion;
-#endif
-};
-
/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
@@ -703,7 +696,7 @@ struct acpi_dev_node {
* @cma_area: Contiguous memory area for dma allocations
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
- * @acpi_node: Associated ACPI device node.
+ * @fwnode: Associated device node supplied by platform firmware.
* @devt: For creating the sysfs "dev".
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
@@ -779,7 +772,7 @@ struct device {
struct dev_archdata archdata;
struct device_node *of_node; /* associated device tree node */
- struct acpi_dev_node acpi_node; /* associated ACPI device node */
+ struct fwnode_handle *fwnode; /* firmware device node */
dev_t devt; /* dev_t, creates the sysfs "dev" */
u32 id; /* device instance */
@@ -954,6 +947,9 @@ extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
+extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+
/*
* Root device objects for grouping under /sys/devices
*/
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 694e1fe1c4b4..2f0b431b73e0 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -163,6 +163,33 @@ struct dma_buf_attachment {
};
/**
+ * struct dma_buf_export_info - holds information needed to export a dma_buf
+ * @exp_name: name of the exporting module - useful for debugging.
+ * @ops: Attach allocator-defined dma buf ops to the new buffer
+ * @size: Size of the buffer
+ * @flags: mode flags for the file
+ * @resv: reservation-object, NULL to allocate default one
+ * @priv: Attach private data of allocator to this buffer
+ *
+ * This structure holds the information required to export the buffer. Used
+ * with dma_buf_export() only.
+ */
+struct dma_buf_export_info {
+ const char *exp_name;
+ const struct dma_buf_ops *ops;
+ size_t size;
+ int flags;
+ struct reservation_object *resv;
+ void *priv;
+};
+
+/**
+ * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters; zeros and fills in most common values
+ */
+#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
+ struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME }
+
+/**
* get_dma_buf - convenience wrapper for get_file.
* @dmabuf: [in] pointer to dma_buf
*
@@ -181,12 +208,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
-struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
- size_t size, int flags, const char *,
- struct reservation_object *);
-
-#define dma_buf_export(priv, ops, size, flags, resv) \
- dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
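
A sketch (not part of the patch) of an exporter moving from the removed dma_buf_export_named() call to the new descriptor-based interface; my_dmabuf_ops and my_export() are placeholders.

#include <linux/dma-buf.h>
#include <linux/fcntl.h>

static const struct dma_buf_ops my_dmabuf_ops;  /* hypothetical exporter ops */

static struct dma_buf *my_export(void *obj, size_t size)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);   /* sets .exp_name = KBUILD_MODNAME */

        exp_info.ops   = &my_dmabuf_ops;
        exp_info.size  = size;
        exp_info.flags = O_RDWR;
        exp_info.priv  = obj;

        return dma_buf_export(&exp_info);
}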
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index c3007cb4bfa6..ac07ff090919 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -34,6 +34,10 @@ struct dma_map_ops {
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
+ /*
+ * map_sg returns 0 on error and a value > 0 on success.
+ * It should never return a value < 0.
+ */
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);
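
A caller-side sketch (not part of the patch) of the map_sg contract documented above: zero means failure and negative values never happen, so the return is either an error signal or the number of mapped entries. map_for_device() is invented.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int map_for_device(struct device *dev, struct scatterlist *sgl,
                          int nents)
{
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (mapped == 0)        /* failure is 0, never a negative errno */
                return -EIO;

        /* ... hand the 'mapped' entries to the hardware ... */

        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}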
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
new file mode 100644
index 000000000000..234393a6997b
--- /dev/null
+++ b/include/linux/dma/hsu.h
@@ -0,0 +1,48 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DMA_HSU_H
+#define _DMA_HSU_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include <linux/platform_data/dma-hsu.h>
+
+struct hsu_dma;
+
+/**
+ * struct hsu_dma_chip - representation of HSU DMA hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @length: I/O space length
+ * @offset: offset of the I/O space where registers are located
+ * @hsu: struct hsu_dma that is filled by ->probe()
+ * @pdata: platform data for the DMA controller if provided
+ */
+struct hsu_dma_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ unsigned int length;
+ unsigned int offset;
+ struct hsu_dma *hsu;
+ struct hsu_dma_platform_data *pdata;
+};
+
+/* Export to the internal users */
+irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);
+
+/* Export to the platform drivers */
+int hsu_dma_probe(struct hsu_dma_chip *chip);
+int hsu_dma_remove(struct hsu_dma_chip *chip);
+
+#endif /* _DMA_HSU_H */
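
A rough sketch (not part of the patch) of a platform driver filling in the new hsu_dma_chip description before handing it to hsu_dma_probe(); resource handling is abbreviated and the driver name is invented.

#include <linux/dma/hsu.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int my_hsu_probe(struct platform_device *pdev)
{
        struct hsu_dma_chip *chip;
        struct resource *mem;

        chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        chip->regs = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(chip->regs))
                return PTR_ERR(chip->regs);

        chip->dev    = &pdev->dev;
        chip->irq    = platform_get_irq(pdev, 0);
        chip->length = resource_size(mem);
        chip->offset = 0;
        chip->pdata  = dev_get_platdata(&pdev->dev);

        platform_set_drvdata(pdev, chip);
        return hsu_dma_probe(chip);
}

static int my_hsu_remove(struct platform_device *pdev)
{
        return hsu_dma_remove(platform_get_drvdata(pdev));
}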
diff --git a/include/linux/amba/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 34b98f276ed0..34b98f276ed0 100644
--- a/include/linux/amba/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b6997a0cb528..ad419757241f 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
@@ -574,7 +570,6 @@ struct dma_tx_state {
* @copy_align: alignment shift for memcpy operations
* @xor_align: alignment shift for xor operations
* @pq_align: alignment shift for pq operations
- * @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @src_addr_widths: bit mask of src addr widths the device supports
@@ -625,7 +620,6 @@ struct dma_device {
u8 copy_align;
u8 xor_align;
u8 pq_align;
- u8 fill_align;
#define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
@@ -826,12 +820,6 @@ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
return dmaengine_check_align(dev->pq_align, off1, off2, len);
}
-static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
- size_t off2, size_t len)
-{
- return dmaengine_check_align(dev->fill_align, off1, off2, len);
-}
-
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
@@ -1098,7 +1086,6 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
@@ -1116,27 +1103,4 @@ static inline struct dma_chan
return __dma_request_channel(mask, fn, fn_param);
}
-
-/* --- Helper iov-locking functions --- */
-
-struct dma_page_list {
- char __user *base_address;
- int nr_pages;
- struct page **pages;
-};
-
-struct dma_pinned_list {
- int nr_iovecs;
- struct dma_page_list page_list[0];
-};
-
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
-void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
-
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, struct page *page,
- unsigned int offset, size_t len);
-
#endif /* DMAENGINE_H */
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 52456aa566a0..e1043f79122f 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,8 +11,8 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
+#include <linux/scatterlist.h>
#include <asm/io.h>
-#include <asm/scatterlist.h>
struct device;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 30624954dec5..e9bc9292bd3a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -185,33 +185,85 @@ static inline int dmar_device_remove(void *handle)
struct irte {
union {
+ /* Shared between remapped and posted mode */
struct {
- __u64 present : 1,
- fpd : 1,
- dst_mode : 1,
- redir_hint : 1,
- trigger_mode : 1,
- dlvry_mode : 3,
- avail : 4,
- __reserved_1 : 4,
- vector : 8,
- __reserved_2 : 8,
- dest_id : 32;
+ __u64 present : 1, /* 0 */
+ fpd : 1, /* 1 */
+ __res0 : 6, /* 2 - 6 */
+ avail : 4, /* 8 - 11 */
+ __res1 : 3, /* 12 - 14 */
+ pst : 1, /* 15 */
+ vector : 8, /* 16 - 23 */
+ __res2 : 40; /* 24 - 63 */
+ };
+
+ /* Remapped mode */
+ struct {
+ __u64 r_present : 1, /* 0 */
+ r_fpd : 1, /* 1 */
+ dst_mode : 1, /* 2 */
+ redir_hint : 1, /* 3 */
+ trigger_mode : 1, /* 4 */
+ dlvry_mode : 3, /* 5 - 7 */
+ r_avail : 4, /* 8 - 11 */
+ r_res0 : 4, /* 12 - 15 */
+ r_vector : 8, /* 16 - 23 */
+ r_res1 : 8, /* 24 - 31 */
+ dest_id : 32; /* 32 - 63 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_present : 1, /* 0 */
+ p_fpd : 1, /* 1 */
+ p_res0 : 6, /* 2 - 7 */
+ p_avail : 4, /* 8 - 11 */
+ p_res1 : 2, /* 12 - 13 */
+ p_urgent : 1, /* 14 */
+ p_pst : 1, /* 15 */
+ p_vector : 8, /* 16 - 23 */
+ p_res2 : 14, /* 24 - 37 */
+ pda_l : 26; /* 38 - 63 */
};
__u64 low;
};
union {
+ /* Shared between remapped and posted mode */
struct {
- __u64 sid : 16,
- sq : 2,
- svt : 2,
- __reserved_3 : 44;
+ __u64 sid : 16, /* 64 - 79 */
+ sq : 2, /* 80 - 81 */
+ svt : 2, /* 82 - 83 */
+ __res3 : 44; /* 84 - 127 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_sid : 16, /* 64 - 79 */
+ p_sq : 2, /* 80 - 81 */
+ p_svt : 2, /* 82 - 83 */
+ p_res3 : 12, /* 84 - 95 */
+ pda_h : 32; /* 96 - 127 */
};
__u64 high;
};
};
+static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
+{
+ dst->present = src->present;
+ dst->fpd = src->fpd;
+ dst->avail = src->avail;
+ dst->pst = src->pst;
+ dst->vector = src->vector;
+ dst->sid = src->sid;
+ dst->sq = src->sq;
+ dst->svt = src->svt;
+}
+
+#define PDA_LOW_BIT 26
+#define PDA_HIGH_BIT 32
+
enum {
IRQ_REMAP_XAPIC_MODE,
IRQ_REMAP_X2APIC_MODE,
@@ -227,6 +279,7 @@ extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
-extern int arch_setup_dmar_msi(unsigned int irq);
+extern int dmar_alloc_hwirq(int id, int node, void *arg);
+extern void dmar_free_hwirq(int irq);
#endif /* __DMAR_H__ */
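
A hedged sketch of how the split layout might be used to turn a remapped-format
entry into a posted-format one. The example function and the way the 64-byte-
aligned posted-interrupt descriptor address is split across pda_l/pda_h are
assumptions based only on the field widths above; only struct irte,
dmar_copy_shared_irte() and the PDA_* constants come from this header.

#include <linux/dmar.h>
#include <linux/string.h>

static void example_make_posted_irte(struct irte *pi, struct irte *remapped,
				     u64 pi_desc_addr, u8 vector)
{
	memset(pi, 0, sizeof(*pi));
	dmar_copy_shared_irte(pi, remapped);	/* present, fpd, avail, sid, sq, svt */

	/* Assumed split: the descriptor is 64-byte aligned, so bits 6-31 of its
	 * address fill the 26-bit pda_l field and bits 32-63 fill pda_h. */
	pi->pda_l = (pi_desc_addr >> 6) & ((1UL << PDA_LOW_BIT) - 1);
	pi->pda_h = (pi_desc_addr >> 32) & ((1ULL << PDA_HIGH_BIT) - 1);
	pi->p_vector = vector;
	pi->p_urgent = 0;
	pi->pst = 1;		/* mark the entry as posted */
}
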
diff --git a/include/linux/efi.h b/include/linux/efi.h
index af5be0368dec..5f19efe4eb3f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -96,6 +96,8 @@ typedef struct {
#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
+#define EFI_MEMORY_MORE_RELIABLE \
+ ((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -583,6 +585,9 @@ void efi_native_runtime_setup(void);
#define EFI_FILE_INFO_ID \
EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
+ EFI_GUID( 0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+
#define EFI_FILE_SYSTEM_GUID \
EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
@@ -823,6 +828,7 @@ extern struct efi {
unsigned long fw_vendor; /* fw_vendor */
unsigned long runtime; /* runtime table */
unsigned long config_table; /* config tables */
+ unsigned long esrt; /* ESRT table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -864,6 +870,7 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern void efi_late_init(void);
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
+extern void efi_find_mirror(void);
#else
static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
@@ -875,6 +882,11 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern int efi_config_init(efi_config_table_type_t *arch_tables);
+#ifdef CONFIG_EFI_ESRT
+extern void __init efi_esrt_init(void);
+#else
+static inline void efi_esrt_init(void) { }
+#endif
extern int efi_config_parse_tables(void *config_tables, int count, int sz,
efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
@@ -882,12 +894,15 @@ extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
+extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_get_time(struct timespec *now);
extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
extern struct efi_memory_map memmap;
+extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
extern bool efi_poweroff_required(void);
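
For illustration, a small sketch using the new memory-descriptor helpers
together with the new attribute bit; it assumes efi_mem_desc_lookup() returns 0
when the address is covered by the EFI memory map, which this header does not
spell out.

#include <linux/efi.h>

/* Does this physical address live in "more reliable" (mirrored) memory? */
static bool example_addr_is_mirrored(u64 phys_addr)
{
	efi_memory_desc_t md;

	if (efi_mem_desc_lookup(phys_addr, &md))
		return false;		/* assumed: non-zero means "not found" */

	return !!(md.attribute & EFI_MEMORY_MORE_RELIABLE);
}
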
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 45a91474487d..638b324f0291 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -39,6 +39,7 @@ typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct reques
typedef int (elevator_init_fn) (struct request_queue *,
struct elevator_type *e);
typedef void (elevator_exit_fn) (struct elevator_queue *);
+typedef void (elevator_registered_fn) (struct request_queue *);
struct elevator_ops
{
@@ -68,6 +69,7 @@ struct elevator_ops
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
+ elevator_registered_fn *elevator_registered_fn;
};
#define ELV_NAME_MAX (16)
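
A sketch of how an I/O scheduler might pick up the new hook; the scheduler name
and the idea of doing extra sysfs/background setup there are illustrative, only
the ops field name comes from this header.

#include <linux/elevator.h>
#include <linux/module.h>

static void example_registered(struct request_queue *q)
{
	/* Called after the elevator has been registered for q (assumption
	 * based on the hook name); a natural place for late setup. */
}

static struct elevator_type example_iosched = {
	.ops = {
		.elevator_registered_fn	= example_registered,
		/* mandatory init/exit/dispatch ops elided for brevity */
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};
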
diff --git a/include/linux/elf-randomize.h b/include/linux/elf-randomize.h
new file mode 100644
index 000000000000..b5f0bda9472e
--- /dev/null
+++ b/include/linux/elf-randomize.h
@@ -0,0 +1,22 @@
+#ifndef _ELF_RANDOMIZE_H
+#define _ELF_RANDOMIZE_H
+
+struct mm_struct;
+
+#ifndef CONFIG_ARCH_HAS_ELF_RANDOMIZE
+static inline unsigned long arch_mmap_rnd(void) { return 0; }
+# if defined(arch_randomize_brk) && defined(CONFIG_COMPAT_BRK)
+# define compat_brk_randomized
+# endif
+# ifndef arch_randomize_brk
+# define arch_randomize_brk(mm) (mm->brk)
+# endif
+#else
+extern unsigned long arch_mmap_rnd(void);
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+# ifdef CONFIG_COMPAT_BRK
+# define compat_brk_randomized
+# endif
+#endif
+
+#endif
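
For context, a sketch of what an architecture opting into
CONFIG_ARCH_HAS_ELF_RANDOMIZE would now provide; the 8-bit entropy width is an
arbitrary example, not something this header mandates.

/* arch/<arch>/mm/mmap.c (hypothetical) */
#include <linux/elf-randomize.h>
#include <linux/mm.h>
#include <linux/random.h>

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	/* Example width: 8 bits of randomness, applied at page granularity. */
	rnd = (unsigned long)get_random_int() & ((1UL << 8) - 1);
	return rnd << PAGE_SHIFT;
}
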
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 1d869d185a0d..9012f8775208 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -35,7 +35,6 @@ extern const struct header_ops eth_header_ops;
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
const void *daddr, const void *saddr, unsigned len);
-int eth_rebuild_header(struct sk_buff *skb);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
@@ -111,7 +110,29 @@ static inline bool is_zero_ether_addr(const u8 *addr)
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
{
- return 0x01 & addr[0];
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ u32 a = *(const u32 *)addr;
+#else
+ u16 a = *(const u16 *)addr;
+#endif
+#ifdef __BIG_ENDIAN
+ return 0x01 & (a >> ((sizeof(a) * 8) - 8));
+#else
+ return 0x01 & a;
+#endif
+}
+
+static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#ifdef __BIG_ENDIAN
+ return 0x01 & ((*(const u64 *)addr) >> 56);
+#else
+ return 0x01 & (*(const u64 *)addr);
+#endif
+#else
+ return is_multicast_ether_addr(addr);
+#endif
}
/**
@@ -170,6 +191,24 @@ static inline bool is_valid_ether_addr(const u8 *addr)
}
/**
+ * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
+ * @proto: Ethertype/length value to be tested
+ *
+ * Check that the value from the Ethertype/length field is a valid Ethertype.
+ *
+ * Return true if the value is an 802.3 supported Ethertype.
+ */
+static inline bool eth_proto_is_802_3(__be16 proto)
+{
+#ifndef __BIG_ENDIAN
+ /* if CPU is little endian mask off bits representing LSB */
+ proto &= htons(0xFF00);
+#endif
+ /* cast both to u16 and compare since LSB can be ignored */
+ return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
+}
+
+/**
* eth_random_addr - Generate software assigned random Ethernet address
* @addr: Pointer to a six-byte array containing the Ethernet address
*
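
A small sketch of how the new helpers might be used on a receive path. The
"_64bits" variant is only safe when at least two readable bytes follow the
address, which holds for h_dest inside a full Ethernet header; the unicast-only
policy is just an example.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

static bool example_rx_should_process(struct sk_buff *skb)
{
	const struct ethhdr *eth = eth_hdr(skb);

	/* h_dest is followed by h_source, so 8 bytes are readable here. */
	if (is_multicast_ether_addr_64bits(eth->h_dest))
		return false;			/* example policy: unicast only */

	/* Accept only frames whose type/length field is a real Ethertype. */
	return eth_proto_is_802_3(eth->h_proto);
}
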
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index a23556c32703..920408a21ffd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -50,6 +50,8 @@
#define MAX_ACTIVE_NODE_LOGS 8
#define MAX_ACTIVE_DATA_LOGS 8
+#define VERSION_LEN 256
+
/*
* For superblock
*/
@@ -86,6 +88,12 @@ struct f2fs_super_block {
__le32 extension_count; /* # of extensions below */
__u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
__le32 cp_payload;
+ __u8 version[VERSION_LEN]; /* the kernel version */
+ __u8 init_version[VERSION_LEN]; /* the initial kernel version */
+ __le32 feature; /* defined features */
+ __u8 encryption_level; /* versioning level for encryption */
+ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
+ __u8 reserved[871]; /* valid reserved region */
} __packed;
/*
@@ -153,7 +161,7 @@ struct f2fs_orphan_block {
*/
struct f2fs_extent {
__le32 fofs; /* start file offset of the extent */
- __le32 blk_addr; /* start block address of the extent */
+ __le32 blk; /* start block address of the extent */
__le32 len; /* length of the extent */
} __packed;
@@ -178,6 +186,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
F2FS_INLINE_XATTR_ADDRS - 1))
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 31591686ac2d..996111000a8c 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -21,4 +21,10 @@ struct space_resv {
#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE)
+
#endif /* _FALLOC_H_ */
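
The new mask is the usual guard at the top of a fallocate implementation; a
minimal sketch (the -EOPNOTSUPP policy mirrors what the VFS does for unknown
flags, everything else is elided):

#include <linux/falloc.h>
#include <linux/fs.h>

static long example_fallocate(struct file *file, int mode,
			      loff_t offset, loff_t len)
{
	if (mode & ~FALLOC_FL_SUPPORTED_MASK)
		return -EOPNOTSUPP;

	/* ... per-mode handling: punch hole, zero range, insert range, ... */
	return 0;
}
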
diff --git a/include/linux/filter.h b/include/linux/filter.h
index caac2087a4d5..17724f6ea983 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -145,8 +145,6 @@ struct bpf_prog_aux;
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
-#define BPF_PSEUDO_MAP_FD 1
-
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
@@ -209,6 +207,16 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
@@ -269,6 +277,14 @@ struct bpf_prog_aux;
.off = 0, \
.imm = 0 })
+/* Internal classic blocks for direct assignment */
+
+#define __BPF_STMT(CODE, K) \
+ ((struct sock_filter) BPF_STMT(CODE, K))
+
+#define __BPF_JUMP(CODE, K, JT, JF) \
+ ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
+
#define bytes_to_bpf_size(bytes) \
({ \
int bpf_size = -EINVAL; \
@@ -310,9 +326,11 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
bool jited; /* Is our filter JIT'ed? */
+ bool gpl_compatible; /* Is our filter GPL compatible? */
u32 len; /* Number of filter blocks */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
/* Instructions for interpreter */
@@ -360,12 +378,9 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
int sk_filter(struct sock *sk, struct sk_buff *skb);
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);
-int bpf_convert_filter(struct sock_filter *prog, int len,
- struct bpf_insn *new_prog, int *new_len);
-
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -377,14 +392,17 @@ static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
__bpf_prog_free(fp);
}
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
+ unsigned int flen);
+
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+ bpf_aux_classic_check_t trans);
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
-
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
@@ -454,6 +472,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
BPF_ANCILLARY(VLAN_TAG_PRESENT);
BPF_ANCILLARY(PAY_OFFSET);
BPF_ANCILLARY(RANDOM);
+ BPF_ANCILLARY(VLAN_TPID);
}
/* Fallthrough. */
default:
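
Unlike the initializer-only BPF_STMT()/BPF_JUMP() macros, the new
__BPF_STMT()/__BPF_JUMP() wrappers are expressions, so classic instructions can
be assigned at run time. A tiny sketch (the "return K" program is arbitrary):

#include <linux/filter.h>

static void example_fill_ret_k(struct sock_filter *insns, u32 retval)
{
	insns[0] = __BPF_STMT(BPF_LD | BPF_IMM, retval);	/* A = retval */
	insns[1] = __BPF_STMT(BPF_RET | BPF_A, 0);		/* return A */
}
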
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index 3089d7382325..d4686fe1cac7 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -1,6 +1,8 @@
#ifndef _FIXP_ARITH_H
#define _FIXP_ARITH_H
+#include <linux/math64.h>
+
/*
* Simplistic fixed-point arithmetics.
* Hmm, I'm probably duplicating some code :(
@@ -29,59 +31,126 @@
#include <linux/types.h>
-/* The type representing fixed-point values */
-typedef s16 fixp_t;
+static const s32 sin_table[] = {
+ 0x00000000, 0x023be165, 0x04779632, 0x06b2f1d2, 0x08edc7b6, 0x0b27eb5c,
+ 0x0d61304d, 0x0f996a26, 0x11d06c96, 0x14060b67, 0x163a1a7d, 0x186c6ddd,
+ 0x1a9cd9ac, 0x1ccb3236, 0x1ef74bf2, 0x2120fb82, 0x234815ba, 0x256c6f9e,
+ 0x278dde6e, 0x29ac379f, 0x2bc750e8, 0x2ddf003f, 0x2ff31bdd, 0x32037a44,
+ 0x340ff241, 0x36185aee, 0x381c8bb5, 0x3a1c5c56, 0x3c17a4e7, 0x3e0e3ddb,
+ 0x3fffffff, 0x41ecc483, 0x43d464fa, 0x45b6bb5d, 0x4793a20f, 0x496af3e1,
+ 0x4b3c8c11, 0x4d084650, 0x4ecdfec6, 0x508d9210, 0x5246dd48, 0x53f9be04,
+ 0x55a6125a, 0x574bb8e5, 0x58ea90c2, 0x5a827999, 0x5c135399, 0x5d9cff82,
+ 0x5f1f5ea0, 0x609a52d1, 0x620dbe8a, 0x637984d3, 0x64dd894f, 0x6639b039,
+ 0x678dde6d, 0x68d9f963, 0x6a1de735, 0x6b598ea1, 0x6c8cd70a, 0x6db7a879,
+ 0x6ed9eba0, 0x6ff389de, 0x71046d3c, 0x720c8074, 0x730baeec, 0x7401e4bf,
+ 0x74ef0ebb, 0x75d31a5f, 0x76adf5e5, 0x777f903b, 0x7847d908, 0x7906c0af,
+ 0x79bc384c, 0x7a6831b8, 0x7b0a9f8c, 0x7ba3751c, 0x7c32a67c, 0x7cb82884,
+ 0x7d33f0c8, 0x7da5f5a3, 0x7e0e2e31, 0x7e6c924f, 0x7ec11aa3, 0x7f0bc095,
+ 0x7f4c7e52, 0x7f834ecf, 0x7fb02dc4, 0x7fd317b3, 0x7fec09e1, 0x7ffb025e,
+ 0x7fffffff
+};
-#define FRAC_N 8
-#define FRAC_MASK ((1<<FRAC_N)-1)
+/**
+ * __fixp_sin32() returns the sin of an angle in degrees
+ *
+ * @degrees: angle, in degrees, from 0 to 360.
+ *
+ * The returned value ranges from -0x7fffffff to +0x7fffffff.
+ */
+static inline s32 __fixp_sin32(int degrees)
+{
+ s32 ret;
+ bool negative = false;
-/* Not to be used directly. Use fixp_{cos,sin} */
-static const fixp_t cos_table[46] = {
- 0x0100, 0x00FF, 0x00FF, 0x00FE, 0x00FD, 0x00FC, 0x00FA, 0x00F8,
- 0x00F6, 0x00F3, 0x00F0, 0x00ED, 0x00E9, 0x00E6, 0x00E2, 0x00DD,
- 0x00D9, 0x00D4, 0x00CF, 0x00C9, 0x00C4, 0x00BE, 0x00B8, 0x00B1,
- 0x00AB, 0x00A4, 0x009D, 0x0096, 0x008F, 0x0087, 0x0080, 0x0078,
- 0x0070, 0x0068, 0x005F, 0x0057, 0x004F, 0x0046, 0x003D, 0x0035,
- 0x002C, 0x0023, 0x001A, 0x0011, 0x0008, 0x0000
-};
+ if (degrees > 180) {
+ negative = true;
+ degrees -= 180;
+ }
+ if (degrees > 90)
+ degrees = 180 - degrees;
+ ret = sin_table[degrees];
-/* a: 123 -> 123.0 */
-static inline fixp_t fixp_new(s16 a)
-{
- return a<<FRAC_N;
+ return negative ? -ret : ret;
}
-/* a: 0xFFFF -> -1.0
- 0x8000 -> 1.0
- 0x0000 -> 0.0
-*/
-static inline fixp_t fixp_new16(s16 a)
+/**
+ * fixp_sin32() returns the sin of an angle in degrees
+ *
+ * @degrees: angle, in degrees. The angle can be positive or negative
+ *
+ * The returned value ranges from -0x7fffffff to +0x7fffffff.
+ */
+static inline s32 fixp_sin32(int degrees)
{
- return ((s32)a)>>(16-FRAC_N);
+ degrees = (degrees % 360 + 360) % 360;
+
+ return __fixp_sin32(degrees);
}
-static inline fixp_t fixp_cos(unsigned int degrees)
+/* cos(x) = sin(x + 90 degrees) */
+#define fixp_cos32(v) fixp_sin32((v) + 90)
+
+/*
+ * 16 bits variants
+ *
+ * The returned value ranges from -0x7fff to 0x7fff
+ */
+
+#define fixp_sin16(v) (fixp_sin32(v) >> 16)
+#define fixp_cos16(v) (fixp_cos32(v) >> 16)
+
+/**
+ * fixp_sin32_rad() - calculates the sin of an angle in radians
+ *
+ * @radians: angle, in radians
+ * @twopi: value to be used for 2*pi
+ *
+ * Provides a variant for the cases where just 360
+ * values are not enough. This function uses linear
+ * interpolation to a wider range of values, given by
+ * the twopi argument.
+ *
+ * Experimental tests gave a maximum difference of
+ * 0.000038 between the value calculated by sin() and
+ * the one produced by this function, when twopi is
+ * equal to 360000. That seems to be enough precision
+ * for practical purposes.
+ *
+ * Please notice that too high values of twopi could cause
+ * overflows, so the routine will not allow values of twopi
+ * bigger than 1 << 18.
+ */
+static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
{
- int quadrant = (degrees / 90) & 3;
- unsigned int i = degrees % 90;
+ int degrees;
+ s32 v1, v2, dx, dy;
+ s64 tmp;
- if (quadrant == 1 || quadrant == 3)
- i = 90 - i;
+ /*
+ * Avoid too large values for twopi, as we don't want overflows.
+ */
+ BUG_ON(twopi > 1 << 18);
- i >>= 1;
+ degrees = (radians * 360) / twopi;
+ tmp = radians - (degrees * twopi) / 360;
- return (quadrant == 1 || quadrant == 2)? -cos_table[i] : cos_table[i];
-}
+ degrees = (degrees % 360 + 360) % 360;
+ v1 = __fixp_sin32(degrees);
-static inline fixp_t fixp_sin(unsigned int degrees)
-{
- return -fixp_cos(degrees + 90);
-}
+ v2 = fixp_sin32(degrees + 1);
-static inline fixp_t fixp_mult(fixp_t a, fixp_t b)
-{
- return ((s32)(a*b))>>FRAC_N;
+ dx = twopi / 360;
+ dy = v2 - v1;
+
+ tmp *= dy;
+
+ return v1 + div_s64(tmp, dx);
}
+/* cos(x) = sin(x + pi/2 radians) */
+
+#define fixp_cos32_rad(rad, twopi) \
+ fixp_sin32_rad(rad + twopi / 4, twopi)
+
#endif
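
A short usage sketch of the new 32-bit fixed-point helpers; scaling by the
full-scale value 0x7fffffff is one obvious way to apply the result, and
fixp_sin32_rad(x, 360000) is the finer-grained variant discussed above.

#include <linux/fixp-arith.h>

/* Scale an integer amplitude by sin(degrees) using only integer math. */
static inline s32 example_scaled_sin(s32 amplitude, int degrees)
{
	/* fixp_sin32() returns a value in [-0x7fffffff, 0x7fffffff]. */
	return (s32)div_s64((s64)amplitude * fixp_sin32(degrees), 0x7fffffff);
}
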
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 8293262401de..e65ef959546c 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -6,16 +6,16 @@
#include <linux/bitops.h>
struct frontswap_ops {
- void (*init)(unsigned);
- int (*store)(unsigned, pgoff_t, struct page *);
- int (*load)(unsigned, pgoff_t, struct page *);
- void (*invalidate_page)(unsigned, pgoff_t);
- void (*invalidate_area)(unsigned);
+ void (*init)(unsigned); /* this swap type was just swapon'ed */
+ int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
+ int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
+ void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
+ void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
+ struct frontswap_ops *next; /* private pointer to next ops */
};
extern bool frontswap_enabled;
-extern struct frontswap_ops *
- frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_register_ops(struct frontswap_ops *ops);
extern void frontswap_shrink(unsigned long);
extern unsigned long frontswap_curr_pages(void);
extern void frontswap_writethrough(bool);
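
With the registration function now returning void (backends are chained
internally through ->next), a minimal no-op backend would look roughly like
this; the "reject everything" store is only there to make the fallback path
explicit.

#include <linux/frontswap.h>
#include <linux/module.h>

static void example_init(unsigned type) { }
static int example_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* reject; the page falls back to the real swap device */
}
static int example_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing stored here */
}
static void example_invalidate_page(unsigned type, pgoff_t offset) { }
static void example_invalidate_area(unsigned type) { }

static struct frontswap_ops example_frontswap_ops = {
	.init			= example_init,
	.store			= example_store,
	.load			= example_load,
	.invalidate_page	= example_invalidate_page,
	.invalidate_area	= example_invalidate_area,
	/* .next is chained by frontswap_register_ops() */
};

static int __init example_frontswap_init(void)
{
	frontswap_register_ops(&example_frontswap_ops);
	return 0;
}
module_init(example_frontswap_init);
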
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 52cc4492cb3a..e351da4a934f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -35,10 +35,10 @@
#include <uapi/linux/fs.h>
struct backing_dev_info;
+struct bdi_writeback;
struct export_operations;
struct hd_geometry;
struct iovec;
-struct nameidata;
struct kiocb;
struct kobject;
struct pipe_inode_info;
@@ -314,6 +314,33 @@ struct page;
struct address_space;
struct writeback_control;
+#define IOCB_EVENTFD (1 << 0)
+#define IOCB_APPEND (1 << 1)
+#define IOCB_DIRECT (1 << 2)
+
+struct kiocb {
+ struct file *ki_filp;
+ loff_t ki_pos;
+ void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void *private;
+ int ki_flags;
+};
+
+static inline bool is_sync_kiocb(struct kiocb *kiocb)
+{
+ return kiocb->ki_complete == NULL;
+}
+
+static inline int iocb_flags(struct file *file);
+
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+{
+ *kiocb = (struct kiocb) {
+ .ki_filp = filp,
+ .ki_flags = iocb_flags(filp),
+ };
+}
+
/*
* "descriptor" for what we're up to with a read.
* This allows us to use the same read code yet
@@ -361,7 +388,7 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
- ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
/*
* migrate the contents of a page to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -608,6 +635,14 @@ struct inode {
struct hlist_node i_hash;
struct list_head i_wb_list; /* backing dev IO list */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct bdi_writeback *i_wb; /* the associated cgroup wb */
+
+ /* foreign inode detection, see wbc_detach_inode() */
+ int i_wb_frn_winner;
+ u16 i_wb_frn_avg_time;
+ u16 i_wb_frn_history;
+#endif
struct list_head i_lru; /* inode LRU list */
struct list_head i_sb_list;
union {
@@ -629,6 +664,7 @@ struct inode {
struct pipe_inode_info *i_pipe;
struct block_device *i_bdev;
struct cdev *i_cdev;
+ char *i_link;
};
__u32 i_generation;
@@ -848,6 +884,7 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)
@@ -893,8 +930,8 @@ struct file_lock_operations {
struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
unsigned long (*lm_owner_key)(struct file_lock *);
- void (*lm_get_owner)(struct file_lock *, struct file_lock *);
- void (*lm_put_owner)(struct file_lock *);
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, int);
bool (*lm_break)(struct file_lock *);
@@ -1019,6 +1056,9 @@ extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock *, int, struct list_head *);
+struct files_struct;
+extern void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1155,6 +1195,10 @@ static inline int lease_modify(struct file_lock *fl, int arg,
{
return -EINVAL;
}
+
+struct files_struct;
+static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
#endif /* !CONFIG_FILE_LOCKING */
@@ -1197,6 +1241,8 @@ struct mm_struct;
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
+/* sb->s_iflags */
+#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
/* Possible states of 'frozen' field */
enum {
@@ -1235,6 +1281,7 @@ struct super_block {
const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
+ unsigned long s_iflags; /* internal SB_I_* flags */
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
@@ -1540,8 +1587,6 @@ struct file_operations {
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
- ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
@@ -1574,12 +1619,12 @@ struct file_operations {
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
- void * (*follow_link) (struct dentry *, struct nameidata *);
+ const char * (*follow_link) (struct dentry *, void **);
int (*permission) (struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
int (*readlink) (struct dentry *, char __user *,int);
- void (*put_link) (struct dentry *, struct nameidata *, void *);
+ void (*put_link) (struct inode *, void *);
int (*create) (struct inode *,struct dentry *, umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
@@ -1617,6 +1662,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec **ret_pointer);
extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
+extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -1772,6 +1818,11 @@ struct super_operations {
*
* I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
*
+ * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
+ * synchronize competing switching instances and to tell
+ * wb stat updates to grab mapping->tree_lock. See
+ * inode_switch_wb_work_fn() for details.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -1786,11 +1837,12 @@ struct super_operations {
#define I_SYNC (1 << __I_SYNC)
#define I_REFERENCED (1 << 8)
#define __I_DIO_WAKEUP 9
-#define I_DIO_WAKEUP (1 << I_DIO_WAKEUP)
+#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
#define __I_DIRTY_TIME_EXPIRED 12
#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
+#define I_WB_SWITCH (1 << 13)
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
@@ -1845,6 +1897,7 @@ enum file_time_flags {
S_VERSION = 8,
};
+extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
@@ -2145,7 +2198,7 @@ struct filename {
const __user char *uptr; /* original userland pointer */
struct audit_names *aname;
int refcnt;
- bool separate; /* should "name" be freed? */
+ const char iname[];
};
extern long vfs_truncate(struct path *, loff_t);
@@ -2206,7 +2259,13 @@ extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
-extern int sb_is_blkdev_sb(struct super_block *sb);
+
+extern struct super_block *blockdev_superblock;
+
+static inline bool sb_is_blkdev_sb(struct super_block *sb)
+{
+ return sb == blockdev_superblock;
+}
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2245,6 +2304,9 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
+extern int __blkdev_reread_part(struct block_device *bdev);
+extern int blkdev_reread_part(struct block_device *bdev);
+
#ifdef CONFIG_SYSFS
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
extern void bd_unlink_disk_holder(struct block_device *bdev,
@@ -2545,16 +2607,12 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
-extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos);
@@ -2592,12 +2650,13 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
-ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *,
- loff_t, get_block_t, dio_iodone_t, int flags);
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ get_block_t, dio_iodone_t, int flags);
int dax_clear_blocks(struct inode *, sector_t block, long size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
#ifdef CONFIG_BLOCK
@@ -2613,27 +2672,56 @@ enum {
/* filesystem can handle aio writes beyond i_size */
DIO_ASYNC_EXTEND = 0x04,
+
+ /* inode/fs/bdev does not need truncate protection */
+ DIO_SKIP_DIO_COUNT = 0x08,
};
void dio_end_io(struct bio *bio, int error);
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter, loff_t offset,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags);
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block,
+ dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags);
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
- struct inode *inode, struct iov_iter *iter, loff_t offset,
- get_block_t get_block)
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+ struct inode *inode,
+ struct iov_iter *iter, loff_t offset,
+ get_block_t get_block)
{
- return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+ return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
offset, get_block, NULL, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
void inode_dio_wait(struct inode *inode);
-void inode_dio_done(struct inode *inode);
+
+/*
+ * inode_dio_begin - signal start of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called at the start of a direct I/O request, and elevates the
+ * inode's i_dio_count so that callers of inode_dio_wait() know direct I/O
+ * is in flight until a matching inode_dio_end().
+ */
+static inline void inode_dio_begin(struct inode *inode)
+{
+ atomic_inc(&inode->i_dio_count);
+}
+
+/*
+ * inode_dio_end - signal finish of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+static inline void inode_dio_end(struct inode *inode)
+{
+ if (atomic_dec_and_test(&inode->i_dio_count))
+ wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -2644,13 +2732,14 @@ extern const struct file_operations generic_ro_fops;
extern int readlink_copy(char __user *, int, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern const char *page_follow_link_light(struct dentry *, void **);
+extern void page_put_link(struct inode *, void *);
extern int __page_symlink(struct inode *inode, const char *symname, int len,
int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
-extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
+extern void kfree_put_link(struct inode *, void *);
+extern void free_page_put_link(struct inode *, void *);
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
int vfs_getattr_nosec(struct path *path, struct kstat *stat);
@@ -2661,8 +2750,9 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
+const char *simple_follow_link(struct dentry *, void **);
+extern const struct inode_operations simple_symlink_inode_operations;
-extern int vfs_readdir(struct file *, filldir_t, void *);
extern int iterate_dir(struct file *, struct dir_context *);
extern int vfs_stat(const char __user *, struct kstat *);
@@ -2760,6 +2850,16 @@ static inline bool io_is_direct(struct file *filp)
return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
}
+static inline int iocb_flags(struct file *file)
+{
+ int res = 0;
+ if (file->f_flags & O_APPEND)
+ res |= IOCB_APPEND;
+ if (io_is_direct(file))
+ res |= IOCB_DIRECT;
+ return res;
+}
+
static inline ino_t parent_ino(struct dentry *dentry)
{
ino_t res;
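
Two small sketches of how the reshaped pieces fit together. The first mirrors
the (now file-local) new_sync_read() pattern: build a kiocb with
init_sync_kiocb(), wrap the user buffer in an iov_iter and call ->read_iter().
The second shows the renamed direct-I/O accounting pair. Both assume the file
actually implements ->read_iter()/direct I/O.

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t example_sync_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);		/* ki_flags comes from iocb_flags() */
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, READ, &iov, 1, len);

	ret = file->f_op->read_iter(&kiocb, &iter);
	*ppos = kiocb.ki_pos;
	return ret;
}

static void example_dio_bracket(struct inode *inode)
{
	inode_dio_begin(inode);		/* truncate et al. wait in inode_dio_wait() */
	/* ... submit and complete the direct I/O ... */
	inode_dio_end(inode);		/* wakes inode_dio_wait() waiters */
}
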
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
index 9dc4e0384bfb..3886b3bffd7f 100644
--- a/include/linux/fs_pin.h
+++ b/include/linux/fs_pin.h
@@ -13,6 +13,8 @@ struct vfsmount;
static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
{
init_waitqueue_head(&p->wait);
+ INIT_HLIST_NODE(&p->s_list);
+ INIT_HLIST_NODE(&p->m_list);
p->kill = kill;
}
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0f313f93c586..65a517dd32f7 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -84,8 +84,6 @@ struct fsnotify_fname;
* Each group much define these ops. The fsnotify infrastructure will call
* these operations for each relevant group.
*
- * should_send_event - given a group, inode, and mask this function determines
- * if the group is interested in this event.
* handle_event - main call for a group to handle an fs event
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c674ee8f7fca..f9ecf63d47f1 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -13,6 +13,7 @@ struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
+struct bpf_prog;
struct trace_print_flags {
unsigned long mask;
@@ -45,7 +46,7 @@ const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
const char *ftrace_print_array_seq(struct trace_seq *p,
- const void *buf, int buf_len,
+ const void *buf, int count,
size_t el_size);
struct trace_iterator;
@@ -202,7 +203,7 @@ enum trace_reg {
struct ftrace_event_call;
struct ftrace_event_class {
- char *system;
+ const char *system;
void *probe;
#ifdef CONFIG_PERF_EVENTS
void *perf_probe;
@@ -252,6 +253,7 @@ enum {
TRACE_EVENT_FL_WAS_ENABLED_BIT,
TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
+ TRACE_EVENT_FL_KPROBE_BIT,
};
/*
@@ -265,6 +267,7 @@ enum {
* it is best to clear the buffers that used it).
* USE_CALL_FILTER - For ftrace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
+ * KPROBE - Event is a kprobe
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -274,6 +277,7 @@ enum {
TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
+ TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
};
struct ftrace_event_call {
@@ -285,7 +289,7 @@ struct ftrace_event_call {
struct tracepoint *tp;
};
struct trace_event event;
- const char *print_fmt;
+ char *print_fmt;
struct event_filter *filter;
void *mod;
void *data;
@@ -303,6 +307,7 @@ struct ftrace_event_call {
#ifdef CONFIG_PERF_EVENTS
int perf_refcount;
struct hlist_head __percpu *perf_events;
+ struct bpf_prog *prog;
int (*perf_perm)(struct ftrace_event_call *,
struct perf_event *);
@@ -548,6 +553,15 @@ event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
event_triggers_post_call(file, tt);
}
+#ifdef CONFIG_BPF_SYSCALL
+unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
+#else
+static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
+{
+ return 1;
+}
+#endif
+
enum {
FILTER_OTHER = 0,
FILTER_STATIC_STRING,
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
new file mode 100644
index 000000000000..0408545bce42
--- /dev/null
+++ b/include/linux/fwnode.h
@@ -0,0 +1,27 @@
+/*
+ * fwnode.h - Firmware device node object handle type definition.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_FWNODE_H_
+#define _LINUX_FWNODE_H_
+
+enum fwnode_type {
+ FWNODE_INVALID = 0,
+ FWNODE_OF,
+ FWNODE_ACPI,
+ FWNODE_PDATA,
+};
+
+struct fwnode_handle {
+ enum fwnode_type type;
+ struct fwnode_handle *secondary;
+};
+
+#endif
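
The handle is intended to be embedded in the various firmware node types and
dispatched on, roughly like this (the helper name is illustrative):

#include <linux/fwnode.h>

static inline bool example_is_acpi_node(struct fwnode_handle *fwnode)
{
	return fwnode && fwnode->type == FWNODE_ACPI;
}
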
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 51bd1e72a917..6ba7cf23748f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,7 @@ struct vm_area_struct;
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
+#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
@@ -57,8 +58,10 @@ struct vm_area_struct;
* _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. This modifier is deprecated and no new
- * users should be added.
+ * cannot handle allocation failures. New users should be evaluated carefully
+ * (and the flag should be used only when there is no reasonable failure policy),
+ * but it is definitely preferable to use the flag rather than to open-code an
+ * endless loop around the allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*
@@ -85,6 +88,7 @@ struct vm_area_struct;
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
@@ -117,16 +121,6 @@ struct vm_area_struct;
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
-/*
- * GFP_THISNODE does not perform any reclaim, you most likely want to
- * use __GFP_THISNODE to allocate from a given node without fallback!
- */
-#ifdef CONFIG_NUMA
-#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
-#else
-#define GFP_THISNODE ((__force gfp_t)0)
-#endif
-
/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -374,6 +368,11 @@ extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+struct page_frag_cache;
+extern void *__alloc_page_frag(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask);
+extern void __free_page_frag(void *addr);
+
extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);
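
Two quick sketches of the additions: opting an allocation out of kmemcg
accounting, and carving small buffers out of a page_frag_cache (the struct
itself lives in mm_types.h). The cache is assumed to be used from a single
context; it is not locked here.

#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/slab.h>

/* Allocation that should not be charged to the current memory cgroup. */
static void *example_alloc_unaccounted(size_t size)
{
	return kmalloc(size, GFP_KERNEL | __GFP_NOACCOUNT);
}

/* Per-context fragment cache. */
static struct page_frag_cache example_frag_cache;

static void *example_alloc_frag(unsigned int size)
{
	return __alloc_page_frag(&example_frag_cache, size, GFP_ATOMIC);
}

static void example_free_frag(void *data)
{
	__free_page_frag(data);
}
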
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index ab81339a8590..d12b5d566e4b 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -196,13 +196,6 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -EINVAL;
}
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- /* GPIO can never have been requested */
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline void gpio_unexport(unsigned gpio)
{
/* GPIO can never have been exported */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 45afc2dee560..fd098169fe87 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -16,6 +16,15 @@ struct device;
*/
struct gpio_desc;
+/**
+ * Struct containing an array of descriptors that can be obtained using
+ * gpiod_get_array().
+ */
+struct gpio_descs {
+ unsigned int ndescs;
+ struct gpio_desc *desc[];
+};
+
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
@@ -34,6 +43,9 @@ enum gpiod_flags {
#ifdef CONFIG_GPIOLIB
+/* Return the number of GPIOs associated with a device / function */
+int gpiod_count(struct device *dev, const char *con_id);
+
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
@@ -49,7 +61,14 @@ struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
+struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
void gpiod_put(struct gpio_desc *desc);
+void gpiod_put_array(struct gpio_descs *descs);
struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -64,7 +83,14 @@ struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
struct gpio_desc *__must_check
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
+struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
@@ -74,24 +100,25 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
void gpiod_set_value(struct gpio_desc *desc, int value);
-void gpiod_set_array(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+void gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+void gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_array_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+void gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
@@ -110,9 +137,15 @@ struct fwnode_handle;
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname);
struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
+ const char *con_id,
struct fwnode_handle *child);
#else /* CONFIG_GPIOLIB */
+static inline int gpiod_count(struct device *dev, const char *con_id)
+{
+ return 0;
+}
+
static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
@@ -142,6 +175,20 @@ __gpiod_get_index_optional(struct device *dev, const char *con_id,
return ERR_PTR(-ENOSYS);
}
+static inline struct gpio_descs *__must_check
+gpiod_get_array(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void gpiod_put(struct gpio_desc *desc)
{
might_sleep();
@@ -150,6 +197,14 @@ static inline void gpiod_put(struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void gpiod_put_array(struct gpio_descs *descs)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline struct gpio_desc *__must_check
__devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -181,6 +236,20 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
return ERR_PTR(-ENOSYS);
}
+static inline struct gpio_descs *__must_check
+devm_gpiod_get_array(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
might_sleep();
@@ -189,6 +258,15 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void devm_gpiod_put_array(struct device *dev,
+ struct gpio_descs *descs)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
@@ -227,9 +305,9 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline void gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -245,9 +323,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline void gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -264,7 +342,7 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array_cansleep(unsigned int array_size,
+static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
@@ -283,7 +361,7 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array_cansleep(unsigned int array_size,
+static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
@@ -372,7 +450,6 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
-int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
void gpiod_unexport(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
@@ -389,11 +466,6 @@ static inline int gpiod_export_link(struct device *dev, const char *name,
return -ENOSYS;
}
-static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
-{
- return -ENOSYS;
-}
-
static inline void gpiod_unexport(struct gpio_desc *desc)
{
}
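
A sketch of the new descriptor-array API driving a bank of select lines as one
value. The "mux" con_id (i.e. a "mux-gpios" property in DT) and the 8-line
limit are assumptions for the example.

#include <linux/gpio/consumer.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kernel.h>

static int example_set_mux(struct device *dev, unsigned int select)
{
	struct gpio_descs *mux;
	int values[8];
	unsigned int i;

	mux = devm_gpiod_get_array(dev, "mux", GPIOD_OUT_LOW);
	if (IS_ERR(mux))
		return PTR_ERR(mux);
	if (WARN_ON(mux->ndescs > ARRAY_SIZE(values)))
		return -EINVAL;

	for (i = 0; i < mux->ndescs; i++)
		values[i] = !!(select & BIT(i));

	gpiod_set_array_value(mux->ndescs, mux->desc, values);
	return 0;
}
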
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c497c62889d1..cc7ec129b329 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/pinctrl/pinctrl.h>
struct device;
struct gpio_desc;
@@ -19,6 +20,7 @@ struct seq_file;
* struct gpio_chip - abstract a GPIO controller
* @label: for diagnostics
* @dev: optional device providing the GPIOs
+ * @cdev: class device used by sysfs interface (may be NULL)
* @owner: helps prevent removal of modules exporting active GPIOs
* @list: links gpio_chips together for traversal
* @request: optional hook for chip-specific activation, such as
@@ -40,8 +42,12 @@ struct seq_file;
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
* state (such as pullup/pulldown configuration).
- * @base: identifies the first GPIO number handled by this chip; or, if
- * negative during registration, requests dynamic ID allocation.
+ * @base: identifies the first GPIO number handled by this chip;
+ * or, if negative during registration, requests dynamic ID allocation.
+ * DEPRECATION: providing anything non-negative and nailing the base
+ * offset of GPIO chips is deprecated. Please pass -1 as base to
+ * let gpiolib select the chip base in all possible cases. We want to
+ * get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
* @desc: array of ngpio descriptors. Private.
@@ -56,7 +62,6 @@ struct seq_file;
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @exported: flags if the gpiochip is exported for use from sysfs. Private.
* @irq_not_threaded: flag must be set if @can_sleep is set but the
* IRQs don't need to be threaded
*
@@ -73,6 +78,7 @@ struct seq_file;
struct gpio_chip {
const char *label;
struct device *dev;
+ struct device *cdev;
struct module *owner;
struct list_head list;
@@ -108,7 +114,6 @@ struct gpio_chip {
const char *const *names;
bool can_sleep;
bool irq_not_threaded;
- bool exported;
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
@@ -120,6 +125,7 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
+ int irq_parent;
#endif
#if defined(CONFIG_OF_GPIO)
@@ -173,6 +179,53 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
#endif /* CONFIG_GPIOLIB_IRQCHIP */
+#ifdef CONFIG_PINCTRL
+
+/**
+ * struct gpio_pin_range - pin range controlled by a gpio chip
+ * @node: list node for maintaining the set of pin ranges, used internally
+ * @pctldev: pinctrl device which handles corresponding pins
+ * @range: actual range of pins controlled by a gpio controller
+ */
+
+struct gpio_pin_range {
+ struct list_head node;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range range;
+};
+
+int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins);
+int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group);
+void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+
+#else
+
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins)
+{
+ return 0;
+}
+static inline int
+gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group)
+{
+ return 0;
+}
+
+static inline void
+gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+{
+}
+
+#endif /* CONFIG_PINCTRL */
+
struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label);
void gpiochip_free_own_desc(struct gpio_desc *desc);
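
A minimal sketch of the pin-range hooks now carried in this header; the pin
controller name and the 32-pin window starting at pin 96 are made-up values for
illustration.

#include <linux/gpio/driver.h>

static int example_add_ranges(struct gpio_chip *chip)
{
	/* GPIOs 0..31 of this chip sit on pins 96..127 of "example-pinctrl". */
	return gpiochip_add_pin_range(chip, "example-pinctrl", 0, 96, 32);
}

static void example_remove_ranges(struct gpio_chip *chip)
{
	gpiochip_remove_pin_ranges(chip);
}
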
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f4af03404b97..dfd59d6bc6f0 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,7 +1,7 @@
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 4173a8fdad9e..0042bf330b99 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -49,19 +49,43 @@ struct hid_sensor_hub_attribute_info {
};
/**
+ * struct sensor_hub_pending - Synchronous read pending information
+ * @status: Pending status true/false.
+ * @ready: Completion synchronization data.
+ * @usage_id: Usage id for physical device, e.g. Gyro usage id.
+ * @attr_usage_id: Usage id of a field, e.g. X-AXIS for a gyro.
+ * @raw_size: Response size for a read request.
+ * @raw_data: Placeholder for received response.
+ */
+struct sensor_hub_pending {
+ bool status;
+ struct completion ready;
+ u32 usage_id;
+ u32 attr_usage_id;
+ int raw_size;
+ u8 *raw_data;
+};
+
+/**
* struct hid_sensor_hub_device - Stores the hub instance data
* @hdev: Stores the hid instance.
* @vendor_id: Vendor id of hub device.
* @product_id: Product id of hub device.
+ * @usage: Usage id for this hub device instance.
* @start_collection_index: Starting index for a phy type collection
* @end_collection_index: Last index for a phy type collection
+ * @mutex_ptr: synchronizing mutex pointer.
+ * @pending: Holds information of pending sync read request.
*/
struct hid_sensor_hub_device {
struct hid_device *hdev;
u32 vendor_id;
u32 product_id;
+ u32 usage;
int start_collection_index;
int end_collection_index;
+ struct mutex *mutex_ptr;
+ struct sensor_hub_pending pending;
};
/**
@@ -152,40 +176,51 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @usage_id: Attribute usage id of parent physical device as per spec
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
+* @flag: Synchronous or asynchronous read
*
-* Issues a synchronous read request for an input attribute. Returns
-* data upto 32 bits. Since client can get events, so this call should
-* not be used for data paths, this will impact performance.
+* Issues a synchronous or asynchronous read request for an input attribute.
+* Returns data up to 32 bits.
*/
+enum sensor_hub_read_flags {
+ SENSOR_HUB_SYNC,
+ SENSOR_HUB_ASYNC,
+};
+
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
- u32 usage_id,
- u32 attr_usage_id, u32 report_id);
+ u32 usage_id,
+ u32 attr_usage_id, u32 report_id,
+ enum sensor_hub_read_flags flag
+);
+
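A minimal usage sketch (not part of the patch): a synchronous read through the new flag argument. Here hsdev and report_id are assumed to come from the driver's earlier attribute setup; the usage ids are the accelerometer ones from hid-sensor-ids.h.

int value;

value = sensor_hub_input_attr_get_raw_value(hsdev,
                                            HID_USAGE_SENSOR_ACCEL_3D,
                                            HID_USAGE_SENSOR_DATA_ACCELERATION,
                                            report_id,
                                            SENSOR_HUB_SYNC);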
/**
* sensor_hub_set_feature() - Feature set request
* @hsdev: Hub device instance.
* @report_id: Report id to look for
* @field_index: Field index inside a report
-* @value: Value to set
+* @buffer_size: size of the buffer
+* @buffer: buffer to use in the feature set
*
* Used to set a field in feature report. For example this can set polling
* interval, sensitivity, activate/deactivate state.
*/
int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
- u32 field_index, s32 value);
+ u32 field_index, int buffer_size, void *buffer);
/**
* sensor_hub_get_feature() - Feature get request
* @hsdev: Hub device instance.
* @report_id: Report id to look for
* @field_index: Field index inside a report
-* @value: Place holder for return value
+* @buffer_size: size of the buffer
+* @buffer: buffer to copy output
*
* Used to get a field in feature report. For example this can get polling
-* interval, sensitivity, activate/deactivate state.
+* interval, sensitivity, activate/deactivate state. On success it returns
+* the number of bytes copied to the buffer. On failure, it returns a value < 0.
*/
int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
- u32 field_index, s32 *value);
+ u32 field_index, int buffer_size, void *buffer);
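Sketch only (not in the patch): with the s32 value replaced by a caller-supplied buffer, a 32-bit feature field is written and read back roughly like this. hsdev, report_id and field_index are hypothetical.

s32 value = 100;        /* e.g. a polling interval to program */
s32 readback;
int ret;

ret = sensor_hub_set_feature(hsdev, report_id, field_index,
                             sizeof(value), &value);
if (ret < 0)
        return ret;

ret = sensor_hub_get_feature(hsdev, report_id, field_index,
                             sizeof(readback), &readback);
if (ret < 0)
        return ret;
/* ret is the number of bytes copied into readback */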
/* hid-sensor-attributes */
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 109f0e633e01..f2ee90aed0c2 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -21,6 +21,8 @@
#define HID_MAX_PHY_DEVICES 0xFF
+#define HID_USAGE_SENSOR_COLLECTION 0x200001
+
/* Accel 3D (200073) */
#define HID_USAGE_SENSOR_ACCEL_3D 0x200073
#define HID_USAGE_SENSOR_DATA_ACCELERATION 0x200452
diff --git a/include/linux/hid.h b/include/linux/hid.h
index f94cf28e4b7c..f17980de2662 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -159,6 +159,7 @@ struct hid_item {
#define HID_UP_LED 0x00080000
#define HID_UP_BUTTON 0x00090000
#define HID_UP_ORDINAL 0x000a0000
+#define HID_UP_TELEPHONY 0x000b0000
#define HID_UP_CONSUMER 0x000c0000
#define HID_UP_DIGITIZER 0x000d0000
#define HID_UP_PID 0x000f0000
@@ -269,6 +270,7 @@ struct hid_item {
#define HID_DG_DEVICEINDEX 0x000d0053
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
+#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
@@ -813,6 +815,8 @@ void hid_disconnect(struct hid_device *hid);
const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id);
s32 hid_snto32(__u32 value, unsigned n);
+__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
+ unsigned offset, unsigned n);
/**
* hid_device_io_start - enable HID input during probe, remove
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 9286a46b7d69..6aefcd0031a6 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -65,6 +65,7 @@ static inline void kunmap(struct page *page)
static inline void *kmap_atomic(struct page *page)
{
+ preempt_disable();
pagefault_disable();
return page_address(page);
}
@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page)
static inline void __kunmap_atomic(void *addr)
{
pagefault_enable();
+ preempt_enable();
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
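For illustration (not in the patch): the hunk above makes the !HIGHMEM stubs disable preemption as well as page faults, so the usual pairing stays atomic on every configuration. page, data and len are hypothetical here.

void *vaddr;

vaddr = kmap_atomic(page);      /* preemption and page faults are now off */
memcpy(vaddr, data, len);       /* no sleeping allowed in this window */
kunmap_atomic(vaddr);           /* re-enables them in reverse order */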
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 464f33814a94..d2ba7d334039 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -135,6 +135,7 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 05f6df1fdf5b..76dd4f0da5ca 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -53,34 +53,25 @@ enum hrtimer_restart {
*
* 0x00 inactive
* 0x01 enqueued into rbtree
- * 0x02 callback function running
- * 0x04 timer is migrated to another cpu
*
- * Special cases:
- * 0x03 callback function running and enqueued
- * (was requeued on another CPU)
- * 0x05 timer was migrated on CPU hotunplug
+ * The callback state is not part of the timer->state because clearing it would
+ * mean touching the timer after the callback, which would make it impossible to
+ * free the timer from the callback function.
*
- * The "callback function running and enqueued" status is only possible on
- * SMP. It happens for example when a posix timer expired and the callback
+ * Therefore we track the callback state in:
+ *
+ * timer->base->cpu_base->running == timer
+ *
+ * On SMP it is possible to have a "callback function running and enqueued"
+ * status. It happens for example when a posix timer expired and the callback
* queued a signal. Between dropping the lock which protects the posix timer
* and reacquiring the base lock of the hrtimer, another CPU can deliver the
- * signal and rearm the timer. We have to preserve the callback running state,
- * as otherwise the timer could be removed before the softirq code finishes the
- * the handling of the timer.
- *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
- * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
- * also affects HRTIMER_STATE_MIGRATE where the preservation is not
- * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
- * enqueued on the new cpu.
+ * signal and rearm the timer.
*
* All state transitions are protected by cpu_base->lock.
*/
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
-#define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_MIGRATE 0x04
/**
* struct hrtimer - the basic hrtimer structure
@@ -130,6 +121,12 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
+#ifdef CONFIG_64BIT
+# define HRTIMER_CLOCK_BASE_ALIGN 64
+#else
+# define HRTIMER_CLOCK_BASE_ALIGN 32
+#endif
+
/**
* struct hrtimer_clock_base - the timer base for a specific clock
* @cpu_base: per cpu clock base
@@ -137,9 +134,7 @@ struct hrtimer_sleeper {
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @active: red black tree root node for the active timers
- * @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
- * @softirq_time: the time when running the hrtimer queue in the softirq
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
@@ -147,11 +142,9 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
- ktime_t resolution;
ktime_t (*get_time)(void);
- ktime_t softirq_time;
ktime_t offset;
-};
+} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
@@ -165,11 +158,16 @@ enum hrtimer_base_type {
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set: Indicates that clock was set from irq context.
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @migration_enabled: The migration of hrtimers to other cpus is enabled
+ * @nohz_active: The nohz functionality is enabled
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
+ * @next_timer: Pointer to the first expiring timer
* @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
* @hang_detected: The last hrtimer interrupt detected a hang
@@ -178,27 +176,38 @@ enum hrtimer_base_type {
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ * Do not dereference the pointer because it is not reliable on
+ * cross cpu removals.
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
+ seqcount_t seq;
+ struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
- unsigned int clock_was_set;
+ unsigned int clock_was_set_seq;
+ bool migration_enabled;
+ bool nohz_active;
#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int in_hrtirq : 1,
+ hres_active : 1,
+ hang_detected : 1;
ktime_t expires_next;
- int in_hrtirq;
- int hres_active;
- int hang_detected;
- unsigned long nr_events;
- unsigned long nr_retries;
- unsigned long nr_hangs;
- ktime_t max_hang_time;
+ struct hrtimer *next_timer;
+ unsigned int nr_events;
+ unsigned int nr_retries;
+ unsigned int nr_hangs;
+ unsigned int max_hang_time;
#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-};
+} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
+ BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
+
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -262,19 +271,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
return ktime_sub(timer->node.expires, timer->base->get_time());
}
-#ifdef CONFIG_HIGH_RES_TIMERS
-struct clock_event_device;
-
-extern void hrtimer_interrupt(struct clock_event_device *dev);
-
-/*
- * In high resolution mode the time reference must be read accurate
- */
static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
return timer->base->get_time();
}
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
+
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return timer->base->cpu_base->hres_active;
@@ -295,21 +301,16 @@ extern void hrtimer_peek_ahead_timers(void);
extern void clock_was_set_delayed(void);
+extern unsigned int hrtimer_resolution;
+
#else
# define MONOTONIC_RES_NSEC LOW_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-static inline void hrtimer_peek_ahead_timers(void) { }
+#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-/*
- * In non high resolution mode the time reference is taken from
- * the base softirq time variable.
- */
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
-{
- return timer->base->softirq_time;
-}
+static inline void hrtimer_peek_ahead_timers(void) { }
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
@@ -353,49 +354,47 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
/* Basic timer operations: */
-extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode);
-extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unsigned long range_ns, const enum hrtimer_mode mode);
-extern int
-__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- unsigned long delta_ns,
- const enum hrtimer_mode mode, int wakeup);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL)
+ */
+static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+ hrtimer_start_range_ns(timer, tim, 0, mode);
+}
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
-static inline int hrtimer_start_expires(struct hrtimer *timer,
- enum hrtimer_mode mode)
+static inline void hrtimer_start_expires(struct hrtimer *timer,
+ enum hrtimer_mode mode)
{
unsigned long delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
- return hrtimer_start_range_ns(timer, soft, delta, mode);
+ hrtimer_start_range_ns(timer, soft, delta, mode);
}
-static inline int hrtimer_restart(struct hrtimer *timer)
+static inline void hrtimer_restart(struct hrtimer *timer)
{
- return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
-extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-extern ktime_t hrtimer_get_next_event(void);
+extern u64 hrtimer_get_next_event(void);
-/*
- * A timer is active, when it is enqueued into the rbtree or the
- * callback function is running or it's in the state of being migrated
- * to another cpu.
- */
-static inline int hrtimer_active(const struct hrtimer *timer)
-{
- return timer->state != HRTIMER_STATE_INACTIVE;
-}
+extern bool hrtimer_active(const struct hrtimer *timer);
/*
* Helper function to check, whether the timer is on one of the queues
@@ -411,14 +410,29 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_CALLBACK;
+ return timer->base->cpu_base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
-/* Forward a hrtimer so it expires after the hrtimer's current now */
+/**
+ * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * @timer: hrtimer to forward
+ * @interval: the interval to forward
+ *
+ * Forward the timer expiry so it will expire after the current time
+ * of the hrtimer clock base. Returns the number of overruns.
+ *
+ * Can be safely called from the callback function of @timer. If
+ * called from other contexts @timer must neither be enqueued nor
+ * running the callback and the caller needs to take care of
+ * serialization.
+ *
+ * Note: This only updates the timer expiry value and does not requeue
+ * the timer.
+ */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
@@ -443,7 +457,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
-extern void hrtimer_run_pending(void);
/* Bootup initialization: */
extern void __init hrtimers_init(void);
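Illustrative sketch (not part of the patch): with hrtimer_start() now returning void and hrtimer_forward_now() only advancing the expiry, a periodic timer looks roughly like this. The my_* names and the 100 ms period are hypothetical.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;
static ktime_t my_period;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
        /* do the periodic work here ... */

        /* advance the expiry; the return value requeues the timer */
        hrtimer_forward_now(t, my_period);
        return HRTIMER_RESTART;
}

static void my_timer_setup(void)
{
        my_period = ms_to_ktime(100);
        hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        my_timer.function = my_timer_fn;
        hrtimer_start(&my_timer, my_period, HRTIMER_MODE_REL);
}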
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 3ec06300d535..5dd60c2e120f 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -135,9 +135,9 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info,
* @device: Driver model representation of the device
* @tx_cfg: HSI TX configuration
* @rx_cfg: HSI RX configuration
- * e_handler: Callback for handling port events (RX Wake High/Low)
- * pclaimed: Keeps tracks if the clients claimed its associated HSI port
- * nb: Notifier block for port events
+ * @e_handler: Callback for handling port events (RX Wake High/Low)
+ * @pclaimed: Keeps track of whether the client claimed its associated HSI port
+ * @nb: Notifier block for port events
*/
struct hsi_client {
struct device device;
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
index 70a1dbbf2093..d4a527e58434 100644
--- a/include/linux/htirq.h
+++ b/include/linux/htirq.h
@@ -1,24 +1,38 @@
#ifndef LINUX_HTIRQ_H
#define LINUX_HTIRQ_H
+struct pci_dev;
+struct irq_data;
+
struct ht_irq_msg {
u32 address_lo; /* low 32 bits of the ht irq message */
	u32	address_hi;	/* high 32 bits of the ht irq message */
};
+typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
+ struct ht_irq_msg *msg);
+
+struct ht_irq_cfg {
+ struct pci_dev *dev;
+ /* Update callback used to cope with buggy hardware */
+ ht_irq_update_t *update;
+ unsigned pos;
+ unsigned idx;
+ struct ht_irq_msg msg;
+};
+
/* Helper functions.. */
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-struct irq_data;
void mask_ht_irq(struct irq_data *data);
void unmask_ht_irq(struct irq_data *data);
/* The arch hook for getting things started */
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
+int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
+ ht_irq_update_t *update);
+void arch_teardown_ht_irq(unsigned int irq);
/* For drivers of buggy hardware */
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg);
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
#endif /* LINUX_HTIRQ_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7b5785032049..205026175c42 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -22,7 +22,13 @@ struct mmu_gather;
struct hugepage_subpool {
spinlock_t lock;
long count;
- long max_hpages, used_hpages;
+ long max_hpages; /* Maximum huge pages or -1 if no maximum. */
+ long used_hpages; /* Used count against maximum, includes */
+ /* both alloced and reserved pages. */
+ struct hstate *hstate;
+ long min_hpages; /* Minimum huge pages or -1 if no minimum. */
+ long rsv_hpages; /* Pages reserved against global pool to */
+						/* satisfy minimum size. */
};
struct resv_map {
@@ -38,11 +44,10 @@ extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
-struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
+ long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-int PageHuge(struct page *page);
-
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -79,7 +84,6 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
-bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -109,11 +113,6 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
#else /* !CONFIG_HUGETLB_PAGE */
-static inline int PageHuge(struct page *page)
-{
- return 0;
-}
-
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -152,7 +151,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
#define putback_active_hugepage(p) do {} while (0)
-#define is_hugepage_active(x) false
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index eb7b414d232b..4f7d8f4b1e9a 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -50,10 +50,14 @@ struct hwrng {
struct completion cleanup_done;
};
+struct device;
+
/** Register a new Hardware Random Number Generator driver. */
extern int hwrng_register(struct hwrng *rng);
+extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
+extern void devm_hwrng_unregister(struct device *dev, struct hwrng *rng);
/** Feed random bits into the pool. */
extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
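Sketch only (not part of the patch): the devm_ variants tie the hwrng lifetime to a device. my_rng_read, fill_from_hw and the platform driver context are hypothetical.

#include <linux/hw_random.h>
#include <linux/platform_device.h>

static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
        /* hypothetical: fill buf with up to max bytes of hardware entropy */
        return fill_from_hw(buf, max);
}

static struct hwrng my_rng = {
        .name = "my-rng",
        .read = my_rng_read,
};

static int my_rng_probe(struct platform_device *pdev)
{
        /* unregistered automatically when the device goes away */
        return devm_hwrng_register(&pdev->dev, &my_rng);
}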
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5a2ba674795e..902c37aef67e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -646,12 +646,13 @@ struct hv_input_signal_event_buffer {
};
struct vmbus_channel {
+ /* Unique channel id */
+ int id;
+
struct list_head listentry;
struct hv_device *device_obj;
- struct work_struct work;
-
enum vmbus_channel_state state;
struct vmbus_channel_offer_channel offermsg;
@@ -672,7 +673,6 @@ struct vmbus_channel {
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
spinlock_t inbound_lock;
- struct workqueue_struct *controlwq;
struct vmbus_close_msg close_msg;
@@ -758,6 +758,9 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;
+
+ int num_sc;
+ int next_oc;
};
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
@@ -861,6 +864,14 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
+extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ enum vmbus_packet_type type,
+ u32 flags,
+ bool kick_q);
+
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount,
@@ -868,6 +879,15 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 bufferlen,
u64 requestid);
+extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid,
+ u32 flags,
+ bool kick_q);
+
extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *mpb,
void *buffer,
@@ -1107,6 +1127,16 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
}
/*
+ * NetworkDirect. This is the guest RDMA service.
+ * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
+ */
+#define HV_ND_GUID \
+ .guid = { \
+ 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \
+ 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \
+ }
+
+/*
* Common header for Hyper-V ICs
*/
@@ -1213,6 +1243,7 @@ void hv_kvp_onchannelcallback(void *);
int hv_vss_init(struct hv_util_service *);
void hv_vss_deinit(void);
void hv_vss_onchannelcallback(void *);
+void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
extern struct resource hyperv_mmio;
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index f17da50402a4..e83a738a3b87 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -253,10 +253,10 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
- I2C_SLAVE_REQ_READ_START,
- I2C_SLAVE_REQ_READ_END,
- I2C_SLAVE_REQ_WRITE_START,
- I2C_SLAVE_REQ_WRITE_END,
+ I2C_SLAVE_READ_REQUESTED,
+ I2C_SLAVE_WRITE_REQUESTED,
+ I2C_SLAVE_READ_PROCESSED,
+ I2C_SLAVE_WRITE_RECEIVED,
I2C_SLAVE_STOP,
};
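A simplified sketch (not in the patch) of a backend callback switching on the renamed slave events, in the style of the i2c-slave-eeprom driver; struct my_slave, its fields and bounds checking are left hypothetical.

static int my_slave_cb(struct i2c_client *client,
                       enum i2c_slave_event event, u8 *val)
{
        struct my_slave *priv = i2c_get_clientdata(client);

        switch (event) {
        case I2C_SLAVE_WRITE_REQUESTED:
                priv->idx = 0;                  /* master starts writing */
                break;
        case I2C_SLAVE_WRITE_RECEIVED:
                priv->buf[priv->idx++] = *val;  /* one byte arrived */
                break;
        case I2C_SLAVE_READ_PROCESSED:
                priv->idx++;                    /* previous byte went out */
                /* fall through */
        case I2C_SLAVE_READ_REQUESTED:
                *val = priv->buf[priv->idx];    /* byte the master will read */
                break;
        case I2C_SLAVE_STOP:
                break;
        }
        return 0;
}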
@@ -278,7 +278,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
- * @acpi_node: ACPI device node
+ * @fwnode: device node supplied by the platform firmware
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
@@ -299,7 +299,7 @@ struct i2c_board_info {
void *platform_data;
struct dev_archdata *archdata;
struct device_node *of_node;
- struct acpi_dev_node acpi_node;
+ struct fwnode_handle *fwnode;
int irq;
};
@@ -435,8 +435,8 @@ struct i2c_bus_recovery_info {
void (*set_scl)(struct i2c_adapter *, int val);
int (*get_sda)(struct i2c_adapter *);
- void (*prepare_recovery)(struct i2c_bus_recovery_info *bri);
- void (*unprepare_recovery)(struct i2c_bus_recovery_info *bri);
+ void (*prepare_recovery)(struct i2c_adapter *);
+ void (*unprepare_recovery)(struct i2c_adapter *);
/* gpio recovery */
int scl_gpio;
@@ -449,6 +449,48 @@ int i2c_recover_bus(struct i2c_adapter *adap);
int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
int i2c_generic_scl_recovery(struct i2c_adapter *adap);
+/**
+ * struct i2c_adapter_quirks - describe flaws of an i2c adapter
+ * @flags: see I2C_AQ_* for possible flags and read below
+ * @max_num_msgs: maximum number of messages per transfer
+ * @max_write_len: maximum length of a write message
+ * @max_read_len: maximum length of a read message
+ * @max_comb_1st_msg_len: maximum length of the first msg in a combined message
+ * @max_comb_2nd_msg_len: maximum length of the second msg in a combined message
+ *
+ * Note about combined messages: Some I2C controllers can only send one message
+ * per transfer, plus something called combined message or write-then-read.
+ * This is (usually) a small write message followed by a read message and
+ * barely enough to access register based devices like EEPROMs. There is a flag
+ * to support this mode. It implies max_num_msgs = 2 and does the length checks
+ * with max_comb_*_len because combined message mode usually has its own
+ * limitations. Because of HW implementations, some controllers can actually do
+ * write-then-anything or other variants. To support that, write-then-read has
+ * been broken out into smaller bits like write-first and read-second which can
+ * be combined as needed.
+ */
+
+struct i2c_adapter_quirks {
+ u64 flags;
+ int max_num_msgs;
+ u16 max_write_len;
+ u16 max_read_len;
+ u16 max_comb_1st_msg_len;
+ u16 max_comb_2nd_msg_len;
+};
+
+/* enforce max_num_msgs = 2 and use max_comb_*_len for length checks */
+#define I2C_AQ_COMB BIT(0)
+/* first combined message must be write */
+#define I2C_AQ_COMB_WRITE_FIRST BIT(1)
+/* second combined message must be read */
+#define I2C_AQ_COMB_READ_SECOND BIT(2)
+/* both combined messages must have the same target address */
+#define I2C_AQ_COMB_SAME_ADDR BIT(3)
+/* convenience macro for typical write-then-read case */
+#define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \
+ I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
+
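Illustrative only (not part of the patch): how a bus driver for a write-then-read-only controller might describe itself with the new quirks structure; the limits shown are hypothetical.

static const struct i2c_adapter_quirks my_quirks = {
        .flags                  = I2C_AQ_COMB_WRITE_THEN_READ,
        .max_comb_1st_msg_len   = 2,    /* e.g. a 16-bit register address */
        .max_comb_2nd_msg_len   = 32,   /* longest read the IP can do */
};

/* in probe(), before i2c_add_adapter(): */
adap->quirks = &my_quirks;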
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
* with the access algorithms necessary to access it.
@@ -474,6 +516,7 @@ struct i2c_adapter {
struct list_head userspace_clients;
struct i2c_bus_recovery_info *bus_recovery_info;
+ const struct i2c_adapter_quirks *quirks;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 0bc03f100d04..9ad7828d9d34 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -675,6 +675,7 @@ struct twl4030_power_data {
struct twl4030_resconfig *board_config;
#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
bool use_poweroff; /* Board is wired for TWL poweroff */
+ bool ac_charger_quirk; /* Disable AC charger on board */
};
extern int twl4030_remove_script(u8 flags);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 93b5ca754b5b..a633898f36ac 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -39,6 +39,19 @@
struct device;
+/* IDE-specific values for req->cmd_type */
+enum ata_cmd_type_bits {
+ REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1,
+ REQ_TYPE_ATA_PC,
+ REQ_TYPE_ATA_SENSE, /* sense request */
+ REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */
+ REQ_TYPE_ATA_PM_RESUME, /* resume request */
+};
+
+#define ata_pm_request(rq) \
+ ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
+ (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
+
/* Error codes returned in rq->errors to the higher part of the driver. */
enum {
IDE_DRV_ERROR_GENERAL = 101,
@@ -1314,6 +1327,19 @@ struct ide_port_info {
u8 udma_mask;
};
+/*
+ * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
+ * requests.
+ */
+struct ide_pm_state {
+ /* PM state machine step value, currently driver specific */
+ int pm_step;
+ /* requested PM state value (S1, S2, S3, S4, ...) */
+ u32 pm_state;
+	void *data;		/* for driver use */
+};
+
+
int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
const struct ide_port_info *, void *);
@@ -1551,4 +1577,5 @@ static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
#define ide_host_for_each_port(i, port, host) \
for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
+
#endif /* _IDE_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 6e82d888287c..1dc1f4ed4001 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -28,7 +28,9 @@
#include <asm/byteorder.h>
#define IEEE802154_MTU 127
-#define IEEE802154_MIN_PSDU_LEN 5
+#define IEEE802154_ACK_PSDU_LEN 5
+#define IEEE802154_MIN_PSDU_LEN 9
+#define IEEE802154_FCS_LEN 2
#define IEEE802154_PAN_ID_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
@@ -38,6 +40,7 @@
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
+#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
#define IEEE802154_MAX_CHANNEL 26
#define IEEE802154_MAX_PAGE 31
@@ -204,26 +207,31 @@ enum {
/**
* ieee802154_is_valid_psdu_len - check if psdu len is valid
+ * available lengths:
+ * 0-4 Reserved
+ * 5 MPDU (Acknowledgment)
+ * 6-8 Reserved
+ * 9-127 MPDU
+ *
* @len: psdu len with (MHR + payload + MFR)
*/
static inline bool ieee802154_is_valid_psdu_len(const u8 len)
{
- return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
+ return (len == IEEE802154_ACK_PSDU_LEN ||
+ (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
}
/**
 * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
* @addr: extended addr to check
*/
-static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
{
- /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
- * is used internally as extended to short address broadcast mapping.
- * This is currently a workaround because neighbor discovery can't
- * deal with short addresses types right now.
+ /* Bail out if the address is all zero, or if the group
+ * address bit is set.
*/
return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
- (addr != cpu_to_le64(0xffffffffffffffffULL)));
+ !(addr & cpu_to_le64(0x0100000000000000ULL)));
}
/**
@@ -234,9 +242,9 @@ static inline void ieee802154_random_extended_addr(__le64 *addr)
{
get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
- /* toggle some bit if we hit an invalid extended addr */
- if (!ieee802154_is_valid_extended_addr(*addr))
- ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
+ /* clear the group bit, and set the locally administered bit */
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01;
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;
}
#endif /* LINUX_IEEE802154_H */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index a57bca2ea97e..dad8b00beed2 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -44,6 +44,7 @@ struct br_ip_list {
#define BR_PROMISC BIT(7)
#define BR_PROXYARP BIT(8)
#define BR_LEARNING_SYNC BIT(9)
+#define BR_PROXYARP_WIFI BIT(10)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 119130e9298b..ae5d0d22955d 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -5,6 +5,15 @@
/* We don't want this structure exposed to user space */
+struct ifla_vf_stats {
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 broadcast;
+ __u64 multicast;
+};
+
struct ifla_vf_info {
__u32 vf;
__u8 mac[32];
@@ -14,5 +23,6 @@ struct ifla_vf_info {
__u32 linkstate;
__u32 min_tx_rate;
__u32 max_tx_rate;
+ __u32 rss_query_en;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6f6929ea8a0c..a4ccc3122f93 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -29,7 +29,7 @@ struct macvtap_queue;
* Maximum times a macvtap device can be opened. This can be used to
* configure the number of receive queue, e.g. for multiqueue virtio.
*/
-#define MAX_MACVTAP_QUEUES 16
+#define MAX_MACVTAP_QUEUES 256
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index aff7ad8a4ea3..b49cf923becc 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
#include <uapi/linux/if_pppox.h>
static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
@@ -32,6 +33,7 @@ struct pppoe_opt {
struct pppoe_addr pa; /* what this socket is bound to*/
struct sockaddr_pppox relay; /* what socket data will be
relayed to (PPPoE relaying) */
+ struct work_struct padt_work;/* Work item for handling PADT */
};
struct pptp_opt {
@@ -72,7 +74,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po)
struct module;
struct pppox_proto {
- int (*create)(struct net *net, struct socket *sock);
+ int (*create)(struct net *net, struct socket *sock, int kern);
int (*ioctl)(struct socket *sock, unsigned int cmd,
unsigned long arg);
struct module *owner;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index b11b28a30b9e..67ce5bd3b56a 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
/**
* __vlan_get_tag - get the VLAN ID that is part of the payload
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if the skb is not of VLAN type
*/
@@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
/**
* __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if @skb->vlan_tci is not set correctly
*/
@@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if the skb is not VLAN tagged
*/
@@ -539,7 +539,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
*/
proto = vhdr->h_vlan_encapsulated_proto;
- if (ntohs(proto) >= ETH_P_802_3_MIN) {
+ if (eth_proto_is_802_3(proto)) {
skb->protocol = proto;
return;
}
@@ -561,4 +561,91 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
skb->protocol = htons(ETH_P_802_2);
}
+/**
+ * skb_vlan_tagged - check if skb is vlan tagged.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged, regardless of whether it is hardware
+ * accelerated or not.
+ */
+static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+{
+ if (!skb_vlan_tag_present(skb) &&
+ likely(skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)))
+ return false;
+
+ return true;
+}
+
+/**
+ * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * of whether it is hardware accelerated or not.
+ */
+static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+
+ if (!skb_vlan_tag_present(skb)) {
+ struct vlan_ethhdr *veh;
+
+ if (likely(protocol != htons(ETH_P_8021Q) &&
+ protocol != htons(ETH_P_8021AD)))
+ return false;
+
+ veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ }
+
+ if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+ return false;
+
+ return true;
+}
+
+/**
+ * vlan_features_check - drop unsafe features for skb with multiple tags.
+ * @skb: skbuff to query
+ * @features: features to be checked
+ *
+ * Returns features without unsafe ones if the skb has multiple tags.
+ */
+static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (skb_vlan_tagged_multi(skb))
+ features = netdev_intersect_features(features,
+ NETIF_F_SG |
+ NETIF_F_HIGHDMA |
+ NETIF_F_FRAGLIST |
+ NETIF_F_GEN_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ return features;
+}
+
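Usage sketch (not in the patch): a driver's .ndo_features_check can defer the multi-tag policing to vlan_features_check(); my_features_check is hypothetical.

static netdev_features_t my_features_check(struct sk_buff *skb,
                                            struct net_device *dev,
                                            netdev_features_t features)
{
        /* strip offloads the hardware cannot apply to double-tagged frames */
        return vlan_features_check(skb, features);
}

/* wired up via .ndo_features_check = my_features_check in net_device_ops */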
+/**
+ * compare_vlan_header - Compare two vlan headers
+ * @h1: Pointer to vlan header
+ * @h2: Pointer to vlan header
+ *
+ * Compare two vlan headers, returns 0 if equal.
+ *
+ * Please note that the alignment of h1 and h2 is only guaranteed to be 16 bits.
+ */
+static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
+ const struct vlan_hdr *h2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return *(u32 *)h1 ^ *(u32 *)h2;
+#else
+ return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
+ ((__force u32)h1->h_vlan_encapsulated_proto ^
+ (__force u32)h2->h_vlan_encapsulated_proto);
+#endif
+}
#endif /* !(_LINUX_IF_VLAN_H_) */
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 2c677afeea47..193ad488d3e2 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -130,5 +130,6 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
#endif
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 46da02410a09..0e707f0c1a3e 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -11,33 +11,35 @@ struct sk_buff;
struct netlink_callback;
struct inet_diag_handler {
- void (*dump)(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct inet_diag_req_v2 *r,
- struct nlattr *bc);
-
- int (*dump_one)(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req);
-
- void (*idiag_get_info)(struct sock *sk,
- struct inet_diag_msg *r,
- void *info);
- __u16 idiag_type;
+ void (*dump)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc);
+
+ int (*dump_one)(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req);
+
+ void (*idiag_get_info)(struct sock *sk,
+ struct inet_diag_msg *r,
+ void *info);
+ __u16 idiag_type;
+ __u16 idiag_info_size;
};
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh);
+ struct sk_buff *skb, const struct inet_diag_req_v2 *req,
+ struct user_namespace *user_ns,
+ u32 pid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
- struct netlink_callback *cb, struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req);
+ struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req);
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0a21fbefdfbe..a4328cea376a 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -120,6 +120,9 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
|| (!IN_DEV_FORWARD(in_dev) && \
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
+#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
+ IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 696d22312b31..bb9b075f0eb0 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -50,9 +50,8 @@ extern struct fs_struct init_fs;
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
.cputimer = { \
- .cputime = INIT_CPUTIME, \
- .running = 0, \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ .cputime_atomic = INIT_CPUTIME_ATOMIC, \
+ .running = 0, \
}, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index a65208a8fe18..d9a366d24e3b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -87,6 +87,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/*
* Decoding Capability Register
*/
+#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
@@ -115,10 +116,20 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
* Extended Capability Register
*/
-#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
+#define ecap_pasid(e) ((e >> 40) & 0x1)
+#define ecap_pss(e) ((e >> 35) & 0x1f)
+#define ecap_eafs(e) ((e >> 34) & 0x1)
+#define ecap_nwfs(e) ((e >> 33) & 0x1)
+#define ecap_srs(e) ((e >> 31) & 0x1)
+#define ecap_ers(e) ((e >> 30) & 0x1)
+#define ecap_prs(e) ((e >> 29) & 0x1)
+/* PASID support used to be on bit 28 */
+#define ecap_dis(e) ((e >> 27) & 0x1)
+#define ecap_nest(e) ((e >> 26) & 0x1)
+#define ecap_mts(e) ((e >> 25) & 0x1)
+#define ecap_ecs(e) ((e >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) \
- (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
+#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) ((e >> 6) & 0x1)
@@ -180,6 +191,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)
+/* DMA_RTADDR_REG */
+#define DMA_RTADDR_RTT (((u64)1) << 11)
+
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
@@ -283,9 +297,12 @@ struct q_inval {
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
+#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
#define INTR_REMAP_TABLE_ENTRIES 65536
+struct irq_domain;
+
struct ir_table {
struct irte *base;
unsigned long *bitmap;
@@ -307,6 +324,9 @@ enum {
MAX_SR_DMAR_REGS
};
+#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
@@ -335,9 +355,12 @@ struct intel_iommu {
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
+ struct irq_domain *ir_domain;
+ struct irq_domain *ir_msi_domain;
#endif
struct device *iommu_dev; /* IOMMU-sysfs device */
int node;
+ u32 flags; /* Software defined flags */
};
static inline void __iommu_flush_cache(
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 950ae4501826..be7e75c945e9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -413,7 +413,8 @@ enum
BLOCK_IOPOLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ,
+ HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
+ numbering. Sigh! */
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
NR_SOFTIRQS
@@ -592,10 +593,10 @@ tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
clockid_t which_clock, enum hrtimer_mode mode);
static inline
-int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
+void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+ const enum hrtimer_mode mode)
{
- return hrtimer_start(&ttimer->timer, time, mode);
+ hrtimer_start(&ttimer->timer, time, mode);
}
static inline
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 657fab4efab3..c27dde7215b5 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -141,6 +141,7 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
+ preempt_disable();
pagefault_disable();
return ((char __force __iomem *) mapping) + offset;
}
@@ -149,6 +150,7 @@ static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
pagefault_enable();
+ preempt_enable();
}
/* Non-atomic map/unmap */
diff --git a/include/linux/io.h b/include/linux/io.h
index fa02e55e5a2e..fb5a99800e77 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -19,6 +19,7 @@
#define _LINUX_IO_H
#include <linux/types.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -38,6 +39,14 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
}
#endif
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+void __init ioremap_huge_init(void);
+int arch_ioremap_pud_supported(void);
+int arch_ioremap_pmd_supported(void);
+#else
+static inline void ioremap_huge_init(void) { }
+#endif
+
/*
* Managed iomap interface
*/
@@ -64,6 +73,8 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
resource_size_t size);
+void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
+ resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
@@ -101,6 +112,13 @@ static inline void arch_phys_wc_del(int handle)
}
#define arch_phys_wc_add arch_phys_wc_add
+#ifndef arch_phys_wc_index
+static inline int arch_phys_wc_index(int handle)
+{
+ return -1;
+}
+#define arch_phys_wc_index arch_phys_wc_index
+#endif
#endif
#endif /* _LINUX_IO_H */
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
new file mode 100644
index 000000000000..bbced83b32ee
--- /dev/null
+++ b/include/linux/iommu-common.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_IOMMU_COMMON_H
+#define _LINUX_IOMMU_COMMON_H
+
+#include <linux/spinlock_types.h>
+#include <linux/device.h>
+#include <asm/page.h>
+
+#define IOMMU_POOL_HASHBITS 4
+#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+ unsigned long start;
+ unsigned long end;
+ unsigned long hint;
+ spinlock_t lock;
+};
+
+struct iommu_map_table {
+ unsigned long table_map_base;
+ unsigned long table_shift;
+ unsigned long nr_pools;
+ void (*lazy_flush)(struct iommu_map_table *);
+ unsigned long poolsize;
+ struct iommu_pool pools[IOMMU_NR_POOLS];
+ u32 flags;
+#define IOMMU_HAS_LARGE_POOL 0x00000001
+#define IOMMU_NO_SPAN_BOUND 0x00000002
+#define IOMMU_NEED_FLUSH 0x00000004
+ struct iommu_pool large_pool;
+ unsigned long *map;
+};
+
+extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+ unsigned long num_entries,
+ u32 table_shift,
+ void (*lazy_flush)(struct iommu_map_table *),
+ bool large_pool, u32 npools,
+ bool skip_span_boundary_check);
+
+extern unsigned long iommu_tbl_range_alloc(struct device *dev,
+ struct iommu_map_table *iommu,
+ unsigned long npages,
+ unsigned long *handle,
+ unsigned long mask,
+ unsigned int align_order);
+
+extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
+ u64 dma_addr, unsigned long npages,
+ unsigned long entry);
+
+#endif
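A minimal sketch (illustrative only, not part of the patch) of the new pool allocator: build a map table over num_entries IO pages, then carve a range out of it. Names, sizes and the bitmap storage are hypothetical; error handling is elided.

#include <linux/iommu-common.h>
#include <linux/slab.h>

static struct iommu_map_table my_tbl;

static void my_tbl_init(unsigned long num_entries)
{
        my_tbl.table_map_base = 0;              /* base IO virtual address */
        my_tbl.map = kcalloc(BITS_TO_LONGS(num_entries),
                             sizeof(unsigned long), GFP_KERNEL);
        iommu_tbl_pool_init(&my_tbl, num_entries, PAGE_SHIFT,
                            NULL,       /* no lazy_flush callback */
                            false,      /* no large pool */
                            1,          /* a single pool */
                            false);     /* keep span-boundary checking */
}

static unsigned long my_map_range(struct device *dev, unsigned long npages)
{
        /* returns the first allocated entry index in the table */
        return iommu_tbl_range_alloc(dev, &my_tbl, npages, NULL,
                                     ~0UL /* no mask */, 0 /* align order */);
}

static void my_unmap_range(u64 dma_addr, unsigned long npages,
                           unsigned long entry)
{
        iommu_tbl_range_free(&my_tbl, dma_addr, npages, entry);
}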
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 38daa453f2e5..dc767f7c3704 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -51,9 +51,33 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
+/* Domain feature flags */
+#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
+#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
+ implementation */
+#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+
+/*
+ * These are the possible domain types
+ *
+ * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
+ * devices
+ * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
+ * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
+ * for VMs
+ * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
+ * This flag allows IOMMU drivers to implement
+ * certain optimizations for these domains
+ */
+#define IOMMU_DOMAIN_BLOCKED (0U)
+#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
+#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
+#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API)
+
struct iommu_domain {
+ unsigned type;
const struct iommu_ops *ops;
- void *priv;
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
@@ -90,6 +114,20 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};
+/**
+ * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * @list: Linked list pointers
+ * @start: System physical start address of the region
+ * @length: Length of the region in bytes
+ * @prot: IOMMU Protection flags (READ/WRITE/...)
+ */
+struct iommu_dm_region {
+ struct list_head list;
+ phys_addr_t start;
+ size_t length;
+ int prot;
+};
+
#ifdef CONFIG_IOMMU_API
/**
@@ -113,8 +151,11 @@ enum iommu_attr {
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
- int (*domain_init)(struct iommu_domain *domain);
- void (*domain_destroy)(struct iommu_domain *domain);
+
+ /* Domain allocation and freeing by the iommu driver */
+ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+ void (*domain_free)(struct iommu_domain *);
+
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
@@ -132,6 +173,10 @@ struct iommu_ops {
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
+ /* Request/Free a list of direct mapping requirements for a device */
+ void (*get_dm_regions)(struct device *dev, struct list_head *list);
+ void (*put_dm_regions)(struct device *dev, struct list_head *list);
+
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
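Sketch only (not part of the patch): the domain_init/domain_destroy pair is replaced by domain_alloc/domain_free, so an IOMMU driver now embeds struct iommu_domain in its own structure. struct my_domain and my_iommu_ops are hypothetical, and a real driver would fill in the remaining ops.

struct my_domain {
        /* driver-private page-table state would live here */
        struct iommu_domain domain;     /* embedded generic domain */
};

static struct iommu_domain *my_domain_alloc(unsigned type)
{
        struct my_domain *dom;

        /* this driver only implements classic unmanaged domains */
        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        return &dom->domain;
}

static void my_domain_free(struct iommu_domain *domain)
{
        kfree(container_of(domain, struct my_domain, domain));
}

static const struct iommu_ops my_iommu_ops = {
        .domain_alloc   = my_domain_alloc,
        .domain_free    = my_domain_free,
        /* plus .attach_dev, .map, .unmap and friends in a real driver */
};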
@@ -166,6 +211,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -177,6 +223,10 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
+extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern int iommu_request_dm_for_dev(struct device *dev);
+
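Illustrative sketch (not in the patch) of the direct-mapping hooks: a driver reports one firmware-reserved window and frees the list again. The my_fw_region_* values are hypothetical.

static void my_get_dm_regions(struct device *dev, struct list_head *list)
{
        struct iommu_dm_region *region;

        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return;

        region->start  = my_fw_region_start;    /* hypothetical */
        region->length = my_fw_region_size;     /* hypothetical */
        region->prot   = IOMMU_READ | IOMMU_WRITE;
        list_add_tail(&region->list, list);
}

static void my_put_dm_regions(struct device *dev, struct list_head *list)
{
        struct iommu_dm_region *region, *tmp;

        list_for_each_entry_safe(region, tmp, list, list)
                kfree(region);
}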
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
@@ -200,6 +250,7 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
@@ -305,6 +356,11 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
{
}
+static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
+{
+ return NULL;
+}
+
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, int gfp_order, int prot)
{
@@ -346,6 +402,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
+static inline void iommu_get_dm_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
+static inline void iommu_put_dm_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
+static inline int iommu_request_dm_for_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
static inline int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 2c5250222278..388e3ae94f7a 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -196,10 +196,8 @@ extern struct resource * __request_region(struct resource *,
/* Compatibility cruft */
#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
-#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n))
#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
-extern int __check_region(struct resource *, resource_size_t, resource_size_t);
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -207,12 +205,6 @@ extern int release_mem_region_adjustable(struct resource *, resource_size_t,
resource_size_t);
#endif
-static inline int __deprecated check_region(resource_size_t s,
- resource_size_t n)
-{
- return __check_region(&ioport_resource, s, n);
-}
-
/* Wrappers for managed devices */
struct device;
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 4d5169f5d7d1..82806c60aa42 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -53,6 +53,10 @@ struct ipv6_devconf {
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
+ struct ipv6_stable_secret {
+ bool initialized;
+ struct in6_addr secret;
+ } stable_secret;
void *sysctl;
};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62c6901cab55..812149160d3b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -126,13 +126,21 @@ struct msi_desc;
struct irq_domain;
/**
- * struct irq_data - per irq and irq chip data passed down to chip functions
+ * struct irq_common_data - per irq data shared by all irqchips
+ * @state_use_accessors: status information for irq chip functions.
+ * Use accessor functions to deal with it
+ */
+struct irq_common_data {
+ unsigned int state_use_accessors;
+};
+
+/**
+ * struct irq_data - per irq chip data passed down to chip functions
* @mask: precomputed bitmask for accessing the chip registers
* @irq: interrupt number
* @hwirq: hardware interrupt number, local to the interrupt domain
* @node: node index useful for balancing
- * @state_use_accessors: status information for irq chip functions.
- * Use accessor functions to deal with it
+ * @common: points to data shared by all irqchips
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
@@ -153,7 +161,7 @@ struct irq_data {
unsigned int irq;
unsigned long hwirq;
unsigned int node;
- unsigned int state_use_accessors;
+ struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -166,7 +174,7 @@ struct irq_data {
};
/*
- * Bit masks for irq_data.state
+ * Bit masks for irq_common_data.state_use_accessors
*
* IRQD_TRIGGER_MASK - Mask for the trigger type bits
* IRQD_SETAFFINITY_PENDING - Affinity setting is pending
@@ -198,34 +206,36 @@ enum {
IRQD_WAKEUP_ARMED = (1 << 19),
};
+#define __irqd_to_state(d) ((d)->common->state_use_accessors)
+
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+ return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}
static inline bool irqd_is_per_cpu(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_PER_CPU;
+ return __irqd_to_state(d) & IRQD_PER_CPU;
}
static inline bool irqd_can_balance(struct irq_data *d)
{
- return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+ return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}
static inline bool irqd_affinity_was_set(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_AFFINITY_SET;
+ return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}
static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
- d->state_use_accessors |= IRQD_AFFINITY_SET;
+ __irqd_to_state(d) |= IRQD_AFFINITY_SET;
}
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_TRIGGER_MASK;
+ return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
/*
@@ -233,43 +243,43 @@ static inline u32 irqd_get_trigger_type(struct irq_data *d)
*/
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
- d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
- d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
}
static inline bool irqd_is_level_type(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_LEVEL;
+ return __irqd_to_state(d) & IRQD_LEVEL;
}
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_WAKEUP_STATE;
+ return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}
static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+ return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}
static inline bool irqd_irq_disabled(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_DISABLED;
+ return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}
static inline bool irqd_irq_masked(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_MASKED;
+ return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}
static inline bool irqd_irq_inprogress(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
+ return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}
static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+ return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}
@@ -280,12 +290,12 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
*/
static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
{
- d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
+ __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS;
}
static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
{
- d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
+ __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS;
}
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -327,6 +337,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_write_msi_msg: optional to write message content for MSI
* @irq_get_irqchip_state: return the internal state of an interrupt
* @irq_set_irqchip_state: set the internal state of an interrupt
+ * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
* @flags: chip specific flags
*/
struct irq_chip {
@@ -369,6 +380,8 @@ struct irq_chip {
int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
+ int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
+
unsigned long flags;
};
@@ -422,6 +435,7 @@ extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
+extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
@@ -458,6 +472,8 @@ extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void irq_chip_enable_parent(struct irq_data *data);
+extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
@@ -467,6 +483,8 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest,
bool force);
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
+extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
+ void *vcpu_info);
#endif
/* Handling of unhandled and spurious interrupts: */
@@ -517,6 +535,15 @@ irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
__irq_set_handler(irq, handle, 1, NULL);
}
+/*
+ * Set a highlevel chained flow handler and its data for a given IRQ.
+ * (a chained handler is automatically enabled and set to
+ * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
+ */
+void
+irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
+ void *data);
+
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -624,6 +651,23 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
+static inline int irq_data_get_node(struct irq_data *d)
+{
+ return d->node;
+}
+
+static inline struct cpumask *irq_get_affinity_mask(int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? d->affinity : NULL;
+}
+
+static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+{
+ return d->affinity;
+}
+
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
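A minimal sketch of how the reworked irq.h accessors fit together (hypothetical driver-side code, assuming the helpers added above); the state word now lives in struct irq_common_data and is only reached through the irqd_*() accessors:

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/printk.h>

/* Hypothetical helper: dump a few properties of an IRQ via accessors only. */
static void example_dump_irq(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	if (!d)
		return;

	/* Trigger type and level/edge info come from the shared state word. */
	pr_info("irq %u: trigger %#x, %s-triggered, node %d\n",
		irq, irqd_get_trigger_type(d),
		irqd_is_level_type(d) ? "level" : "edge",
		irq_data_get_node(d));

	/* The affinity mask is reached through the new helper. */
	pr_info("irq %u: %u CPUs in affinity mask\n",
		irq, cpumask_weight(irq_get_affinity_mask(irq)));
}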
diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h
new file mode 100644
index 000000000000..de3419ed3937
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-acpi.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014, Linaro Ltd.
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARM_GIC_ACPI_H_
+#define ARM_GIC_ACPI_H_
+
+#ifdef CONFIG_ACPI
+
+/*
+ * These sizes are hard-coded here because, unlike FDT, the MADT does not
+ * provide them. That is not a problem, since they can be inferred from
+ * the GIC specification.
+ */
+#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
+#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
+
+struct acpi_table_header;
+
+int gic_v2_acpi_init(struct acpi_table_header *table);
+void acpi_gic_init(void);
+#else
+static inline void acpi_gic_init(void) { }
+#endif
+
+#endif /* ARM_GIC_ACPI_H_ */
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 36ec4ae74634..9de976b4f9a7 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -95,8 +95,6 @@
struct device_node;
-extern struct irq_chip gic_arch_extn;
-
void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 3ea2e4754c40..9b1ad3734911 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -165,6 +165,8 @@
#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
#define GIC_VPE_PEND_SWINT1_SHF 5
#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
+#define GIC_VPE_PEND_FDC_SHF 6
+#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF)
/* GIC_VPE_RMASK Masks */
#define GIC_VPE_RMASK_WD_SHF 0
@@ -179,6 +181,8 @@
#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
#define GIC_VPE_RMASK_SWINT1_SHF 5
#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
+#define GIC_VPE_RMASK_FDC_SHF 6
+#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF)
/* GIC_VPE_SMASK Masks */
#define GIC_VPE_SMASK_WD_SHF 0
@@ -193,6 +197,8 @@
#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
#define GIC_VPE_SMASK_SWINT1_SHF 5
#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
+#define GIC_VPE_SMASK_FDC_SHF 6
+#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
@@ -247,4 +253,5 @@ extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern int gic_get_c0_compare_int(void);
extern int gic_get_c0_perfcount_int(void);
+extern int gic_get_c0_fdc_int(void);
#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dd1109fb241e..c52d1480f272 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -17,7 +17,7 @@ struct pt_regs;
/**
* struct irq_desc - interrupt descriptor
- * @irq_data: per irq and chip data passed down to chip functions
+ * @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler
* @preflow_handler: handler called before the flow handler (currently used by sparc)
@@ -47,6 +47,7 @@ struct pt_regs;
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
+ struct irq_common_data irq_common_data;
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
@@ -93,6 +94,15 @@ struct irq_desc {
extern struct irq_desc irq_desc[NR_IRQS];
#endif
+static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
+{
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ return irq_to_desc(data->irq);
+#else
+ return container_of(data, struct irq_desc, irq_data);
+#endif
+}
+
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
return &desc->irq_data;
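The !CONFIG_IRQ_DOMAIN_HIERARCHY branch of irq_data_to_desc() above works because struct irq_data is embedded in struct irq_desc, so container_of() can recover the descriptor without a lookup. A small hedged illustration of that idiom with made-up types:

#include <linux/kernel.h>

struct example_inner { int value; };

struct example_outer {
	int id;
	struct example_inner data;	/* embedded, like irq_data in irq_desc */
};

static struct example_outer *example_to_outer(struct example_inner *p)
{
	/* Subtracts offsetof(struct example_outer, data) from p. */
	return container_of(p, struct example_outer, data);
}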
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 676d7306a360..744ac0ec98eb 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -258,6 +258,10 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, struct irq_chip *chip,
+ void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
unsigned int flags, unsigned int size,
@@ -281,10 +285,6 @@ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_chip *chip,
void *chip_data);
-extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
- void *handler_data, const char *handler_name);
extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
extern void irq_domain_free_irqs_common(struct irq_domain *domain,
unsigned int virq,
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 20e7f78041c8..edb640ae9a94 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
unsigned long *block);
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
/* Commit management */
@@ -1157,7 +1157,7 @@ extern int jbd2_journal_recover (journal_t *journal);
extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
-extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
+extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
unsigned long, int);
extern void __jbd2_journal_abort_hard (journal_t *);
extern void jbd2_journal_abort (journal_t *, int);
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 47cb09edec1a..348c6f47e4cc 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
}
-/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
- a += JHASH_INITVAL;
- b += JHASH_INITVAL;
+ a += initval;
+ b += initval;
c += initval;
__jhash_final(a, b, c);
@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
return c;
}
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+ return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
- return jhash_3words(a, b, 0, initval);
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
static inline u32 jhash_1word(u32 a, u32 initval)
{
- return jhash_3words(a, 0, 0, initval);
+ return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}
#endif /* _LINUX_JHASH_H */
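A hedged usage sketch (hypothetical values) for the reworked jhash wrappers above: since the word count is now folded into the initial value the same way jhash2() does it, the 2-word wrapper should produce the same hash as jhash2() over the same two words:

#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical check: the 2-word wrapper should now match jhash2(). */
static void example_jhash_check(u32 saddr, u32 daddr, u32 seed)
{
	u32 words[2] = { saddr, daddr };
	u32 h_wrapper = jhash_2words(saddr, daddr, seed);
	u32 h_generic = jhash2(words, ARRAY_SIZE(words), seed);

	pr_info("jhash_2words=%#x jhash2=%#x %s\n",
		h_wrapper, h_generic,
		h_wrapper == h_generic ? "(match)" : "(mismatch)");
}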
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c367cbdf73ab..535fd3bb1ba8 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -7,6 +7,7 @@
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/param.h> /* for HZ */
+#include <generated/timeconst.h>
/*
* The following defines establish the engineering parameters of the PLL
@@ -288,8 +289,133 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
-extern unsigned long msecs_to_jiffies(const unsigned int m);
-extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long __msecs_to_jiffies(const unsigned int m);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+/*
+ * HZ is equal to or smaller than 1000, and 1000 is a nice round
+ * multiple of HZ, divide with the factor between them, but round
+ * upwards:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+}
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+/*
+ * HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
+ * simply multiply with the factor between them.
+ *
+ * But first make sure the multiplication result cannot overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+ return m * (HZ / MSEC_PER_SEC);
+}
+#else
+/*
+ * Generic case - multiply, round and divide. But first check that if
+ * we are doing a net multiplication, that we wouldn't overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+
+ return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
+}
+#endif
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m: time in milliseconds
+ *
+ * conversion is done as follows:
+ *
+ * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
+ *
+ * - 'too large' values [that would result in larger than
+ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ * the input value by a factor or dividing it with a factor and
+ * handling any 32-bit overflows.
+ * for the details see __msecs_to_jiffies()
+ *
+ * msecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * directly here and from __msecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+ if (__builtin_constant_p(m)) {
+ if ((int)m < 0)
+ return MAX_JIFFY_OFFSET;
+ return _msecs_to_jiffies(m);
+ } else {
+ return __msecs_to_jiffies(m);
+ }
+}
+
+extern unsigned long __usecs_to_jiffies(const unsigned int u);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+}
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return u * (HZ / USEC_PER_SEC);
+}
+#else
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
+ >> USEC_TO_HZ_SHR32;
+}
+#endif
+
+/**
+ * usecs_to_jiffies: - convert microseconds to jiffies
+ * @u: time in microseconds
+ *
+ * conversion is done as follows:
+ *
+ * - 'too large' values [that would result in larger than
+ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ * the input value by a factor or dividing it with a factor and
+ * handling any 32-bit overflows as for msecs_to_jiffies.
+ *
+ * usecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __usecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * directly here and from __usecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long usecs_to_jiffies(const unsigned int u)
+{
+ if (__builtin_constant_p(u)) {
+ if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+ return _usecs_to_jiffies(u);
+ } else {
+ return __usecs_to_jiffies(u);
+ }
+}
+
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value);
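A hedged usage sketch (hypothetical driver snippet) for the jiffies.h split above: a constant argument is folded at compile time via _msecs_to_jiffies(), while a runtime value goes through __msecs_to_jiffies():

#include <linux/jiffies.h>
#include <linux/sched.h>

/* Hypothetical: wait for a fixed and for a caller-supplied interval. */
static void example_waits(unsigned int user_ms)
{
	/* Constant argument: gcc can reduce this to a compile-time value. */
	unsigned long fixed = msecs_to_jiffies(100);

	/* Runtime argument: resolved by __msecs_to_jiffies() at run time. */
	unsigned long variable = msecs_to_jiffies(user_ms);

	schedule_timeout_interruptible(fixed);
	schedule_timeout_interruptible(variable);
}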
diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h
new file mode 100644
index 000000000000..e7f1cc7a2284
--- /dev/null
+++ b/include/linux/jz4780-nemc.h
@@ -0,0 +1,43 @@
+/*
+ * JZ4780 NAND/external memory controller (NEMC)
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex@alex-smith.me.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_JZ4780_NEMC_H__
+#define __LINUX_JZ4780_NEMC_H__
+
+#include <linux/types.h>
+
+struct device;
+
+/*
+ * Number of NEMC banks. Note that there are actually 6, but they are numbered
+ * from 1.
+ */
+#define JZ4780_NEMC_NUM_BANKS 7
+
+/**
+ * enum jz4780_nemc_bank_type - device types which can be connected to a bank
+ * @JZ4780_NEMC_BANK_SRAM: SRAM
+ * @JZ4780_NEMC_BANK_NAND: NAND
+ */
+enum jz4780_nemc_bank_type {
+ JZ4780_NEMC_BANK_SRAM,
+ JZ4780_NEMC_BANK_NAND,
+};
+
+extern unsigned int jz4780_nemc_num_banks(struct device *dev);
+
+extern void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
+ enum jz4780_nemc_bank_type type);
+extern void jz4780_nemc_assert(struct device *dev, unsigned int bank,
+ bool assert);
+
+#endif /* __LINUX_JZ4780_NEMC_H__ */
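A minimal, hypothetical consumer sketch for the new NEMC interface above (the device pointer and bank number are made up; real callers would get them from the DT binding):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/jz4780-nemc.h>

/* Hypothetical: configure bank 1 of the NEMC for a NAND chip. */
static int example_nemc_setup(struct device *nemc_dev)
{
	unsigned int banks = jz4780_nemc_num_banks(nemc_dev);

	if (banks == 0)
		return -ENODEV;

	/* Banks are numbered from 1 (see JZ4780_NEMC_NUM_BANKS above). */
	jz4780_nemc_set_type(nemc_dev, 1, JZ4780_NEMC_BANK_NAND);
	return 0;
}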
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5bb074431eb0..5486d777b706 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void kasan_kmalloc_large(const void *ptr, size_t size);
void kasan_kfree_large(const void *ptr);
+void kasan_kfree(void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
void kasan_krealloc(const void *object, size_t new_size);
@@ -71,6 +72,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size) {}
static inline void kasan_krealloc(const void *object, size_t new_size) {}
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index be342b94c640..b33c7797eb57 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -23,14 +23,6 @@
#define ___config_enabled(__ignored, val, ...) val
/*
- * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
- *
- */
-#define IS_ENABLED(option) \
- (config_enabled(option) || config_enabled(option##_MODULE))
-
-/*
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
@@ -43,4 +35,20 @@
*/
#define IS_MODULE(option) config_enabled(option##_MODULE)
+/*
+ * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
+ * code can call a function defined in code compiled based on CONFIG_FOO.
+ * This is similar to IS_ENABLED(), but returns false when invoked from
+ * built-in code when CONFIG_FOO is set to 'm'.
+ */
+#define IS_REACHABLE(option) (config_enabled(option) || \
+ (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+
+/*
+ * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
+ * 0 otherwise.
+ */
+#define IS_ENABLED(option) \
+ (IS_BUILTIN(option) || IS_MODULE(option))
+
#endif /* __LINUX_KCONFIG_H */
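A hedged illustration of the new IS_REACHABLE() macro (CONFIG_FOO and foo_do_something() are placeholders): with CONFIG_FOO=m, IS_ENABLED(CONFIG_FOO) is 1 everywhere, while IS_REACHABLE(CONFIG_FOO) is 1 only in code that is itself built as a module and can therefore link against FOO's symbols:

#include <linux/errno.h>
#include <linux/kconfig.h>

/* Hypothetical built-in caller of an optional subsystem FOO. */
static int example_use_foo(void)
{
	if (!IS_REACHABLE(CONFIG_FOO))
		return -ENOSYS;	/* FOO is =m but we are built-in: cannot call it */

	/* Safe to call foo_do_something() here (hypothetical symbol). */
	return 0;
}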
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d6d630d31ef3..060dd7b61c6d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -103,6 +103,18 @@
(((__x) - ((__d) / 2)) / (__d)); \
} \
)
+/*
+ * Same as above but for u64 dividends. divisor must be a 32-bit
+ * number.
+ */
+#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
+{ \
+ typeof(divisor) __d = divisor; \
+ unsigned long long _tmp = (x) + (__d) / 2; \
+ do_div(_tmp, __d); \
+ _tmp; \
+} \
+)
/*
* Multiplies an integer by a fraction, while avoiding unnecessary
@@ -232,7 +244,8 @@ static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
-void might_fault(void);
+#define might_fault() __might_fault(__FILE__, __LINE__)
+void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
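A worked example for the new DIV_ROUND_CLOSEST_ULL() macro above (values are arbitrary): rounding a 64-bit nanosecond count to milliseconds with a 32-bit divisor:

#include <linux/kernel.h>

static void example_div_round(void)
{
	u64 ns = 10000000500ULL;	/* 10.0000005 seconds */
	u32 per_ms = 1000000;		/* ns per millisecond, fits in 32 bits */

	/* (10000000500 + 500000) / 1000000 = 10000, i.e. rounded to nearest ms. */
	u64 ms = DIV_ROUND_CLOSEST_ULL(ns, per_ms);

	pr_info("rounded: %llu ms\n", (unsigned long long)ms);
}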
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e60a745ac198..e804306ef5e8 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -40,6 +40,10 @@
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
#endif
+#ifndef KEXEC_CONTROL_MEMORY_GFP
+#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#endif
+
#ifndef KEXEC_CONTROL_PAGE_SIZE
#error KEXEC_CONTROL_PAGE_SIZE not defined
#endif
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index e705467ddb47..d0a1f99e24e3 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -28,7 +28,8 @@
extern void kmemleak_init(void) __ref;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
-extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
+extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp) __ref;
extern void kmemleak_free(const void *ptr) __ref;
extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
@@ -71,7 +72,8 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
gfp_t gfp)
{
}
-static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp)
{
}
static inline void kmemleak_free(const void *ptr)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 3be6bb18562d..7ae216a39c9e 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -35,18 +35,6 @@ static inline void ksm_exit(struct mm_struct *mm)
__ksm_exit(mm);
}
-/*
- * A KSM page is one of those write-protected "shared pages" or "merged pages"
- * which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
- * anon_vma, but to that page's node of the stable tree.
- */
-static inline int PageKsm(struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
-}
-
static inline struct stable_node *page_stable_node(struct page *page)
{
return PageKsm(page) ? page_rmapping(page) : NULL;
@@ -87,11 +75,6 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
-static inline int PageKsm(struct page *page)
-{
- return 0;
-}
-
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 5fc3d1083071..2b6a204bd8d4 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
}
#if BITS_PER_LONG < 64
-extern u64 __ktime_divns(const ktime_t kt, s64 div);
-static inline u64 ktime_divns(const ktime_t kt, s64 div)
+extern s64 __ktime_divns(const ktime_t kt, s64 div);
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
+ /*
+ * Negative divisors could cause an infinite loop,
+ * so bug out here.
+ */
+ BUG_ON(div < 0);
if (__builtin_constant_p(div) && !(div >> 32)) {
- u64 ns = kt.tv64;
- do_div(ns, div);
- return ns;
+ s64 ns = kt.tv64;
+ u64 tmp = ns < 0 ? -ns : ns;
+
+ do_div(tmp, div);
+ return ns < 0 ? -tmp : tmp;
} else {
return __ktime_divns(kt, div);
}
}
#else /* BITS_PER_LONG < 64 */
-# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
+{
+ /*
+ * The 32-bit implementation cannot handle negative divisors,
+ * so catch them on 64-bit as well.
+ */
+ WARN_ON(div < 0);
+ return kt.tv64 / div;
+}
#endif
static inline s64 ktime_to_us(const ktime_t kt)
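A hedged usage sketch for the signed ktime_divns() above (hypothetical helper): the divisor must now be positive (a negative one trips BUG_ON/WARN_ON), while a negative ktime_t dividend is divided correctly:

#include <linux/ktime.h>

/* Hypothetical: convert a possibly negative delta to whole milliseconds. */
static s64 example_delta_ms(ktime_t a, ktime_t b)
{
	ktime_t delta = ktime_sub(a, b);	/* may be negative */

	/* Positive constant divisor: safe with the new signed ktime_divns(). */
	return ktime_divns(delta, NSEC_PER_MSEC);
}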
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 82af5d0b996e..9564fd78c547 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -44,6 +44,10 @@
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
+#ifndef KVM_ADDRESS_SPACE_NUM
+#define KVM_ADDRESS_SPACE_NUM 1
+#endif
+
/*
* For the normal pfn, the highest 12 bits should be zero,
* so we can mask bit 62 ~ bit 52 to indicate the error pfn,
@@ -134,6 +138,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_ENABLE_IBS 23
#define KVM_REQ_DISABLE_IBS 24
#define KVM_REQ_APIC_PAGE_RELOAD 25
+#define KVM_REQ_SMI 26
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -230,6 +235,7 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
+ unsigned char fpu_counter;
wait_queue_head_t wq;
struct pid *pid;
int sigset_active;
@@ -329,6 +335,13 @@ struct kvm_kernel_irq_routing_entry {
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
+#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif
+
/*
* Note:
* memslots are not sorted by id anymore, please use id_to_memslot()
@@ -347,7 +360,7 @@ struct kvm {
spinlock_t mmu_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots *memslots;
+ struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
@@ -462,13 +475,25 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
-static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- return rcu_dereference_check(kvm->memslots,
+ return rcu_dereference_check(kvm->memslots[as_id],
srcu_read_lock_held(&kvm->srcu)
|| lockdep_is_held(&kvm->slots_lock));
}
+static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+{
+ return __kvm_memslots(kvm, 0);
+}
+
+static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
+{
+ int as_id = kvm_arch_vcpu_memslots_id(vcpu);
+
+ return __kvm_memslots(vcpu->kvm, as_id);
+}
+
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
@@ -500,21 +525,22 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm);
+void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
@@ -524,8 +550,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
- int nr_pages);
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -538,13 +564,13 @@ void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
- bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
+ bool *async, bool write_fault, bool *writable);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
@@ -573,6 +599,25 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
+int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+ int len);
+int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
+ int offset, int len);
+int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+ unsigned long len);
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
@@ -762,16 +807,10 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
}
#endif
-static inline void kvm_guest_enter(void)
+/* must be called with irqs disabled */
+static inline void __kvm_guest_enter(void)
{
- unsigned long flags;
-
- BUG_ON(preemptible());
-
- local_irq_save(flags);
guest_enter();
- local_irq_restore(flags);
-
/* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
* is very similar to exiting to userspace from rcu point of view. In
@@ -779,7 +818,23 @@ static inline void kvm_guest_enter(void)
* one time slice). Let's treat guest mode as quiescent state, just like
* we do with user-mode execution.
*/
- rcu_virt_note_context_switch(smp_processor_id());
+ if (!context_tracking_cpu_is_enabled())
+ rcu_virt_note_context_switch(smp_processor_id());
+}
+
+/* must be called with irqs disabled */
+static inline void __kvm_guest_exit(void)
+{
+ guest_exit();
+}
+
+static inline void kvm_guest_enter(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __kvm_guest_enter();
+ local_irq_restore(flags);
}
static inline void kvm_guest_exit(void)
@@ -787,7 +842,7 @@ static inline void kvm_guest_exit(void)
unsigned long flags;
local_irq_save(flags);
- guest_exit();
+ __kvm_guest_exit();
local_irq_restore(flags);
}
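A hedged sketch of how an architecture run loop is expected to use the split guest enter/exit helpers above (hypothetical fragment, not taken from any real architecture):

#include <linux/irqflags.h>
#include <linux/kvm_host.h>

/* Hypothetical arch-side run loop fragment. */
static void example_vcpu_run_once(struct kvm_vcpu *vcpu)
{
	local_irq_disable();
	__kvm_guest_enter();		/* irqs must be disabled here */

	/* ... hardware-specific world switch would go here ... */

	__kvm_guest_exit();		/* still with irqs disabled */
	local_irq_enable();
}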
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 931da7e917cf..1b47a185c2f0 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -28,6 +28,7 @@ struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
+struct kvm_memslots;
enum kvm_mr_change;
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 5ba2facd7a51..e97966d1fb8d 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -13,7 +13,6 @@
#define __LINUX_FLASH_LEDS_H_INCLUDED
#include <linux/leds.h>
-#include <uapi/linux/v4l2-controls.h>
struct device_node;
struct led_classdev_flash;
@@ -33,7 +32,7 @@ struct led_classdev_flash;
#define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8)
#define LED_NUM_FLASH_FAULTS 9
-#define LED_FLASH_MAX_SYSFS_GROUPS 7
+#define LED_FLASH_SYSFS_GROUPS_SIZE 5
struct led_flash_ops {
/* set flash brightness */
@@ -81,21 +80,7 @@ struct led_classdev_flash {
struct led_flash_setting timeout;
/* LED Flash class sysfs groups */
- const struct attribute_group *sysfs_groups[LED_FLASH_MAX_SYSFS_GROUPS];
-
- /* LEDs available for flash strobe synchronization */
- struct led_classdev_flash **sync_leds;
-
- /* Number of LEDs available for flash strobe synchronization */
- int num_sync_leds;
-
- /*
- * The identifier of the sub-led to synchronize the flash strobe with.
- * Identifiers start from 1, which reflects the first element from the
- * sync_leds array. 0 means that the flash strobe should not be
- * synchronized.
- */
- u32 sync_led_id;
+ const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE];
};
static inline struct led_classdev_flash *lcdev_to_flcdev(
diff --git a/include/linux/leds.h b/include/linux/leds.h
index f70f84f35674..9a2b000094cf 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -47,7 +47,6 @@ struct led_classdev {
#define SET_BRIGHTNESS_ASYNC (1 << 21)
#define SET_BRIGHTNESS_SYNC (1 << 22)
#define LED_DEV_CAP_FLASH (1 << 23)
-#define LED_DEV_CAP_SYNC_STROBE (1 << 24)
/* Set LED brightness level */
/* Must not sleep, use a workqueue if needed */
@@ -105,7 +104,11 @@ struct led_classdev {
extern int led_classdev_register(struct device *parent,
struct led_classdev *led_cdev);
+extern int devm_led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev);
extern void led_classdev_unregister(struct led_classdev *led_cdev);
+extern void devm_led_classdev_unregister(struct device *parent,
+ struct led_classdev *led_cdev);
extern void led_classdev_suspend(struct led_classdev *led_cdev);
extern void led_classdev_resume(struct led_classdev *led_cdev);
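A hedged sketch of a driver using the new devm_led_classdev_register() declared above (device name, probe function and brightness handling are hypothetical):

#include <linux/leds.h>
#include <linux/platform_device.h>

static enum led_brightness example_brightness;

static void example_set(struct led_classdev *cdev, enum led_brightness b)
{
	example_brightness = b;	/* a real driver would poke hardware here */
}

static struct led_classdev example_led = {
	.name		= "example:green:status",
	.brightness_set	= example_set,
	.max_brightness	= LED_FULL,
};

static int example_probe(struct platform_device *pdev)
{
	/* Unregistered automatically when the device goes away. */
	return devm_led_classdev_register(&pdev->dev, &example_led);
}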
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0081f000e34b..c92ebd100d9b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -52,10 +52,15 @@ struct lglock {
static struct lglock name = { .lock = &name ## _lock }
void lg_lock_init(struct lglock *lg, char *name);
+
void lg_local_lock(struct lglock *lg);
void lg_local_unlock(struct lglock *lg);
void lg_local_lock_cpu(struct lglock *lg, int cpu);
void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 9962c6bb1311..6db19f35f7c5 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -61,8 +61,8 @@ struct lguest_data {
u32 tsc_khz;
/* Fields initialized by the Guest at boot: */
- /* Instruction range to suppress interrupts even if enabled */
- unsigned long noirq_start, noirq_end;
+ /* Instruction to suppress interrupts even if enabled */
+ unsigned long noirq_iret;
/* Address above which page tables are all identical. */
unsigned long kernel_address;
/* The vector to try to use for system calls (0x40 or 0x80). */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c3ef58014b33..36ce37bcc963 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -134,7 +134,6 @@ enum {
ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
ATA_SHT_EMULATED = 1,
- ATA_SHT_CMD_PER_LUN = 1,
ATA_SHT_THIS_ID = -1,
ATA_SHT_USE_CLUSTERING = 1,
@@ -205,6 +204,7 @@ enum {
ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
+ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -309,6 +309,12 @@ enum {
*/
ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
+ * be a spurious PHY event, so ignore the first PHY event that
+ * occurs within 10s after the policy change.
+ */
+ ATA_TMOUT_SPURIOUS_PHY = 10000,
+
/* ATA bus states */
BUS_UNKNOWN = 0,
BUS_DMA = 1,
@@ -789,6 +795,8 @@ struct ata_link {
struct ata_eh_context eh_context;
struct ata_device device[ATA_MAX_DEVICES];
+
+ unsigned long last_lpm_change; /* when last LPM change happened */
};
#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
@@ -1202,6 +1210,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
@@ -1355,7 +1364,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
.emulated = ATA_SHT_EMULATED, \
.use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 95023fd8b00d..31db7a05dd36 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -99,7 +99,7 @@ struct klp_object {
struct klp_func *funcs;
/* internal */
- struct kobject *kobj;
+ struct kobject kobj;
struct module *mod;
enum klp_state state;
};
@@ -123,10 +123,16 @@ struct klp_patch {
enum klp_state state;
};
-extern int klp_register_patch(struct klp_patch *);
-extern int klp_unregister_patch(struct klp_patch *);
-extern int klp_enable_patch(struct klp_patch *);
-extern int klp_disable_patch(struct klp_patch *);
+#define klp_for_each_object(patch, obj) \
+ for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+ for (func = obj->funcs; func->old_name; func++)
+
+int klp_register_patch(struct klp_patch *);
+int klp_unregister_patch(struct klp_patch *);
+int klp_enable_patch(struct klp_patch *);
+int klp_disable_patch(struct klp_patch *);
#endif /* CONFIG_LIVEPATCH */
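A hedged sketch of iterating a patch with the new klp_for_each_object()/klp_for_each_func() helpers above (hypothetical debug callback, fields as declared in this header):

#include <linux/livepatch.h>
#include <linux/printk.h>

/* Hypothetical: log every function a patch touches. */
static void example_dump_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	klp_for_each_object(patch, obj) {
		klp_for_each_func(obj, func) {
			pr_info("patching %s:%s\n",
				obj->name ? obj->name : "vmlinux",
				func->old_name);
		}
	}
}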
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 74ab23176e9b..70400dc7660f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -130,8 +130,8 @@ enum bounce_type {
};
struct lock_class_stats {
- unsigned long contention_point[4];
- unsigned long contending_point[4];
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
@@ -255,6 +255,7 @@ struct held_lock {
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
+ unsigned int pin_count;
};
/*
@@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
+extern void lock_pin_lock(struct lockdep_map *lock);
+extern void lock_unpin_lock(struct lockdep_map *lock);
+
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
+#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
+#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
+
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_off(void)
@@ -420,6 +427,9 @@ struct lock_class_key { };
#define lockdep_recursing(tsk) (0)
+#define lockdep_pin_lock(l) do { (void)(l); } while (0)
+#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
+
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
@@ -531,8 +541,13 @@ do { \
# define might_lock_read(lock) do { } while (0)
#endif
-#ifdef CONFIG_PROVE_RCU
+#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
+#else
+static inline void
+lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+{
+}
#endif
#endif /* __LINUX_LOCKDEP_H */
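A hedged sketch of the intended lockdep_pin_lock()/lockdep_unpin_lock() pattern above (hypothetical lock and critical section): pinning lets lockdep complain if anything releases the lock behind the caller's back between the two calls:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

/* Hypothetical: assert that nobody drops example_lock inside the region. */
static void example_pinned_section(void)
{
	raw_spin_lock(&example_lock);
	lockdep_pin_lock(&example_lock);

	/* ... callbacks that must not release example_lock run here ... */

	lockdep_unpin_lock(&example_lock);
	raw_spin_unlock(&example_lock);
}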
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 611b69fa8594..1f7bc630d225 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -54,11 +54,16 @@ struct mbus_dram_target_info
*/
#ifdef CONFIG_PLAT_ORION
extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
+extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void);
#else
static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
{
return NULL;
}
+static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
+{
+ return NULL;
+}
#endif
int mvebu_mbus_save_cpu_target(u32 *store_addr);
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index 66c30a763b10..11f00cdabe3d 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -23,7 +23,8 @@ struct mdio_gpio_platform_data {
bool mdio_active_low;
bool mdo_active_low;
- unsigned int phy_mask;
+ u32 phy_mask;
+ u32 phy_ignore_ta_mask;
int irqs[PHY_MAX_ADDR];
/* reset callback */
int (*reset)(struct mii_bus *bus);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e8cc45307f8f..0215ffd63069 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -21,7 +21,11 @@
#define INIT_PHYSMEM_REGIONS 4
/* Definition of memblock flags. */
-#define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */
+enum {
+ MEMBLOCK_NONE = 0x0, /* No special request */
+ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
+ MEMBLOCK_MIRROR = 0x2, /* mirrored region */
+};
struct memblock_region {
phys_addr_t base;
@@ -61,7 +65,7 @@ extern bool movable_node_enabled;
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- int nid);
+ int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
@@ -75,6 +79,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
+int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
+ulong choose_memblock_flags(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
@@ -85,11 +91,13 @@ int memblock_remove_range(struct memblock_type *type,
phys_addr_t base,
phys_addr_t size);
-void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range(u64 *idx, int nid, ulong flags,
+ struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+ struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
@@ -100,16 +108,17 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
* @type_a: ptr to memblock_type to iterate
* @type_b: ptr to memblock_type which excludes from the iteration
* @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range(i, type_a, type_b, nid, \
+#define for_each_mem_range(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
- for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \
+ for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
- __next_mem_range(&i, nid, type_a, type_b, \
+ __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
/**
@@ -119,17 +128,18 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
* @type_a: ptr to memblock_type to iterate
* @type_b: ptr to memblock_type which excludes from the iteration
* @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range_rev(i, type_a, type_b, nid, \
+#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \
- __next_mem_range_rev(&i, nid, type_a, type_b, \
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
- __next_mem_range_rev(&i, nid, type_a, type_b, \
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
#ifdef CONFIG_MOVABLE_NODE
@@ -153,6 +163,11 @@ static inline bool movable_node_is_enabled(void)
}
#endif
+static inline bool memblock_is_mirror(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_MIRROR;
+}
+
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
@@ -181,13 +196,14 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
*
* Walks over free (memory && !reserved) areas of memblock. Available as
* soon as memblock is initialized.
*/
-#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
+#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
- nid, p_start, p_end, p_nid)
+ nid, flags, p_start, p_end, p_nid)
/**
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -196,13 +212,15 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
*
* Walks over free (memory && !reserved) areas of memblock in reverse
* order. Available as soon as memblock is initialized.
*/
-#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
+#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
+ p_nid) \
for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
- nid, p_start, p_end, p_nid)
+ nid, flags, p_start, p_end, p_nid)
static inline void memblock_set_region_flags(struct memblock_region *r,
unsigned long flags)
@@ -273,7 +291,8 @@ static inline bool memblock_bottom_up(void) { return false; }
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end);
+ phys_addr_t start, phys_addr_t end,
+ ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -365,6 +384,14 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
#define __initdata_memblock
#endif
+#ifdef CONFIG_MEMTEST
+extern void early_memtest(phys_addr_t start, phys_addr_t end);
+#else
+static inline void early_memtest(phys_addr_t start, phys_addr_t end)
+{
+}
+#endif
+
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
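A hedged sketch of an early-boot walk with the new flags argument to the memblock iterators above (hypothetical caller; MEMBLOCK_NONE keeps the old behaviour, choose_memblock_flags() lets the system prefer mirrored memory):

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/* Hypothetical: sum up free memory matching the system's preferred flags. */
static phys_addr_t __init example_count_free(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, choose_memblock_flags(),
				&start, &end, NULL)
		total += end - start;

	return total;
}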
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 72dff5fb0d0c..73b02b0a8f60 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -41,6 +41,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
@@ -67,6 +68,8 @@ enum mem_cgroup_events_index {
};
#ifdef CONFIG_MEMCG
+extern struct cgroup_subsys_state *mem_cgroup_root_css;
+
void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
unsigned int nr);
@@ -112,6 +115,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
+extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
@@ -195,6 +199,8 @@ void mem_cgroup_split_huge_fixup(struct page *head);
#else /* CONFIG_MEMCG */
struct mem_cgroup;
+#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+
static inline void mem_cgroup_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx,
unsigned int nr)
@@ -382,6 +388,29 @@ enum {
OVER_LIMIT,
};
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
+struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
+void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
+ unsigned long *pdirty, unsigned long *pwriteback);
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
+ unsigned long *pavail,
+ unsigned long *pdirty,
+ unsigned long *pwriteback)
+{
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
@@ -463,6 +492,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
if (!memcg_kmem_enabled())
return true;
+ if (gfp & __GFP_NOACCOUNT)
+ return true;
/*
* __GFP_NOFAIL allocations will move on even if charging is not
* possible. Therefore we don't even try, and have this allocation
@@ -522,6 +553,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
if (!memcg_kmem_enabled())
return cachep;
+ if (gfp & __GFP_NOACCOUNT)
+ return cachep;
if (gfp & __GFP_NOFAIL)
return cachep;
if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8f1a41951df9..6ffa0ac7f7d6 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -192,6 +192,9 @@ extern void get_page_bootmem(unsigned long ingo, struct page *page,
void get_online_mems(void);
void put_online_mems(void);
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -231,6 +234,9 @@ static inline int try_online_node(int nid)
static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}
+static inline void mem_hotplug_begin(void) {}
+static inline void mem_hotplug_done(void) {}
+
#endif /* ! CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_MEMORY_HOTREMOVE
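A hedged sketch of the intended bracket usage of the newly exported mem_hotplug_begin()/mem_hotplug_done() pair; the wrapper function is hypothetical.

#include <linux/memory_hotplug.h>

/* Illustrative only: serialize a block of work against concurrent memory
 * hot-add/hot-remove. When CONFIG_MEMORY_HOTPLUG is disabled, the stubs
 * above compile away, so callers need no #ifdefs of their own.
 */
static void walk_memory_exclusively(void (*fn)(void))
{
	mem_hotplug_begin();
	fn();
	mem_hotplug_done();
}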
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 39ed62ab5b8a..69b6951e8fd2 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -29,14 +29,15 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int nid);
-extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
extern void mempool_free(void *element, mempool_t *pool);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
- * a slab that is passed in through pool_data.
+ * a slab cache that is passed in through pool_data.
+ * Note: the slab cache must not have a constructor (ctor) function.
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
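A short sketch of the slab-backed mempool pattern described in the comment above, reflecting the new mempool_resize() signature; the cache name and object type are assumptions for illustration.

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_obj { int payload; };

/* Illustrative only: back a mempool with a slab cache by passing the cache
 * as pool_data. Per the note above, the cache is created without a ctor.
 */
static mempool_t *create_obj_pool(void)
{
	struct kmem_cache *cache;

	cache = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
				  0, 0, NULL);
	if (!cache)
		return NULL;

	/* 16 pre-allocated elements; resizing later needs no gfp_mask. */
	return mempool_create(16, mempool_alloc_slab, mempool_free_slab,
			      cache);
}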
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 910e3aa1e965..2f434f4f79a1 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -24,6 +24,7 @@ enum arizona_type {
WM5102 = 1,
WM5110 = 2,
WM8997 = 3,
+ WM8280 = 4,
};
#define ARIZONA_IRQ_GP1 0
@@ -116,6 +117,7 @@ struct arizona {
int num_core_supplies;
struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES];
struct regulator *dcvdd;
+ bool has_fully_powered_off;
struct arizona_pdata pdata;
@@ -126,7 +128,7 @@ struct arizona {
struct regmap_irq_chip_data *aod_irq_chip;
struct regmap_irq_chip_data *irq_chip;
- bool hpdet_magic;
+ bool hpdet_clamp;
unsigned int hp_ena;
struct mutex clk_lock;
@@ -152,7 +154,15 @@ int arizona_request_irq(struct arizona *arizona, int irq, char *name,
void arizona_free_irq(struct arizona *arizona, int irq, void *data);
int arizona_set_irq_wake(struct arizona *arizona, int irq, int on);
+#ifdef CONFIG_MFD_WM5102
int wm5102_patch(struct arizona *arizona);
+#else
+static inline int wm5102_patch(struct arizona *arizona)
+{
+ return 0;
+}
+#endif
+
int wm5110_patch(struct arizona *arizona);
int wm8997_patch(struct arizona *arizona);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 4578c72c9b86..f6722677e6d0 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -11,31 +11,26 @@
#ifndef _ARIZONA_PDATA_H
#define _ARIZONA_PDATA_H
-#define ARIZONA_GPN_DIR 0x8000 /* GPN_DIR */
+#include <dt-bindings/mfd/arizona.h>
+
#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
-#define ARIZONA_GPN_PU 0x4000 /* GPN_PU */
#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
-#define ARIZONA_GPN_PD 0x2000 /* GPN_PD */
#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
-#define ARIZONA_GPN_LVL 0x0800 /* GPN_LVL */
#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
-#define ARIZONA_GPN_POL 0x0400 /* GPN_POL */
#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
-#define ARIZONA_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
-#define ARIZONA_GPN_DB 0x0100 /* GPN_DB */
#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
@@ -45,23 +40,10 @@
#define ARIZONA_MAX_GPIO 5
-#define ARIZONA_32KZ_MCLK1 1
-#define ARIZONA_32KZ_MCLK2 2
-#define ARIZONA_32KZ_NONE 3
-
#define ARIZONA_MAX_INPUT 4
-#define ARIZONA_DMIC_MICVDD 0
-#define ARIZONA_DMIC_MICBIAS1 1
-#define ARIZONA_DMIC_MICBIAS2 2
-#define ARIZONA_DMIC_MICBIAS3 3
-
#define ARIZONA_MAX_MICBIAS 3
-#define ARIZONA_INMODE_DIFF 0
-#define ARIZONA_INMODE_SE 1
-#define ARIZONA_INMODE_DMIC 2
-
#define ARIZONA_MAX_OUTPUT 6
#define ARIZONA_MAX_AIF 3
@@ -112,7 +94,7 @@ struct arizona_pdata {
int gpio_base;
/** Pin state for GPIO pins */
- int gpio_defaults[ARIZONA_MAX_GPIO];
+ unsigned int gpio_defaults[ARIZONA_MAX_GPIO];
/**
* Maximum number of channels clocks will be generated for,
@@ -174,7 +156,10 @@ struct arizona_pdata {
/** MICBIAS configurations */
struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS];
- /** Mode of input structures */
+ /**
+ * Mode of input structures
+ * One of the ARIZONA_INMODE_xxx values
+ */
int inmode[ARIZONA_MAX_INPUT];
/** Mode for outputs */
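A hedged board-file sketch showing why the dt-bindings header is now included here: the ARIZONA_INMODE_* constants are shared between platform data and device tree. The pdata variable and channel assignments are assumptions.

#include <dt-bindings/mfd/arizona.h>
#include <linux/mfd/arizona/pdata.h>

/* Illustrative only: IN1 configured differential, IN2 as a digital mic,
 * using the constants that replaced the local ARIZONA_INMODE_* defines.
 */
static struct arizona_pdata board_arizona_pdata = {
	.inmode = {
		[0] = ARIZONA_INMODE_DIFF,
		[1] = ARIZONA_INMODE_DMIC,
	},
};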
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index aacc10d7789c..3499d36e6067 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -2515,9 +2515,12 @@
#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */
#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */
#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */
-#define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */
-#define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */
-#define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1_MODE_MASK 0x0400 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_MODE_SHIFT 10 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_MODE_WIDTH 1 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_SINGLE_ENDED_MASK 0x0200 /* IN1_MODE - [9] */
+#define ARIZONA_IN1_SINGLE_ENDED_SHIFT 9 /* IN1_MODE - [9] */
+#define ARIZONA_IN1_SINGLE_ENDED_WIDTH 1 /* IN1_MODE - [9] */
#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */
#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */
#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */
@@ -2588,9 +2591,12 @@
#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */
#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */
#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */
-#define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */
-#define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */
-#define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2_MODE_MASK 0x0400 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_MODE_SHIFT 10 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_MODE_WIDTH 1 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_SINGLE_ENDED_MASK 0x0200 /* IN2_MODE - [9] */
+#define ARIZONA_IN2_SINGLE_ENDED_SHIFT 9 /* IN2_MODE - [9] */
+#define ARIZONA_IN2_SINGLE_ENDED_WIDTH 1 /* IN2_MODE - [9] */
#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */
#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */
#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */
@@ -2661,9 +2667,12 @@
#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */
#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */
#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */
-#define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */
-#define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */
-#define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3_MODE_MASK 0x0400 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_MODE_SHIFT 10 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_MODE_WIDTH 1 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_SINGLE_ENDED_MASK 0x0200 /* IN3_MODE - [9] */
+#define ARIZONA_IN3_SINGLE_ENDED_SHIFT 9 /* IN3_MODE - [9] */
+#define ARIZONA_IN3_SINGLE_ENDED_WIDTH 1 /* IN3_MODE - [9] */
#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */
#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */
#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */
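A sketch, under stated assumptions, of how a driver would use the split IN1_MODE/IN1_SINGLE_ENDED fields defined above: the one-bit fields are now updated individually rather than as the old two-bit IN1_MODE field. The regmap handle and the choice of ARIZONA_IN1L_CONTROL as the target register are assumptions.

#include <linux/regmap.h>
#include <linux/mfd/arizona/registers.h>

/* Illustrative only: toggle single-ended mode on IN1 without touching the
 * separate analogue/digital mode bit.
 */
static int arizona_set_in1_single_ended(struct regmap *regmap, bool se)
{
	return regmap_update_bits(regmap, ARIZONA_IN1L_CONTROL,
				  ARIZONA_IN1_SINGLE_ENDED_MASK,
				  se ? ARIZONA_IN1_SINGLE_ENDED_MASK : 0);
}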
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index dfabd6db7ddf..02f97dc568ac 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -14,6 +14,7 @@
enum {
AXP202_ID = 0,
AXP209_ID,
+ AXP221_ID,
AXP288_ID,
NR_AXP20X_VARIANTS,
};
@@ -45,6 +46,28 @@ enum {
#define AXP20X_V_LTF_DISCHRG 0x3c
#define AXP20X_V_HTF_DISCHRG 0x3d
+#define AXP22X_PWR_OUT_CTRL1 0x10
+#define AXP22X_PWR_OUT_CTRL2 0x12
+#define AXP22X_PWR_OUT_CTRL3 0x13
+#define AXP22X_DLDO1_V_OUT 0x15
+#define AXP22X_DLDO2_V_OUT 0x16
+#define AXP22X_DLDO3_V_OUT 0x17
+#define AXP22X_DLDO4_V_OUT 0x18
+#define AXP22X_ELDO1_V_OUT 0x19
+#define AXP22X_ELDO2_V_OUT 0x1a
+#define AXP22X_ELDO3_V_OUT 0x1b
+#define AXP22X_DC5LDO_V_OUT 0x1c
+#define AXP22X_DCDC1_V_OUT 0x21
+#define AXP22X_DCDC2_V_OUT 0x22
+#define AXP22X_DCDC3_V_OUT 0x23
+#define AXP22X_DCDC4_V_OUT 0x24
+#define AXP22X_DCDC5_V_OUT 0x25
+#define AXP22X_DCDC23_V_RAMP_CTRL 0x27
+#define AXP22X_ALDO1_V_OUT 0x28
+#define AXP22X_ALDO2_V_OUT 0x29
+#define AXP22X_ALDO3_V_OUT 0x2a
+#define AXP22X_CHRG_CTRL3 0x35
+
/* Interrupt */
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
@@ -100,6 +123,9 @@ enum {
#define AXP20X_VBUS_MON 0x8b
#define AXP20X_OVER_TMP 0x8f
+#define AXP22X_PWREN_CTRL1 0x8c
+#define AXP22X_PWREN_CTRL2 0x8d
+
/* GPIO */
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
@@ -108,6 +134,11 @@ enum {
#define AXP20X_GPIO20_SS 0x94
#define AXP20X_GPIO3_CTRL 0x95
+#define AXP22X_LDO_IO0_V_OUT 0x91
+#define AXP22X_LDO_IO1_V_OUT 0x93
+#define AXP22X_GPIO_STATE 0x94
+#define AXP22X_GPIO_PULL_DOWN 0x95
+
/* Battery */
#define AXP20X_CHRG_CC_31_24 0xb0
#define AXP20X_CHRG_CC_23_16 0xb1
@@ -120,6 +151,9 @@ enum {
#define AXP20X_CC_CTRL 0xb8
#define AXP20X_FG_RES 0xb9
+/* AXP22X specific registers */
+#define AXP22X_BATLOW_THRES1 0xe6
+
/* AXP288 specific registers */
#define AXP288_PMIC_ADC_H 0x56
#define AXP288_PMIC_ADC_L 0x57
@@ -158,6 +192,30 @@ enum {
AXP20X_REG_ID_MAX,
};
+enum {
+ AXP22X_DCDC1 = 0,
+ AXP22X_DCDC2,
+ AXP22X_DCDC3,
+ AXP22X_DCDC4,
+ AXP22X_DCDC5,
+ AXP22X_DC1SW,
+ AXP22X_DC5LDO,
+ AXP22X_ALDO1,
+ AXP22X_ALDO2,
+ AXP22X_ALDO3,
+ AXP22X_ELDO1,
+ AXP22X_ELDO2,
+ AXP22X_ELDO3,
+ AXP22X_DLDO1,
+ AXP22X_DLDO2,
+ AXP22X_DLDO3,
+ AXP22X_DLDO4,
+ AXP22X_RTC_LDO,
+ AXP22X_LDO_IO0,
+ AXP22X_LDO_IO1,
+ AXP22X_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
@@ -199,6 +257,34 @@ enum {
AXP20X_IRQ_GPIO0_INPUT,
};
+enum axp22x_irqs {
+ AXP22X_IRQ_ACIN_OVER_V = 1,
+ AXP22X_IRQ_ACIN_PLUGIN,
+ AXP22X_IRQ_ACIN_REMOVAL,
+ AXP22X_IRQ_VBUS_OVER_V,
+ AXP22X_IRQ_VBUS_PLUGIN,
+ AXP22X_IRQ_VBUS_REMOVAL,
+ AXP22X_IRQ_VBUS_V_LOW,
+ AXP22X_IRQ_BATT_PLUGIN,
+ AXP22X_IRQ_BATT_REMOVAL,
+ AXP22X_IRQ_BATT_ENT_ACT_MODE,
+ AXP22X_IRQ_BATT_EXIT_ACT_MODE,
+ AXP22X_IRQ_CHARG,
+ AXP22X_IRQ_CHARG_DONE,
+ AXP22X_IRQ_BATT_TEMP_HIGH,
+ AXP22X_IRQ_BATT_TEMP_LOW,
+ AXP22X_IRQ_DIE_TEMP_HIGH,
+ AXP22X_IRQ_PEK_SHORT,
+ AXP22X_IRQ_PEK_LONG,
+ AXP22X_IRQ_LOW_PWR_LVL1,
+ AXP22X_IRQ_LOW_PWR_LVL2,
+ AXP22X_IRQ_TIMER,
+ AXP22X_IRQ_PEK_RIS_EDGE,
+ AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_GPIO1_INPUT,
+ AXP22X_IRQ_GPIO0_INPUT,
+};
+
enum axp288_irqs {
AXP288_IRQ_VBUS_FALL = 2,
AXP288_IRQ_VBUS_RISE,
@@ -275,4 +361,11 @@ struct axp20x_fg_pdata {
int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
};
+struct axp20x_chrg_pdata {
+ int max_cc;
+ int max_cv;
+ int def_cc;
+ int def_cv;
+};
+
#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 0e166b92f5b4..da72671a42fa 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -16,10 +16,30 @@
#ifndef __LINUX_MFD_CROS_EC_H
#define __LINUX_MFD_CROS_EC_H
+#include <linux/cdev.h>
+#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/mutex.h>
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_PD_NAME "cros_pd"
+
+/*
+ * The EC is unresponsive for a time after a reboot command. Add a
+ * simple delay to make sure that the bus stays locked.
+ */
+#define EC_REBOOT_DELAY_MS 50
+
+/*
+ * Max bus-specific overhead incurred by requests/responses.
+ * I2C requires 1 additional byte for requests.
+ * I2C requires 2 additional bytes for responses.
+ */

+#define EC_PROTO_VERSION_UNKNOWN 0
+#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_RESPONSE_OVERHEAD 2
+
/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
*/
@@ -38,33 +58,37 @@ enum {
/*
* @version: Command version number (often 0)
* @command: Command to send (EC_CMD_...)
- * @outdata: Outgoing data to EC
* @outsize: Outgoing length in bytes
- * @indata: Where to put the incoming data from EC
* @insize: Max number of bytes to accept from EC
* @result: EC's response to the command (separate from communication failure)
+ * @data: Where to put the incoming data from EC and outgoing data to EC
*/
struct cros_ec_command {
uint32_t version;
uint32_t command;
- uint8_t *outdata;
uint32_t outsize;
- uint8_t *indata;
uint32_t insize;
uint32_t result;
+ uint8_t data[0];
};
/**
* struct cros_ec_device - Information about a ChromeOS EC device
*
- * @ec_name: name of EC device (e.g. 'chromeos-ec')
* @phys_name: name of physical comms layer (e.g. 'i2c-4')
- * @dev: Device pointer
+ * @dev: Device pointer for physical comms device
* @was_wake_device: true if this device was set to wake the system from
* sleep at the last suspend
+ * @cmd_readmem: direct read of the EC memory-mapped region, if supported
+ * @offset is within EC_LPC_ADDR_MEMMAP region.
+ * @bytes: number of bytes to read. Zero means "read a string" (including
+ * the trailing '\0'). At most EC_MEMMAP_SIZE bytes can be read.
+ * Caller must ensure that the buffer is large enough for the result when
+ * reading a string.
*
* @priv: Private data
* @irq: Interrupt to use
+ * @id: Device id
* @din: input buffer (for data from EC)
* @dout: output buffer (for data to EC)
* \note
@@ -76,37 +100,72 @@ struct cros_ec_command {
* to using dword.
* @din_size: size of din buffer to allocate (zero to use static din)
* @dout_size: size of dout buffer to allocate (zero to use static dout)
- * @parent: pointer to parent device (e.g. i2c or spi device)
* @wake_enabled: true if this device can wake the system from sleep
* @cmd_xfer: send command to EC and get response
* Returns the number of bytes received if the communication succeeded, but
* that doesn't mean the EC was happy with the command. The caller
* should check msg.result for the EC's result code.
+ * @pkt_xfer: send packet to EC and get response
* @lock: one transaction at a time
*/
struct cros_ec_device {
/* These are used by other drivers that want to talk to the EC */
- const char *ec_name;
const char *phys_name;
struct device *dev;
bool was_wake_device;
struct class *cros_class;
+ int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest);
/* These are used to implement the platform-specific interface */
+ u16 max_request;
+ u16 max_response;
+ u16 max_passthru;
+ u16 proto_version;
void *priv;
int irq;
- uint8_t *din;
- uint8_t *dout;
+ u8 *din;
+ u8 *dout;
int din_size;
int dout_size;
- struct device *parent;
bool wake_enabled;
int (*cmd_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
+ int (*pkt_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
struct mutex lock;
};
+/* struct cros_ec_platform - ChromeOS EC platform information
+ *
+ * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
+ * used in /dev/ and sysfs.
+ * @cmd_offset: offset to apply for each command. Set when
+ * registering a device behind another one.
+ */
+struct cros_ec_platform {
+ const char *ec_name;
+ u16 cmd_offset;
+};
+
+/*
+ * struct cros_ec_dev - ChromeOS EC device entry point
+ *
+ * @class_dev: Device structure used in sysfs
+ * @cdev: Character device structure in /dev
+ * @ec_dev: cros_ec_device structure to talk to the physical device
+ * @dev: pointer to the platform device
+ * @cmd_offset: offset to apply for each command.
+ */
+struct cros_ec_dev {
+ struct device class_dev;
+ struct cdev cdev;
+ struct cros_ec_device *ec_dev;
+ struct device *dev;
+ u16 cmd_offset;
+};
+
/**
* cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
*
@@ -185,4 +244,16 @@ int cros_ec_remove(struct cros_ec_device *ec_dev);
*/
int cros_ec_register(struct cros_ec_device *ec_dev);
+/**
+ * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
+ *
+ * @ec_dev: Device to query
+ * @return 0 if ok, -ve on error
+ */
+int cros_ec_query_all(struct cros_ec_device *ec_dev);
+
+/* sysfs stuff */
+extern struct attribute_group cros_ec_attr_group;
+extern struct attribute_group cros_ec_lightbar_attr_group;
+
#endif /* __LINUX_MFD_CROS_EC_H */
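A hedged sketch of how a caller uses the reworked struct cros_ec_command now that outdata/indata are folded into the trailing data[] array: one allocation sized for the larger direction, then cmd_xfer(). EC_CMD_HELLO and its param/response structs come from cros_ec_commands.h; the helper name is an assumption and error handling is trimmed.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mfd/cros_ec.h>

static int cros_ec_say_hello(struct cros_ec_device *ec)
{
	struct cros_ec_command *msg;
	struct ec_params_hello *params;
	int ret;

	/* One buffer covers both outgoing params and incoming response. */
	msg = kzalloc(sizeof(*msg) + max(sizeof(struct ec_params_hello),
					 sizeof(struct ec_response_hello)),
		      GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->version = 0;
	msg->command = EC_CMD_HELLO;
	msg->outsize = sizeof(*params);
	msg->insize = sizeof(struct ec_response_hello);

	params = (struct ec_params_hello *)msg->data;
	params->in_data = 0xa0b0c0d0;

	ret = ec->cmd_xfer(ec, msg);	/* caller still checks msg->result */
	kfree(msg);
	return ret;
}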
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index a49cd41feea7..13b630c10d4c 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -515,7 +515,7 @@ struct ec_host_response {
/*
* Notes on commands:
*
- * Each command is an 8-byte command value. Commands which take params or
+ * Each command is a 16-bit command value. Commands which take params or
* return response data specify structs for that data. If no struct is
* specified, the command does not input or output data, respectively.
* Parameter/response length is implicit in the structs. Some underlying
@@ -966,7 +966,7 @@ struct rgb_s {
/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
* host command, but the alignment is the same regardless. Keep it that way.
*/
-struct lightbar_params {
+struct lightbar_params_v0 {
/* Timing */
int32_t google_ramp_up;
int32_t google_ramp_down;
@@ -1000,32 +1000,81 @@ struct lightbar_params {
struct rgb_s color[8]; /* 0-3 are Google colors */
} __packed;
+struct lightbar_params_v1 {
+ /* Timing */
+ int32_t google_ramp_up;
+ int32_t google_ramp_down;
+ int32_t s3s0_ramp_up;
+ int32_t s0_tick_delay[2]; /* AC=0/1 */
+ int32_t s0a_tick_delay[2]; /* AC=0/1 */
+ int32_t s0s3_ramp_down;
+ int32_t s3_sleep_for;
+ int32_t s3_ramp_up;
+ int32_t s3_ramp_down;
+ int32_t tap_tick_delay;
+ int32_t tap_display_time;
+
+ /* Tap-for-battery params */
+ uint8_t tap_pct_red;
+ uint8_t tap_pct_green;
+ uint8_t tap_seg_min_on;
+ uint8_t tap_seg_max_on;
+ uint8_t tap_seg_osc;
+ uint8_t tap_idx[3];
+
+ /* Oscillation */
+ uint8_t osc_min[2]; /* AC=0/1 */
+ uint8_t osc_max[2]; /* AC=0/1 */
+ uint8_t w_ofs[2]; /* AC=0/1 */
+
+ /* Brightness limits based on the backlight and AC. */
+ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_min[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_max[2]; /* AC=0/1 */
+
+ /* Battery level thresholds */
+ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
+
+ /* Map [AC][battery_level] to color index */
+ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
+ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
+
+ /* Color palette */
+ struct rgb_s color[8]; /* 0-3 are Google colors */
+} __packed;
+
struct ec_params_lightbar {
uint8_t cmd; /* Command (see enum lightbar_command) */
union {
struct {
/* no args */
- } dump, off, on, init, get_seq, get_params, version;
+ } dump, off, on, init, get_seq, get_params_v0, get_params_v1,
+ version, get_brightness, get_demo;
- struct num {
+ struct {
uint8_t num;
- } brightness, seq, demo;
+ } set_brightness, seq, demo;
- struct reg {
+ struct {
uint8_t ctrl, reg, value;
} reg;
- struct rgb {
+ struct {
uint8_t led, red, green, blue;
- } rgb;
+ } set_rgb;
+
+ struct {
+ uint8_t led;
+ } get_rgb;
- struct lightbar_params set_params;
+ struct lightbar_params_v0 set_params_v0;
+ struct lightbar_params_v1 set_params_v1;
};
} __packed;
struct ec_response_lightbar {
union {
- struct dump {
+ struct {
struct {
uint8_t reg;
uint8_t ic0;
@@ -1033,20 +1082,26 @@ struct ec_response_lightbar {
} vals[23];
} dump;
- struct get_seq {
+ struct {
uint8_t num;
- } get_seq;
+ } get_seq, get_brightness, get_demo;
- struct lightbar_params get_params;
+ struct lightbar_params_v0 get_params_v0;
+ struct lightbar_params_v1 get_params_v1;
- struct version {
+ struct {
uint32_t num;
uint32_t flags;
} version;
struct {
+ uint8_t red, green, blue;
+ } get_rgb;
+
+ struct {
/* no return params */
- } off, on, init, brightness, seq, reg, rgb, demo, set_params;
+ } off, on, init, set_brightness, seq, reg, set_rgb,
+ demo, set_params_v0, set_params_v1;
};
} __packed;
@@ -1056,15 +1111,20 @@ enum lightbar_command {
LIGHTBAR_CMD_OFF = 1,
LIGHTBAR_CMD_ON = 2,
LIGHTBAR_CMD_INIT = 3,
- LIGHTBAR_CMD_BRIGHTNESS = 4,
+ LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
LIGHTBAR_CMD_SEQ = 5,
LIGHTBAR_CMD_REG = 6,
- LIGHTBAR_CMD_RGB = 7,
+ LIGHTBAR_CMD_SET_RGB = 7,
LIGHTBAR_CMD_GET_SEQ = 8,
LIGHTBAR_CMD_DEMO = 9,
- LIGHTBAR_CMD_GET_PARAMS = 10,
- LIGHTBAR_CMD_SET_PARAMS = 11,
+ LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
+ LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
LIGHTBAR_CMD_VERSION = 12,
+ LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
+ LIGHTBAR_CMD_GET_RGB = 14,
+ LIGHTBAR_CMD_GET_DEMO = 15,
+ LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
+ LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
LIGHTBAR_NUM_CMDS
};
@@ -1421,8 +1481,40 @@ struct ec_response_rtc {
/*****************************************************************************/
/* Port80 log access */
+/* Maximum entries that can be read/written in a single command */
+#define EC_PORT80_SIZE_MAX 32
+
/* Get last port80 code from previous boot */
#define EC_CMD_PORT80_LAST_BOOT 0x48
+#define EC_CMD_PORT80_READ 0x48
+
+enum ec_port80_subcmd {
+ EC_PORT80_GET_INFO = 0,
+ EC_PORT80_READ_BUFFER,
+};
+
+struct ec_params_port80_read {
+ uint16_t subcmd;
+ union {
+ struct {
+ uint32_t offset;
+ uint32_t num_entries;
+ } read_buffer;
+ };
+} __packed;
+
+struct ec_response_port80_read {
+ union {
+ struct {
+ uint32_t writes;
+ uint32_t history_size;
+ uint32_t last_boot;
+ } get_info;
+ struct {
+ uint16_t codes[EC_PORT80_SIZE_MAX];
+ } data;
+ };
+} __packed;
struct ec_response_port80_last_boot {
uint16_t code;
@@ -1782,6 +1874,7 @@ struct ec_params_gpio_set {
/* Get GPIO value */
#define EC_CMD_GPIO_GET 0x93
+/* Version 0 of input params and response */
struct ec_params_gpio_get {
char name[32];
} __packed;
@@ -1789,6 +1882,38 @@ struct ec_response_gpio_get {
uint8_t val;
} __packed;
+/* Version 1 of input params and response */
+struct ec_params_gpio_get_v1 {
+ uint8_t subcmd;
+ union {
+ struct {
+ char name[32];
+ } get_value_by_name;
+ struct {
+ uint8_t index;
+ } get_info;
+ };
+} __packed;
+
+struct ec_response_gpio_get_v1 {
+ union {
+ struct {
+ uint8_t val;
+ } get_value_by_name, get_count;
+ struct {
+ uint8_t val;
+ char name[32];
+ uint32_t flags;
+ } get_info;
+ };
+} __packed;
+
+enum gpio_get_subcmd {
+ EC_GPIO_GET_BY_NAME = 0,
+ EC_GPIO_GET_COUNT = 1,
+ EC_GPIO_GET_INFO = 2,
+};
+
/*****************************************************************************/
/* I2C commands. Only available when flash write protect is unlocked. */
@@ -1857,13 +1982,21 @@ struct ec_params_charge_control {
/*****************************************************************************/
/*
- * Cut off battery power output if the battery supports.
+ * Cut off battery power immediately or after the host has shut down.
*
- * For unsupported battery, just don't implement this command and lets EC
- * return EC_RES_INVALID_COMMAND.
+ * Returns EC_RES_INVALID_COMMAND if unsupported by a board/battery,
+ * EC_RES_SUCCESS if the command was successful, or
+ * EC_RES_ERROR if the cut off command failed.
*/
+
#define EC_CMD_BATTERY_CUT_OFF 0x99
+#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
+
+struct ec_params_battery_cutoff {
+ uint8_t flags;
+} __packed;
+
/*****************************************************************************/
/* USB port mux control. */
@@ -2142,6 +2275,32 @@ struct ec_params_sb_wr_block {
} __packed;
/*****************************************************************************/
+/* Battery vendor parameters
+ *
+ * Get or set vendor-specific parameters in the battery. Implementations may
+ * differ between boards or batteries. On a set operation, the response
+ * contains the actual value set, which may be rounded or clipped from the
+ * requested value.
+ */
+
+#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4
+
+enum ec_battery_vendor_param_mode {
+ BATTERY_VENDOR_PARAM_MODE_GET = 0,
+ BATTERY_VENDOR_PARAM_MODE_SET,
+};
+
+struct ec_params_battery_vendor_param {
+ uint32_t param;
+ uint32_t value;
+ uint8_t mode;
+} __packed;
+
+struct ec_response_battery_vendor_param {
+ uint32_t value;
+} __packed;
+
+/*****************************************************************************/
/* System commands */
/*
@@ -2338,6 +2497,80 @@ struct ec_params_reboot_ec {
/*****************************************************************************/
/*
+ * PD commands
+ *
+ * These commands are for PD MCU communication.
+ */
+
+/* EC to PD MCU exchange status command */
+#define EC_CMD_PD_EXCHANGE_STATUS 0x100
+
+/* Status of EC being sent to PD */
+struct ec_params_pd_status {
+ int8_t batt_soc; /* battery state of charge */
+} __packed;
+
+/* Status of PD being sent back to EC */
+struct ec_response_pd_status {
+ int8_t status; /* PD MCU status */
+ uint32_t curr_lim_ma; /* input current limit */
+} __packed;
+
+/* Set USB type-C port role and muxes */
+#define EC_CMD_USB_PD_CONTROL 0x101
+
+enum usb_pd_control_role {
+ USB_PD_CTRL_ROLE_NO_CHANGE = 0,
+ USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
+ USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
+ USB_PD_CTRL_ROLE_FORCE_SINK = 3,
+ USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
+};
+
+enum usb_pd_control_mux {
+ USB_PD_CTRL_MUX_NO_CHANGE = 0,
+ USB_PD_CTRL_MUX_NONE = 1,
+ USB_PD_CTRL_MUX_USB = 2,
+ USB_PD_CTRL_MUX_DP = 3,
+ USB_PD_CTRL_MUX_DOCK = 4,
+ USB_PD_CTRL_MUX_AUTO = 5,
+};
+
+struct ec_params_usb_pd_control {
+ uint8_t port;
+ uint8_t role;
+ uint8_t mux;
+} __packed;
+
+/*****************************************************************************/
+/*
+ * Passthru commands
+ *
+ * Some platforms have sub-processors chained to each other. For example:
+ *
+ * AP <--> EC <--> PD MCU
+ *
+ * The top 2 bits of the command number are used to indicate which device the
+ * command is intended for. Device 0 is always the device receiving the
+ * command; other device mapping is board-specific.
+ *
+ * When a device receives a command to be passed to a sub-processor, it passes
+ * it on with the device number set back to 0. This allows the sub-processor
+ * to remain blissfully unaware of whether the command originated on the next
+ * device up the chain, or was passed through from the AP.
+ *
+ * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
+ * AP sends command 0x4002 to the EC
+ * EC sends command 0x0002 to the PD MCU
+ * EC forwards PD MCU response back to the AP
+ */
+
+/* Offset and max command number for sub-device n */
+#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
+#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
+
+/*****************************************************************************/
+/*
* Deprecated constants. These constants have been renamed for clarity. The
* meaning and size has not changed. Programs that use the old names should
* switch to the new names soon, as the old names may not be carried forward
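A small worked example of the passthru encoding explained above: to reach command 0x0002 on sub-device 1 (the PD MCU in the sample topology), the AP adds the device offset and the EC strips it before forwarding. The helper name is illustrative.

#include <linux/types.h>
#include <linux/mfd/cros_ec_commands.h>

/* device 0 -> 0x0002, device 1 -> 0x4002, device 2 -> 0x8002, ... */
static inline u16 cros_ec_passthru_cmd(unsigned int device, u16 command)
{
	return EC_CMD_PASSTHRU_OFFSET(device) + command;
}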
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
index 956afa445998..5dc743fd63a6 100644
--- a/include/linux/mfd/da9055/core.h
+++ b/include/linux/mfd/da9055/core.h
@@ -89,6 +89,6 @@ static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg,
int da9055_device_init(struct da9055 *da9055);
void da9055_device_exit(struct da9055 *da9055);
-extern struct regmap_config da9055_regmap_config;
+extern const struct regmap_config da9055_regmap_config;
#endif /* __DA9055_CORE_H */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 95c8742215a7..612383bd80ae 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -103,6 +103,7 @@ struct da9063;
struct da9063_pdata {
int (*init)(struct da9063 *da9063);
int irq_base;
+ bool key_power;
unsigned flags;
struct da9063_regulators_pdata *regulators_pdata;
struct led_platform_data *leds_pdata;
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index bb995ab9a575..d4b72d519115 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -125,9 +125,4 @@ enum max77686_opmode {
MAX77686_OPMODE_STANDBY,
};
-struct max77686_opmode_data {
- int id;
- int mode;
-};
-
#endif /* __LINUX_MFD_MAX77686_H */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 955dd990beaf..51633ea6f910 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -87,6 +87,7 @@ enum max77693_pmic_reg {
/* MAX77693 ITORCH register */
#define TORCH_IOUT1_SHIFT 0
#define TORCH_IOUT2_SHIFT 4
+#define TORCH_IOUT_MASK(x) (0xf << (x))
#define TORCH_IOUT_MIN 15625
#define TORCH_IOUT_MAX 250000
#define TORCH_IOUT_STEP 15625
@@ -113,8 +114,8 @@ enum max77693_pmic_reg {
#define FLASH_EN_FLASH 0x1
#define FLASH_EN_TORCH 0x2
#define FLASH_EN_ON 0x3
-#define FLASH_EN_SHIFT(x) (6 - ((x) - 1) * 2)
-#define TORCH_EN_SHIFT(x) (2 - ((x) - 1) * 2)
+#define FLASH_EN_SHIFT(x) (6 - (x) * 2)
+#define TORCH_EN_SHIFT(x) (2 - (x) * 2)
/* MAX77693 MAX_FLASH1 register */
#define MAX_FLASH1_MAX_FL_EN 0x80
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index 09a4dedaeea8..d450f687301b 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -81,19 +81,6 @@ enum max77693_led_boost_mode {
MAX77693_LED_BOOST_FIXED,
};
-struct max77693_led_platform_data {
- u32 fleds[2];
- u32 iout_torch[2];
- u32 iout_flash[2];
- u32 trigger[2];
- u32 trigger_type[2];
- u32 num_leds;
- u32 boost_mode;
- u32 flash_timeout;
- u32 boost_vout;
- u32 low_vsys;
-};
-
/* MAX77693 */
struct max77693_platform_data {
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
new file mode 100644
index 000000000000..7178ace8379e
--- /dev/null
+++ b/include/linux/mfd/max77843-private.h
@@ -0,0 +1,454 @@
+/*
+ * Common variables for the Maxim MAX77843 driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MAX77843_PRIVATE_H_
+#define __MAX77843_PRIVATE_H_
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_TOPSYS (0xCC >> 1)
+#define I2C_ADDR_CHG (0xD2 >> 1)
+#define I2C_ADDR_FG (0x6C >> 1)
+#define I2C_ADDR_MUIC (0x4A >> 1)
+
+/* Topsys, Haptic and LED registers */
+enum max77843_sys_reg {
+ MAX77843_SYS_REG_PMICID = 0x00,
+ MAX77843_SYS_REG_PMICREV = 0x01,
+ MAX77843_SYS_REG_MAINCTRL1 = 0x02,
+ MAX77843_SYS_REG_INTSRC = 0x22,
+ MAX77843_SYS_REG_INTSRCMASK = 0x23,
+ MAX77843_SYS_REG_SYSINTSRC = 0x24,
+ MAX77843_SYS_REG_SYSINTMASK = 0x26,
+ MAX77843_SYS_REG_TOPSYS_STAT = 0x28,
+ MAX77843_SYS_REG_SAFEOUTCTRL = 0xC6,
+
+ MAX77843_SYS_REG_END,
+};
+
+enum max77843_haptic_reg {
+ MAX77843_HAP_REG_MCONFIG = 0x10,
+
+ MAX77843_HAP_REG_END,
+};
+
+enum max77843_led_reg {
+ MAX77843_LED_REG_LEDEN = 0x30,
+ MAX77843_LED_REG_LED0BRT = 0x31,
+ MAX77843_LED_REG_LED1BRT = 0x32,
+ MAX77843_LED_REG_LED2BRT = 0x33,
+ MAX77843_LED_REG_LED3BRT = 0x34,
+ MAX77843_LED_REG_LEDBLNK = 0x38,
+ MAX77843_LED_REG_LEDRAMP = 0x36,
+
+ MAX77843_LED_REG_END,
+};
+
+/* Charger registers */
+enum max77843_charger_reg {
+ MAX77843_CHG_REG_CHG_INT = 0xB0,
+ MAX77843_CHG_REG_CHG_INT_MASK = 0xB1,
+ MAX77843_CHG_REG_CHG_INT_OK = 0xB2,
+ MAX77843_CHG_REG_CHG_DTLS_00 = 0xB3,
+ MAX77843_CHG_REG_CHG_DTLS_01 = 0xB4,
+ MAX77843_CHG_REG_CHG_DTLS_02 = 0xB5,
+ MAX77843_CHG_REG_CHG_CNFG_00 = 0xB7,
+ MAX77843_CHG_REG_CHG_CNFG_01 = 0xB8,
+ MAX77843_CHG_REG_CHG_CNFG_02 = 0xB9,
+ MAX77843_CHG_REG_CHG_CNFG_03 = 0xBA,
+ MAX77843_CHG_REG_CHG_CNFG_04 = 0xBB,
+ MAX77843_CHG_REG_CHG_CNFG_06 = 0xBD,
+ MAX77843_CHG_REG_CHG_CNFG_07 = 0xBE,
+ MAX77843_CHG_REG_CHG_CNFG_09 = 0xC0,
+ MAX77843_CHG_REG_CHG_CNFG_10 = 0xC1,
+ MAX77843_CHG_REG_CHG_CNFG_11 = 0xC2,
+ MAX77843_CHG_REG_CHG_CNFG_12 = 0xC3,
+
+ MAX77843_CHG_REG_END,
+};
+
+/* Fuel gauge registers */
+enum max77843_fuelgauge {
+ MAX77843_FG_REG_STATUS = 0x00,
+ MAX77843_FG_REG_VALRT_TH = 0x01,
+ MAX77843_FG_REG_TALRT_TH = 0x02,
+ MAX77843_FG_REG_SALRT_TH = 0x03,
+ MAX77843_FG_RATE_AT_RATE = 0x04,
+ MAX77843_FG_REG_REMCAP_REP = 0x05,
+ MAX77843_FG_REG_SOCREP = 0x06,
+ MAX77843_FG_REG_AGE = 0x07,
+ MAX77843_FG_REG_TEMP = 0x08,
+ MAX77843_FG_REG_VCELL = 0x09,
+ MAX77843_FG_REG_CURRENT = 0x0A,
+ MAX77843_FG_REG_AVG_CURRENT = 0x0B,
+ MAX77843_FG_REG_SOCMIX = 0x0D,
+ MAX77843_FG_REG_SOCAV = 0x0E,
+ MAX77843_FG_REG_REMCAP_MIX = 0x0F,
+ MAX77843_FG_REG_FULLCAP = 0x10,
+ MAX77843_FG_REG_AVG_TEMP = 0x16,
+ MAX77843_FG_REG_CYCLES = 0x17,
+ MAX77843_FG_REG_AVG_VCELL = 0x19,
+ MAX77843_FG_REG_CONFIG = 0x1D,
+ MAX77843_FG_REG_REMCAP_AV = 0x1F,
+ MAX77843_FG_REG_FULLCAP_NOM = 0x23,
+ MAX77843_FG_REG_MISCCFG = 0x2B,
+ MAX77843_FG_REG_RCOMP = 0x38,
+ MAX77843_FG_REG_FSTAT = 0x3D,
+ MAX77843_FG_REG_DQACC = 0x45,
+ MAX77843_FG_REG_DPACC = 0x46,
+ MAX77843_FG_REG_OCV = 0xEE,
+ MAX77843_FG_REG_VFOCV = 0xFB,
+ MAX77843_FG_SOCVF = 0xFF,
+
+ MAX77843_FG_END,
+};
+
+/* MUIC registers */
+enum max77843_muic_reg {
+ MAX77843_MUIC_REG_ID = 0x00,
+ MAX77843_MUIC_REG_INT1 = 0x01,
+ MAX77843_MUIC_REG_INT2 = 0x02,
+ MAX77843_MUIC_REG_INT3 = 0x03,
+ MAX77843_MUIC_REG_STATUS1 = 0x04,
+ MAX77843_MUIC_REG_STATUS2 = 0x05,
+ MAX77843_MUIC_REG_STATUS3 = 0x06,
+ MAX77843_MUIC_REG_INTMASK1 = 0x07,
+ MAX77843_MUIC_REG_INTMASK2 = 0x08,
+ MAX77843_MUIC_REG_INTMASK3 = 0x09,
+ MAX77843_MUIC_REG_CDETCTRL1 = 0x0A,
+ MAX77843_MUIC_REG_CDETCTRL2 = 0x0B,
+ MAX77843_MUIC_REG_CONTROL1 = 0x0C,
+ MAX77843_MUIC_REG_CONTROL2 = 0x0D,
+ MAX77843_MUIC_REG_CONTROL3 = 0x0E,
+ MAX77843_MUIC_REG_CONTROL4 = 0x16,
+ MAX77843_MUIC_REG_HVCONTROL1 = 0x17,
+ MAX77843_MUIC_REG_HVCONTROL2 = 0x18,
+
+ MAX77843_MUIC_REG_END,
+};
+
+enum max77843_irq {
+ /* Topsys: SYSTEM */
+ MAX77843_SYS_IRQ_SYSINTSRC_SYSUVLO_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_SYSOVLO_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_TSHDN_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_TM_INT,
+
+ /* Charger: CHG_INT */
+ MAX77843_CHG_IRQ_CHG_INT_BYP_I,
+ MAX77843_CHG_IRQ_CHG_INT_BATP_I,
+ MAX77843_CHG_IRQ_CHG_INT_BAT_I,
+ MAX77843_CHG_IRQ_CHG_INT_CHG_I,
+ MAX77843_CHG_IRQ_CHG_INT_WCIN_I,
+ MAX77843_CHG_IRQ_CHG_INT_CHGIN_I,
+ MAX77843_CHG_IRQ_CHG_INT_AICL_I,
+
+ MAX77843_IRQ_NUM,
+};
+
+enum max77843_irq_muic {
+ /* MUIC: INT1 */
+ MAX77843_MUIC_IRQ_INT1_ADC,
+ MAX77843_MUIC_IRQ_INT1_ADCERROR,
+ MAX77843_MUIC_IRQ_INT1_ADC1K,
+
+ /* MUIC: INT2 */
+ MAX77843_MUIC_IRQ_INT2_CHGTYP,
+ MAX77843_MUIC_IRQ_INT2_CHGDETRUN,
+ MAX77843_MUIC_IRQ_INT2_DCDTMR,
+ MAX77843_MUIC_IRQ_INT2_DXOVP,
+ MAX77843_MUIC_IRQ_INT2_VBVOLT,
+
+ /* MUIC: INT3 */
+ MAX77843_MUIC_IRQ_INT3_VBADC,
+ MAX77843_MUIC_IRQ_INT3_VDNMON,
+ MAX77843_MUIC_IRQ_INT3_DNRES,
+ MAX77843_MUIC_IRQ_INT3_MPNACK,
+ MAX77843_MUIC_IRQ_INT3_MRXBUFOW,
+ MAX77843_MUIC_IRQ_INT3_MRXTRF,
+ MAX77843_MUIC_IRQ_INT3_MRXPERR,
+ MAX77843_MUIC_IRQ_INT3_MRXRDY,
+
+ MAX77843_MUIC_IRQ_NUM,
+};
+
+/* MAX77843 interrupts */
+#define MAX77843_SYS_IRQ_SYSUVLO_INT BIT(0)
+#define MAX77843_SYS_IRQ_SYSOVLO_INT BIT(1)
+#define MAX77843_SYS_IRQ_TSHDN_INT BIT(2)
+#define MAX77843_SYS_IRQ_TM_INT BIT(3)
+
+/* MAX77843 MAINCTRL1 register */
+#define MAINCTRL1_BIASEN_SHIFT 7
+#define MAX77843_MAINCTRL1_BIASEN_MASK BIT(MAINCTRL1_BIASEN_SHIFT)
+
+/* MAX77843 MCONFIG register */
+#define MCONFIG_MODE_SHIFT 7
+#define MCONFIG_MEN_SHIFT 6
+#define MCONFIG_PDIV_SHIFT 0
+
+#define MAX77843_MCONFIG_MODE_MASK BIT(MCONFIG_MODE_SHIFT)
+#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
+#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
+
+/* MAX77843 charger interrupts */
+#define MAX77843_CHG_BYP_I BIT(0)
+#define MAX77843_CHG_BATP_I BIT(2)
+#define MAX77843_CHG_BAT_I BIT(3)
+#define MAX77843_CHG_CHG_I BIT(4)
+#define MAX77843_CHG_WCIN_I BIT(5)
+#define MAX77843_CHG_CHGIN_I BIT(6)
+#define MAX77843_CHG_AICL_I BIT(7)
+
+/* MAX77843 CHG_INT_OK register */
+#define MAX77843_CHG_BYP_OK BIT(0)
+#define MAX77843_CHG_BATP_OK BIT(2)
+#define MAX77843_CHG_BAT_OK BIT(3)
+#define MAX77843_CHG_CHG_OK BIT(4)
+#define MAX77843_CHG_WCIN_OK BIT(5)
+#define MAX77843_CHG_CHGIN_OK BIT(6)
+#define MAX77843_CHG_AICL_OK BIT(7)
+
+/* MAX77843 CHG_DETAILS_00 register */
+#define MAX77843_CHG_BAT_DTLS BIT(0)
+
+/* MAX77843 CHG_DETAILS_01 register */
+#define MAX77843_CHG_DTLS_MASK 0x0f
+#define MAX77843_CHG_PQ_MODE 0x00
+#define MAX77843_CHG_CC_MODE 0x01
+#define MAX77843_CHG_CV_MODE 0x02
+#define MAX77843_CHG_TO_MODE 0x03
+#define MAX77843_CHG_DO_MODE 0x04
+#define MAX77843_CHG_HT_MODE 0x05
+#define MAX77843_CHG_TF_MODE 0x06
+#define MAX77843_CHG_TS_MODE 0x07
+#define MAX77843_CHG_OFF_MODE 0x08
+
+#define MAX77843_CHG_BAT_DTLS_MASK 0xf0
+#define MAX77843_CHG_NO_BAT (0x00 << 4)
+#define MAX77843_CHG_LOW_VOLT_BAT (0x01 << 4)
+#define MAX77843_CHG_LONG_BAT_TIME (0x02 << 4)
+#define MAX77843_CHG_OK_BAT (0x03 << 4)
+#define MAX77843_CHG_OK_LOW_VOLT_BAT (0x04 << 4)
+#define MAX77843_CHG_OVER_VOLT_BAT (0x05 << 4)
+#define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4)
+
+/* MAX77843 CHG_CNFG_00 register */
+#define MAX77843_CHG_DISABLE 0x00
+#define MAX77843_CHG_ENABLE 0x05
+#define MAX77843_CHG_MASK 0x01
+#define MAX77843_CHG_BUCK_MASK 0x04
+
+/* MAX77843 CHG_CNFG_01 register */
+#define MAX77843_CHG_RESTART_THRESHOLD_100 0x00
+#define MAX77843_CHG_RESTART_THRESHOLD_150 0x10
+#define MAX77843_CHG_RESTART_THRESHOLD_200 0x20
+#define MAX77843_CHG_RESTART_THRESHOLD_DISABLE 0x30
+
+/* MAX77843 CHG_CNFG_02 register */
+#define MAX77843_CHG_FAST_CHG_CURRENT_MIN 100000
+#define MAX77843_CHG_FAST_CHG_CURRENT_MAX 3150000
+#define MAX77843_CHG_FAST_CHG_CURRENT_STEP 50000
+#define MAX77843_CHG_FAST_CHG_CURRENT_MASK 0x3f
+#define MAX77843_CHG_OTG_ILIMIT_500 (0x00 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_900 (0x01 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_1200 (0x02 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_1500 (0x03 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_MASK 0xc0
+
+/* MAX77843 CHG_CNFG_03 register */
+#define MAX77843_CHG_TOP_OFF_CURRENT_MIN 125000
+#define MAX77843_CHG_TOP_OFF_CURRENT_MAX 650000
+#define MAX77843_CHG_TOP_OFF_CURRENT_STEP 75000
+#define MAX77843_CHG_TOP_OFF_CURRENT_MASK 0x07
+
+/* MAX77843 CHG_CNFG_06 register */
+#define MAX77843_CHG_WRITE_CAP_BLOCK 0x10
+#define MAX77843_CHG_WRITE_CAP_UNBLOCK 0x0C
+
+/* MAX77843_CHG_CNFG_09_register */
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MIN 100000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MAX 4000000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_REF 3367000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_STEP 33000
+
+#define MAX77843_MUIC_ADC BIT(0)
+#define MAX77843_MUIC_ADCERROR BIT(2)
+#define MAX77843_MUIC_ADC1K BIT(3)
+
+#define MAX77843_MUIC_CHGTYP BIT(0)
+#define MAX77843_MUIC_CHGDETRUN BIT(1)
+#define MAX77843_MUIC_DCDTMR BIT(2)
+#define MAX77843_MUIC_DXOVP BIT(3)
+#define MAX77843_MUIC_VBVOLT BIT(4)
+
+#define MAX77843_MUIC_VBADC BIT(0)
+#define MAX77843_MUIC_VDNMON BIT(1)
+#define MAX77843_MUIC_DNRES BIT(2)
+#define MAX77843_MUIC_MPNACK BIT(3)
+#define MAX77843_MUIC_MRXBUFOW BIT(4)
+#define MAX77843_MUIC_MRXTRF BIT(5)
+#define MAX77843_MUIC_MRXPERR BIT(6)
+#define MAX77843_MUIC_MRXRDY BIT(7)
+
+/* MAX77843 INTSRCMASK register */
+#define MAX77843_INTSRCMASK_CHGR 0
+#define MAX77843_INTSRCMASK_SYS 1
+#define MAX77843_INTSRCMASK_FG 2
+#define MAX77843_INTSRCMASK_MUIC 3
+
+#define MAX77843_INTSRCMASK_CHGR_MASK BIT(MAX77843_INTSRCMASK_CHGR)
+#define MAX77843_INTSRCMASK_SYS_MASK BIT(MAX77843_INTSRCMASK_SYS)
+#define MAX77843_INTSRCMASK_FG_MASK BIT(MAX77843_INTSRCMASK_FG)
+#define MAX77843_INTSRCMASK_MUIC_MASK BIT(MAX77843_INTSRCMASK_MUIC)
+
+#define MAX77843_INTSRC_MASK_MASK \
+ (MAX77843_INTSRCMASK_MUIC_MASK | MAX77843_INTSRCMASK_FG_MASK | \
+ MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
+
+/* MAX77843 STATUS register*/
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCERROR_SHIFT 6
+#define STATUS1_ADC1K_SHIFT 7
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define STATUS2_DXOVP_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define STATUS3_VBADC_SHIFT 0
+#define STATUS3_VDNMON_SHIFT 4
+#define STATUS3_DNRES_SHIFT 5
+#define STATUS3_MPNACK_SHIFT 6
+
+#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
+#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
+#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT)
+#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
+#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT)
+#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT)
+#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT)
+#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT)
+
+/* MAX77843 CONTROL register */
+#define CONTROL1_COMP1SW_SHIFT 0
+#define CONTROL1_COMP2SW_SHIFT 3
+#define CONTROL1_IDBEN_SHIFT 7
+#define CONTROL2_LOWPWR_SHIFT 0
+#define CONTROL2_ADCEN_SHIFT 1
+#define CONTROL2_CPEN_SHIFT 2
+#define CONTROL2_ACC_DET_SHIFT 5
+#define CONTROL2_USBCPINT_SHIFT 6
+#define CONTROL2_RCPS_SHIFT 7
+#define CONTROL3_JIGSET_SHIFT 0
+#define CONTROL4_ADCDBSET_SHIFT 0
+#define CONTROL4_USBAUTO_SHIFT 4
+#define CONTROL4_FCTAUTO_SHIFT 5
+#define CONTROL4_ADCMODE_SHIFT 6
+
+#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT)
+#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT)
+#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT)
+#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT)
+
+/* MAX77843 switch port */
+#define COM_OPEN 0
+#define COM_USB 1
+#define COM_AUDIO 2
+#define COM_UART 3
+#define COM_AUX_USB 4
+#define COM_AUX_UART 5
+
+#define CONTROL1_COM_SW \
+ ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
+ MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
+
+#define CONTROL1_SW_OPEN \
+ ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \
+ COM_OPEN << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_USB \
+ ((COM_USB << CONTROL1_COMP1SW_SHIFT | \
+ COM_USB << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUDIO \
+ ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUDIO << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_UART \
+ ((COM_UART << CONTROL1_COMP1SW_SHIFT | \
+ COM_UART << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUX_USB \
+ ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_USB << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUX_UART \
+ ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_UART << CONTROL1_COMP2SW_SHIFT))
+
+#define MAX77843_DISABLE 0
+#define MAX77843_ENABLE 1
+
+#define CONTROL4_AUTO_DISABLE \
+ ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT))
+#define CONTROL4_AUTO_ENABLE \
+ ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT))
+
+/* MAX77843 SAFEOUT LDO Control register */
+#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
+#define SAFEOUTCTRL_SAFEOUT2_SHIFT 2
+#define SAFEOUTCTRL_ENSAFEOUT1_SHIFT 6
+#define SAFEOUTCTRL_ENSAFEOUT2_SHIFT 7
+
+#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1 \
+ BIT(SAFEOUTCTRL_ENSAFEOUT1_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2 \
+ BIT(SAFEOUTCTRL_ENSAFEOUT2_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK \
+ (0x3 << SAFEOUTCTRL_SAFEOUT1_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
+ (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
+
+struct max77843 {
+ struct device *dev;
+
+ struct i2c_client *i2c;
+ struct i2c_client *i2c_chg;
+ struct i2c_client *i2c_fuel;
+ struct i2c_client *i2c_muic;
+
+ struct regmap *regmap;
+ struct regmap *regmap_chg;
+ struct regmap *regmap_fuel;
+ struct regmap *regmap_muic;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_data_chg;
+ struct regmap_irq_chip_data *irq_data_fuel;
+ struct regmap_irq_chip_data *irq_data_muic;
+
+ int irq;
+};
+#endif /* __MAX77843_PRIVATE_H_ */
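A hedged sketch of how the composed CONTROL1_* switch values above would be applied: both MUIC switches are routed to the USB path with a single masked update. The regmap_muic handle matches the struct max77843 at the end of this header; the function name is an assumption.

#include <linux/regmap.h>
#include <linux/mfd/max77843-private.h>

/* Illustrative only: select the USB data path on the MUIC switches. */
static int max77843_muic_route_usb(struct max77843 *max77843)
{
	return regmap_update_bits(max77843->regmap_muic,
				  MAX77843_MUIC_REG_CONTROL1,
				  CONTROL1_COM_SW, CONTROL1_SW_USB);
}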
diff --git a/include/linux/mfd/menelaus.h b/include/linux/mfd/menelaus.h
index f097e89134cb..9e85ac06da89 100644
--- a/include/linux/mfd/menelaus.h
+++ b/include/linux/mfd/menelaus.h
@@ -24,7 +24,6 @@ extern int menelaus_set_vaux(unsigned int mV);
extern int menelaus_set_vdcdc(int dcdc, unsigned int mV);
extern int menelaus_set_slot_sel(int enable);
extern int menelaus_get_slot_pin_states(void);
-extern int menelaus_set_vcore_sw(unsigned int mV);
extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV);
#define EN_VPLL_SLEEP (1 << 7)
@@ -38,10 +37,4 @@ extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV);
extern int menelaus_set_regulator_sleep(int enable, u32 val);
-#if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_MENELAUS)
-#define omap_has_menelaus() 1
-#else
-#define omap_has_menelaus() 0
-#endif
-
#endif
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
new file mode 100644
index 000000000000..cf5265b0d1c1
--- /dev/null
+++ b/include/linux/mfd/mt6397/core.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MT6397_CORE_H__
+#define __MFD_MT6397_CORE_H__
+
+enum mt6397_irq_numbers {
+ MT6397_IRQ_SPKL_AB = 0,
+ MT6397_IRQ_SPKR_AB,
+ MT6397_IRQ_SPKL,
+ MT6397_IRQ_SPKR,
+ MT6397_IRQ_BAT_L,
+ MT6397_IRQ_BAT_H,
+ MT6397_IRQ_FG_BAT_L,
+ MT6397_IRQ_FG_BAT_H,
+ MT6397_IRQ_WATCHDOG,
+ MT6397_IRQ_PWRKEY,
+ MT6397_IRQ_THR_L,
+ MT6397_IRQ_THR_H,
+ MT6397_IRQ_VBATON_UNDET,
+ MT6397_IRQ_BVALID_DET,
+ MT6397_IRQ_CHRDET,
+ MT6397_IRQ_OV,
+ MT6397_IRQ_LDO,
+ MT6397_IRQ_HOMEKEY,
+ MT6397_IRQ_ACCDET,
+ MT6397_IRQ_AUDIO,
+ MT6397_IRQ_RTC,
+ MT6397_IRQ_PWRKEY_RSTB,
+ MT6397_IRQ_HDMI_SIFM,
+ MT6397_IRQ_HDMI_CEC,
+ MT6397_IRQ_VCA15,
+ MT6397_IRQ_VSRMCA15,
+ MT6397_IRQ_VCORE,
+ MT6397_IRQ_VGPU,
+ MT6397_IRQ_VIO18,
+ MT6397_IRQ_VPCA7,
+ MT6397_IRQ_VSRMCA7,
+ MT6397_IRQ_VDRM,
+ MT6397_IRQ_NR,
+};
+
+struct mt6397_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct irq_domain *irq_domain;
+ struct mutex irqlock;
+ u16 irq_masks_cur[2];
+ u16 irq_masks_cache[2];
+};
+
+#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/mt6397/registers.h b/include/linux/mfd/mt6397/registers.h
new file mode 100644
index 000000000000..f23a0a60a877
--- /dev/null
+++ b/include/linux/mfd/mt6397/registers.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MT6397_REGISTERS_H__
+#define __MFD_MT6397_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6397_CID 0x0100
+#define MT6397_TOP_CKPDN 0x0102
+#define MT6397_TOP_CKPDN_SET 0x0104
+#define MT6397_TOP_CKPDN_CLR 0x0106
+#define MT6397_TOP_CKPDN2 0x0108
+#define MT6397_TOP_CKPDN2_SET 0x010A
+#define MT6397_TOP_CKPDN2_CLR 0x010C
+#define MT6397_TOP_GPIO_CKPDN 0x010E
+#define MT6397_TOP_RST_CON 0x0114
+#define MT6397_WRP_CKPDN 0x011A
+#define MT6397_WRP_RST_CON 0x0120
+#define MT6397_TOP_RST_MISC 0x0126
+#define MT6397_TOP_CKCON1 0x0128
+#define MT6397_TOP_CKCON2 0x012A
+#define MT6397_TOP_CKTST1 0x012C
+#define MT6397_TOP_CKTST2 0x012E
+#define MT6397_OC_DEG_EN 0x0130
+#define MT6397_OC_CTL0 0x0132
+#define MT6397_OC_CTL1 0x0134
+#define MT6397_OC_CTL2 0x0136
+#define MT6397_INT_RSV 0x0138
+#define MT6397_TEST_CON0 0x013A
+#define MT6397_TEST_CON1 0x013C
+#define MT6397_STATUS0 0x013E
+#define MT6397_STATUS1 0x0140
+#define MT6397_PGSTATUS 0x0142
+#define MT6397_CHRSTATUS 0x0144
+#define MT6397_OCSTATUS0 0x0146
+#define MT6397_OCSTATUS1 0x0148
+#define MT6397_OCSTATUS2 0x014A
+#define MT6397_HDMI_PAD_IE 0x014C
+#define MT6397_TEST_OUT_L 0x014E
+#define MT6397_TEST_OUT_H 0x0150
+#define MT6397_TDSEL_CON 0x0152
+#define MT6397_RDSEL_CON 0x0154
+#define MT6397_GPIO_SMT_CON0 0x0156
+#define MT6397_GPIO_SMT_CON1 0x0158
+#define MT6397_GPIO_SMT_CON2 0x015A
+#define MT6397_GPIO_SMT_CON3 0x015C
+#define MT6397_DRV_CON0 0x015E
+#define MT6397_DRV_CON1 0x0160
+#define MT6397_DRV_CON2 0x0162
+#define MT6397_DRV_CON3 0x0164
+#define MT6397_DRV_CON4 0x0166
+#define MT6397_DRV_CON5 0x0168
+#define MT6397_DRV_CON6 0x016A
+#define MT6397_DRV_CON7 0x016C
+#define MT6397_DRV_CON8 0x016E
+#define MT6397_DRV_CON9 0x0170
+#define MT6397_DRV_CON10 0x0172
+#define MT6397_DRV_CON11 0x0174
+#define MT6397_DRV_CON12 0x0176
+#define MT6397_INT_CON0 0x0178
+#define MT6397_INT_CON1 0x017E
+#define MT6397_INT_STATUS0 0x0184
+#define MT6397_INT_STATUS1 0x0186
+#define MT6397_FQMTR_CON0 0x0188
+#define MT6397_FQMTR_CON1 0x018A
+#define MT6397_FQMTR_CON2 0x018C
+#define MT6397_EFUSE_DOUT_0_15 0x01C4
+#define MT6397_EFUSE_DOUT_16_31 0x01C6
+#define MT6397_EFUSE_DOUT_32_47 0x01C8
+#define MT6397_EFUSE_DOUT_48_63 0x01CA
+#define MT6397_SPI_CON 0x01CC
+#define MT6397_TOP_CKPDN3 0x01CE
+#define MT6397_TOP_CKCON3 0x01D4
+#define MT6397_EFUSE_DOUT_64_79 0x01D6
+#define MT6397_EFUSE_DOUT_80_95 0x01D8
+#define MT6397_EFUSE_DOUT_96_111 0x01DA
+#define MT6397_EFUSE_DOUT_112_127 0x01DC
+#define MT6397_EFUSE_DOUT_128_143 0x01DE
+#define MT6397_EFUSE_DOUT_144_159 0x01E0
+#define MT6397_EFUSE_DOUT_160_175 0x01E2
+#define MT6397_EFUSE_DOUT_176_191 0x01E4
+#define MT6397_EFUSE_DOUT_192_207 0x01E6
+#define MT6397_EFUSE_DOUT_208_223 0x01E8
+#define MT6397_EFUSE_DOUT_224_239 0x01EA
+#define MT6397_EFUSE_DOUT_240_255 0x01EC
+#define MT6397_EFUSE_DOUT_256_271 0x01EE
+#define MT6397_EFUSE_DOUT_272_287 0x01F0
+#define MT6397_EFUSE_DOUT_288_300 0x01F2
+#define MT6397_EFUSE_DOUT_304_319 0x01F4
+#define MT6397_BUCK_CON0 0x0200
+#define MT6397_BUCK_CON1 0x0202
+#define MT6397_BUCK_CON2 0x0204
+#define MT6397_BUCK_CON3 0x0206
+#define MT6397_BUCK_CON4 0x0208
+#define MT6397_BUCK_CON5 0x020A
+#define MT6397_BUCK_CON6 0x020C
+#define MT6397_BUCK_CON7 0x020E
+#define MT6397_BUCK_CON8 0x0210
+#define MT6397_BUCK_CON9 0x0212
+#define MT6397_VCA15_CON0 0x0214
+#define MT6397_VCA15_CON1 0x0216
+#define MT6397_VCA15_CON2 0x0218
+#define MT6397_VCA15_CON3 0x021A
+#define MT6397_VCA15_CON4 0x021C
+#define MT6397_VCA15_CON5 0x021E
+#define MT6397_VCA15_CON6 0x0220
+#define MT6397_VCA15_CON7 0x0222
+#define MT6397_VCA15_CON8 0x0224
+#define MT6397_VCA15_CON9 0x0226
+#define MT6397_VCA15_CON10 0x0228
+#define MT6397_VCA15_CON11 0x022A
+#define MT6397_VCA15_CON12 0x022C
+#define MT6397_VCA15_CON13 0x022E
+#define MT6397_VCA15_CON14 0x0230
+#define MT6397_VCA15_CON15 0x0232
+#define MT6397_VCA15_CON16 0x0234
+#define MT6397_VCA15_CON17 0x0236
+#define MT6397_VCA15_CON18 0x0238
+#define MT6397_VSRMCA15_CON0 0x023A
+#define MT6397_VSRMCA15_CON1 0x023C
+#define MT6397_VSRMCA15_CON2 0x023E
+#define MT6397_VSRMCA15_CON3 0x0240
+#define MT6397_VSRMCA15_CON4 0x0242
+#define MT6397_VSRMCA15_CON5 0x0244
+#define MT6397_VSRMCA15_CON6 0x0246
+#define MT6397_VSRMCA15_CON7 0x0248
+#define MT6397_VSRMCA15_CON8 0x024A
+#define MT6397_VSRMCA15_CON9 0x024C
+#define MT6397_VSRMCA15_CON10 0x024E
+#define MT6397_VSRMCA15_CON11 0x0250
+#define MT6397_VSRMCA15_CON12 0x0252
+#define MT6397_VSRMCA15_CON13 0x0254
+#define MT6397_VSRMCA15_CON14 0x0256
+#define MT6397_VSRMCA15_CON15 0x0258
+#define MT6397_VSRMCA15_CON16 0x025A
+#define MT6397_VSRMCA15_CON17 0x025C
+#define MT6397_VSRMCA15_CON18 0x025E
+#define MT6397_VSRMCA15_CON19 0x0260
+#define MT6397_VSRMCA15_CON20 0x0262
+#define MT6397_VSRMCA15_CON21 0x0264
+#define MT6397_VCORE_CON0 0x0266
+#define MT6397_VCORE_CON1 0x0268
+#define MT6397_VCORE_CON2 0x026A
+#define MT6397_VCORE_CON3 0x026C
+#define MT6397_VCORE_CON4 0x026E
+#define MT6397_VCORE_CON5 0x0270
+#define MT6397_VCORE_CON6 0x0272
+#define MT6397_VCORE_CON7 0x0274
+#define MT6397_VCORE_CON8 0x0276
+#define MT6397_VCORE_CON9 0x0278
+#define MT6397_VCORE_CON10 0x027A
+#define MT6397_VCORE_CON11 0x027C
+#define MT6397_VCORE_CON12 0x027E
+#define MT6397_VCORE_CON13 0x0280
+#define MT6397_VCORE_CON14 0x0282
+#define MT6397_VCORE_CON15 0x0284
+#define MT6397_VCORE_CON16 0x0286
+#define MT6397_VCORE_CON17 0x0288
+#define MT6397_VCORE_CON18 0x028A
+#define MT6397_VGPU_CON0 0x028C
+#define MT6397_VGPU_CON1 0x028E
+#define MT6397_VGPU_CON2 0x0290
+#define MT6397_VGPU_CON3 0x0292
+#define MT6397_VGPU_CON4 0x0294
+#define MT6397_VGPU_CON5 0x0296
+#define MT6397_VGPU_CON6 0x0298
+#define MT6397_VGPU_CON7 0x029A
+#define MT6397_VGPU_CON8 0x029C
+#define MT6397_VGPU_CON9 0x029E
+#define MT6397_VGPU_CON10 0x02A0
+#define MT6397_VGPU_CON11 0x02A2
+#define MT6397_VGPU_CON12 0x02A4
+#define MT6397_VGPU_CON13 0x02A6
+#define MT6397_VGPU_CON14 0x02A8
+#define MT6397_VGPU_CON15 0x02AA
+#define MT6397_VGPU_CON16 0x02AC
+#define MT6397_VGPU_CON17 0x02AE
+#define MT6397_VGPU_CON18 0x02B0
+#define MT6397_VIO18_CON0 0x0300
+#define MT6397_VIO18_CON1 0x0302
+#define MT6397_VIO18_CON2 0x0304
+#define MT6397_VIO18_CON3 0x0306
+#define MT6397_VIO18_CON4 0x0308
+#define MT6397_VIO18_CON5 0x030A
+#define MT6397_VIO18_CON6 0x030C
+#define MT6397_VIO18_CON7 0x030E
+#define MT6397_VIO18_CON8 0x0310
+#define MT6397_VIO18_CON9 0x0312
+#define MT6397_VIO18_CON10 0x0314
+#define MT6397_VIO18_CON11 0x0316
+#define MT6397_VIO18_CON12 0x0318
+#define MT6397_VIO18_CON13 0x031A
+#define MT6397_VIO18_CON14 0x031C
+#define MT6397_VIO18_CON15 0x031E
+#define MT6397_VIO18_CON16 0x0320
+#define MT6397_VIO18_CON17 0x0322
+#define MT6397_VIO18_CON18 0x0324
+#define MT6397_VPCA7_CON0 0x0326
+#define MT6397_VPCA7_CON1 0x0328
+#define MT6397_VPCA7_CON2 0x032A
+#define MT6397_VPCA7_CON3 0x032C
+#define MT6397_VPCA7_CON4 0x032E
+#define MT6397_VPCA7_CON5 0x0330
+#define MT6397_VPCA7_CON6 0x0332
+#define MT6397_VPCA7_CON7 0x0334
+#define MT6397_VPCA7_CON8 0x0336
+#define MT6397_VPCA7_CON9 0x0338
+#define MT6397_VPCA7_CON10 0x033A
+#define MT6397_VPCA7_CON11 0x033C
+#define MT6397_VPCA7_CON12 0x033E
+#define MT6397_VPCA7_CON13 0x0340
+#define MT6397_VPCA7_CON14 0x0342
+#define MT6397_VPCA7_CON15 0x0344
+#define MT6397_VPCA7_CON16 0x0346
+#define MT6397_VPCA7_CON17 0x0348
+#define MT6397_VPCA7_CON18 0x034A
+#define MT6397_VSRMCA7_CON0 0x034C
+#define MT6397_VSRMCA7_CON1 0x034E
+#define MT6397_VSRMCA7_CON2 0x0350
+#define MT6397_VSRMCA7_CON3 0x0352
+#define MT6397_VSRMCA7_CON4 0x0354
+#define MT6397_VSRMCA7_CON5 0x0356
+#define MT6397_VSRMCA7_CON6 0x0358
+#define MT6397_VSRMCA7_CON7 0x035A
+#define MT6397_VSRMCA7_CON8 0x035C
+#define MT6397_VSRMCA7_CON9 0x035E
+#define MT6397_VSRMCA7_CON10 0x0360
+#define MT6397_VSRMCA7_CON11 0x0362
+#define MT6397_VSRMCA7_CON12 0x0364
+#define MT6397_VSRMCA7_CON13 0x0366
+#define MT6397_VSRMCA7_CON14 0x0368
+#define MT6397_VSRMCA7_CON15 0x036A
+#define MT6397_VSRMCA7_CON16 0x036C
+#define MT6397_VSRMCA7_CON17 0x036E
+#define MT6397_VSRMCA7_CON18 0x0370
+#define MT6397_VSRMCA7_CON19 0x0372
+#define MT6397_VSRMCA7_CON20 0x0374
+#define MT6397_VSRMCA7_CON21 0x0376
+#define MT6397_VDRM_CON0 0x0378
+#define MT6397_VDRM_CON1 0x037A
+#define MT6397_VDRM_CON2 0x037C
+#define MT6397_VDRM_CON3 0x037E
+#define MT6397_VDRM_CON4 0x0380
+#define MT6397_VDRM_CON5 0x0382
+#define MT6397_VDRM_CON6 0x0384
+#define MT6397_VDRM_CON7 0x0386
+#define MT6397_VDRM_CON8 0x0388
+#define MT6397_VDRM_CON9 0x038A
+#define MT6397_VDRM_CON10 0x038C
+#define MT6397_VDRM_CON11 0x038E
+#define MT6397_VDRM_CON12 0x0390
+#define MT6397_VDRM_CON13 0x0392
+#define MT6397_VDRM_CON14 0x0394
+#define MT6397_VDRM_CON15 0x0396
+#define MT6397_VDRM_CON16 0x0398
+#define MT6397_VDRM_CON17 0x039A
+#define MT6397_VDRM_CON18 0x039C
+#define MT6397_BUCK_K_CON0 0x039E
+#define MT6397_BUCK_K_CON1 0x03A0
+#define MT6397_ANALDO_CON0 0x0400
+#define MT6397_ANALDO_CON1 0x0402
+#define MT6397_ANALDO_CON2 0x0404
+#define MT6397_ANALDO_CON3 0x0406
+#define MT6397_ANALDO_CON4 0x0408
+#define MT6397_ANALDO_CON5 0x040A
+#define MT6397_ANALDO_CON6 0x040C
+#define MT6397_ANALDO_CON7 0x040E
+#define MT6397_DIGLDO_CON0 0x0410
+#define MT6397_DIGLDO_CON1 0x0412
+#define MT6397_DIGLDO_CON2 0x0414
+#define MT6397_DIGLDO_CON3 0x0416
+#define MT6397_DIGLDO_CON4 0x0418
+#define MT6397_DIGLDO_CON5 0x041A
+#define MT6397_DIGLDO_CON6 0x041C
+#define MT6397_DIGLDO_CON7 0x041E
+#define MT6397_DIGLDO_CON8 0x0420
+#define MT6397_DIGLDO_CON9 0x0422
+#define MT6397_DIGLDO_CON10 0x0424
+#define MT6397_DIGLDO_CON11 0x0426
+#define MT6397_DIGLDO_CON12 0x0428
+#define MT6397_DIGLDO_CON13 0x042A
+#define MT6397_DIGLDO_CON14 0x042C
+#define MT6397_DIGLDO_CON15 0x042E
+#define MT6397_DIGLDO_CON16 0x0430
+#define MT6397_DIGLDO_CON17 0x0432
+#define MT6397_DIGLDO_CON18 0x0434
+#define MT6397_DIGLDO_CON19 0x0436
+#define MT6397_DIGLDO_CON20 0x0438
+#define MT6397_DIGLDO_CON21 0x043A
+#define MT6397_DIGLDO_CON22 0x043C
+#define MT6397_DIGLDO_CON23 0x043E
+#define MT6397_DIGLDO_CON24 0x0440
+#define MT6397_DIGLDO_CON25 0x0442
+#define MT6397_DIGLDO_CON26 0x0444
+#define MT6397_DIGLDO_CON27 0x0446
+#define MT6397_DIGLDO_CON28 0x0448
+#define MT6397_DIGLDO_CON29 0x044A
+#define MT6397_DIGLDO_CON30 0x044C
+#define MT6397_DIGLDO_CON31 0x044E
+#define MT6397_DIGLDO_CON32 0x0450
+#define MT6397_DIGLDO_CON33 0x045A
+#define MT6397_SPK_CON0 0x0600
+#define MT6397_SPK_CON1 0x0602
+#define MT6397_SPK_CON2 0x0604
+#define MT6397_SPK_CON3 0x0606
+#define MT6397_SPK_CON4 0x0608
+#define MT6397_SPK_CON5 0x060A
+#define MT6397_SPK_CON6 0x060C
+#define MT6397_SPK_CON7 0x060E
+#define MT6397_SPK_CON8 0x0610
+#define MT6397_SPK_CON9 0x0612
+#define MT6397_SPK_CON10 0x0614
+#define MT6397_SPK_CON11 0x0616
+#define MT6397_AUDDAC_CON0 0x0700
+#define MT6397_AUDBUF_CFG0 0x0702
+#define MT6397_AUDBUF_CFG1 0x0704
+#define MT6397_AUDBUF_CFG2 0x0706
+#define MT6397_AUDBUF_CFG3 0x0708
+#define MT6397_AUDBUF_CFG4 0x070A
+#define MT6397_IBIASDIST_CFG0 0x070C
+#define MT6397_AUDACCDEPOP_CFG0 0x070E
+#define MT6397_AUD_IV_CFG0 0x0710
+#define MT6397_AUDCLKGEN_CFG0 0x0712
+#define MT6397_AUDLDO_CFG0 0x0714
+#define MT6397_AUDLDO_CFG1 0x0716
+#define MT6397_AUDNVREGGLB_CFG0 0x0718
+#define MT6397_AUD_NCP0 0x071A
+#define MT6397_AUDPREAMP_CON0 0x071C
+#define MT6397_AUDADC_CON0 0x071E
+#define MT6397_AUDADC_CON1 0x0720
+#define MT6397_AUDADC_CON2 0x0722
+#define MT6397_AUDADC_CON3 0x0724
+#define MT6397_AUDADC_CON4 0x0726
+#define MT6397_AUDADC_CON5 0x0728
+#define MT6397_AUDADC_CON6 0x072A
+#define MT6397_AUDDIGMI_CON0 0x072C
+#define MT6397_AUDLSBUF_CON0 0x072E
+#define MT6397_AUDLSBUF_CON1 0x0730
+#define MT6397_AUDENCSPARE_CON0 0x0732
+#define MT6397_AUDENCCLKSQ_CON0 0x0734
+#define MT6397_AUDPREAMPGAIN_CON0 0x0736
+#define MT6397_ZCD_CON0 0x0738
+#define MT6397_ZCD_CON1 0x073A
+#define MT6397_ZCD_CON2 0x073C
+#define MT6397_ZCD_CON3 0x073E
+#define MT6397_ZCD_CON4 0x0740
+#define MT6397_ZCD_CON5 0x0742
+#define MT6397_NCP_CLKDIV_CON0 0x0744
+#define MT6397_NCP_CLKDIV_CON1 0x0746
+
+#endif /* __MFD_MT6397_REGISTERS_H__ */
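The new header is a flat list of PMIC register offsets; MT6397 sub-drivers are expected to reach them through the parent MFD's regmap rather than map the hardware themselves. A minimal sketch of a read, assuming the parent exposes struct mt6397_chip with a regmap member as the mainline mt6397-core driver does (the helper name is made up):

    /* Illustrative only, not part of the patch. */
    #include <linux/regmap.h>
    #include <linux/mfd/mt6397/core.h>
    #include <linux/mfd/mt6397/registers.h>

    static int example_read_vcore_con0(struct mt6397_chip *pmic,
                                       unsigned int *val)
    {
            /* Each offset above addresses a 16-bit word behind the PMIC wrapper. */
            return regmap_read(pmic->regmap, MT6397_VCORE_CON0, val);
    }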
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index fb09312d854b..441b6ee72691 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -156,6 +156,9 @@ enum rk808_reg {
#define BUCK2_RATE_MASK (3 << 3)
#define MASK_ALL 0xff
+#define BUCK_UV_ACT_MASK 0x0f
+#define BUCK_UV_ACT_DISABLE 0
+
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
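The two BUCK_UV_ACT_* additions name the buck under-voltage action field. A hedged sketch of clearing it, assuming the field lives in RK808_BUCK1_CONFIG_REG and that struct rk808 exposes a regmap member as in mainline; verify both against the regulator driver before reuse:

    /* Illustrative only; register choice and regmap member are assumptions. */
    #include <linux/regmap.h>
    #include <linux/mfd/rk808.h>

    static int example_disable_buck1_uv_action(struct rk808 *rk808)
    {
            return regmap_update_bits(rk808->regmap, RK808_BUCK1_CONFIG_REG,
                                      BUCK_UV_ACT_MASK, BUCK_UV_ACT_DISABLE);
    }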
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 0c12628e91c6..ff843e7ca23d 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -28,74 +28,72 @@
#define MAX_RW_REG_CNT 1024
-/* PCI Operation Register Address */
#define RTSX_HCBAR 0x00
#define RTSX_HCBCTLR 0x04
+#define STOP_CMD (0x01 << 28)
+#define READ_REG_CMD 0
+#define WRITE_REG_CMD 1
+#define CHECK_REG_CMD 2
+
#define RTSX_HDBAR 0x08
+#define SG_INT 0x04
+#define SG_END 0x02
+#define SG_VALID 0x01
+#define SG_NO_OP 0x00
+#define SG_TRANS_DATA (0x02 << 4)
+#define SG_LINK_DESC (0x03 << 4)
#define RTSX_HDBCTLR 0x0C
+#define SDMA_MODE 0x00
+#define ADMA_MODE (0x02 << 26)
+#define STOP_DMA (0x01 << 28)
+#define TRIG_DMA (0x01 << 31)
+
#define RTSX_HAIMR 0x10
-#define RTSX_BIPR 0x14
-#define RTSX_BIER 0x18
+#define HAIMR_TRANS_START (0x01 << 31)
+#define HAIMR_READ 0x00
+#define HAIMR_WRITE (0x01 << 30)
+#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ)
+#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE)
+#define HAIMR_TRANS_END (HAIMR_TRANS_START)
-/* Host command buffer control register */
-#define STOP_CMD (0x01 << 28)
-
-/* Host data buffer control register */
-#define SDMA_MODE 0x00
-#define ADMA_MODE (0x02 << 26)
-#define STOP_DMA (0x01 << 28)
-#define TRIG_DMA (0x01 << 31)
-
-/* Host access internal memory register */
-#define HAIMR_TRANS_START (0x01 << 31)
-#define HAIMR_READ 0x00
-#define HAIMR_WRITE (0x01 << 30)
-#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ)
-#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE)
-#define HAIMR_TRANS_END (HAIMR_TRANS_START)
-
-/* Bus interrupt pending register */
-#define CMD_DONE_INT (1 << 31)
-#define DATA_DONE_INT (1 << 30)
-#define TRANS_OK_INT (1 << 29)
-#define TRANS_FAIL_INT (1 << 28)
-#define XD_INT (1 << 27)
-#define MS_INT (1 << 26)
-#define SD_INT (1 << 25)
-#define GPIO0_INT (1 << 24)
-#define OC_INT (1 << 23)
-#define SD_WRITE_PROTECT (1 << 19)
-#define XD_EXIST (1 << 18)
-#define MS_EXIST (1 << 17)
-#define SD_EXIST (1 << 16)
-#define DELINK_INT GPIO0_INT
-#define MS_OC_INT (1 << 23)
-#define SD_OC_INT (1 << 22)
+#define RTSX_BIPR 0x14
+#define CMD_DONE_INT (1 << 31)
+#define DATA_DONE_INT (1 << 30)
+#define TRANS_OK_INT (1 << 29)
+#define TRANS_FAIL_INT (1 << 28)
+#define XD_INT (1 << 27)
+#define MS_INT (1 << 26)
+#define SD_INT (1 << 25)
+#define GPIO0_INT (1 << 24)
+#define OC_INT (1 << 23)
+#define SD_WRITE_PROTECT (1 << 19)
+#define XD_EXIST (1 << 18)
+#define MS_EXIST (1 << 17)
+#define SD_EXIST (1 << 16)
+#define DELINK_INT GPIO0_INT
+#define MS_OC_INT (1 << 23)
+#define SD_OC_INT (1 << 22)
#define CARD_INT (XD_INT | MS_INT | SD_INT)
#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | \
CARD_INT | GPIO0_INT | OC_INT)
-
#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
-/* Bus interrupt enable register */
-#define CMD_DONE_INT_EN (1 << 31)
-#define DATA_DONE_INT_EN (1 << 30)
-#define TRANS_OK_INT_EN (1 << 29)
-#define TRANS_FAIL_INT_EN (1 << 28)
-#define XD_INT_EN (1 << 27)
-#define MS_INT_EN (1 << 26)
-#define SD_INT_EN (1 << 25)
-#define GPIO0_INT_EN (1 << 24)
-#define OC_INT_EN (1 << 23)
-#define DELINK_INT_EN GPIO0_INT_EN
-#define MS_OC_INT_EN (1 << 23)
-#define SD_OC_INT_EN (1 << 22)
-
-#define READ_REG_CMD 0
-#define WRITE_REG_CMD 1
-#define CHECK_REG_CMD 2
+#define RTSX_BIER 0x18
+#define CMD_DONE_INT_EN (1 << 31)
+#define DATA_DONE_INT_EN (1 << 30)
+#define TRANS_OK_INT_EN (1 << 29)
+#define TRANS_FAIL_INT_EN (1 << 28)
+#define XD_INT_EN (1 << 27)
+#define MS_INT_EN (1 << 26)
+#define SD_INT_EN (1 << 25)
+#define GPIO0_INT_EN (1 << 24)
+#define OC_INT_EN (1 << 23)
+#define DELINK_INT_EN GPIO0_INT_EN
+#define MS_OC_INT_EN (1 << 23)
+#define SD_OC_INT_EN (1 << 22)
+
/*
* macros for easy use
@@ -125,423 +123,68 @@
#define rtsx_pci_write_config_dword(pcr, where, val) \
pci_write_config_dword((pcr)->pci, where, val)
-#define STATE_TRANS_NONE 0
-#define STATE_TRANS_CMD 1
-#define STATE_TRANS_BUF 2
-#define STATE_TRANS_SG 3
-
-#define TRANS_NOT_READY 0
-#define TRANS_RESULT_OK 1
-#define TRANS_RESULT_FAIL 2
-#define TRANS_NO_DEVICE 3
-
-#define RTSX_RESV_BUF_LEN 4096
-#define HOST_CMDS_BUF_LEN 1024
-#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
-#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8)
-#define MAX_SG_ITEM_LEN 0x80000
-
-#define HOST_TO_DEVICE 0
-#define DEVICE_TO_HOST 1
-
-#define RTSX_PHASE_MAX 32
-#define RX_TUNING_CNT 3
-
-/* SG descriptor */
-#define SG_INT 0x04
-#define SG_END 0x02
-#define SG_VALID 0x01
-
-#define SG_NO_OP 0x00
-#define SG_TRANS_DATA (0x02 << 4)
-#define SG_LINK_DESC (0x03 << 4)
-
-/* Output voltage */
-#define OUTPUT_3V3 0
-#define OUTPUT_1V8 1
-
-/* Card Clock Enable Register */
-#define SD_CLK_EN 0x04
-#define MS_CLK_EN 0x08
-
-/* Card Select Register */
-#define SD_MOD_SEL 2
-#define MS_MOD_SEL 3
-
-/* Card Output Enable Register */
-#define SD_OUTPUT_EN 0x04
-#define MS_OUTPUT_EN 0x08
-
-/* CARD_SHARE_MODE */
-#define CARD_SHARE_MASK 0x0F
-#define CARD_SHARE_MULTI_LUN 0x00
-#define CARD_SHARE_NORMAL 0x00
-#define CARD_SHARE_48_SD 0x04
-#define CARD_SHARE_48_MS 0x08
-/* CARD_SHARE_MODE for barossa */
-#define CARD_SHARE_BAROSSA_SD 0x01
-#define CARD_SHARE_BAROSSA_MS 0x02
-
-/* CARD_DRIVE_SEL */
-#define MS_DRIVE_8mA (0x01 << 6)
-#define MMC_DRIVE_8mA (0x01 << 4)
-#define XD_DRIVE_8mA (0x01 << 2)
-#define GPIO_DRIVE_8mA 0x01
-#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
- XD_DRIVE_8mA | GPIO_DRIVE_8mA)
-#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
- XD_DRIVE_8mA)
-#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define STATE_TRANS_NONE 0
+#define STATE_TRANS_CMD 1
+#define STATE_TRANS_BUF 2
+#define STATE_TRANS_SG 3
-/* SD30_DRIVE_SEL */
-#define DRIVER_TYPE_A 0x05
-#define DRIVER_TYPE_B 0x03
-#define DRIVER_TYPE_C 0x02
-#define DRIVER_TYPE_D 0x01
-#define CFG_DRIVER_TYPE_A 0x02
-#define CFG_DRIVER_TYPE_B 0x03
-#define CFG_DRIVER_TYPE_C 0x01
-#define CFG_DRIVER_TYPE_D 0x00
-
-/* FPDCTL */
-#define SSC_POWER_DOWN 0x01
-#define SD_OC_POWER_DOWN 0x02
-#define ALL_POWER_DOWN 0x07
-#define OC_POWER_DOWN 0x06
-
-/* CLK_CTL */
-#define CHANGE_CLK 0x01
-
-/* LDO_CTL */
-#define BPP_ASIC_1V7 0x00
-#define BPP_ASIC_1V8 0x01
-#define BPP_ASIC_1V9 0x02
-#define BPP_ASIC_2V0 0x03
-#define BPP_ASIC_2V7 0x04
-#define BPP_ASIC_2V8 0x05
-#define BPP_ASIC_3V2 0x06
-#define BPP_ASIC_3V3 0x07
-#define BPP_REG_TUNED18 0x07
-#define BPP_TUNED18_SHIFT_8402 5
-#define BPP_TUNED18_SHIFT_8411 4
-#define BPP_PAD_MASK 0x04
-#define BPP_PAD_3V3 0x04
-#define BPP_PAD_1V8 0x00
-#define BPP_LDO_POWB 0x03
-#define BPP_LDO_ON 0x00
-#define BPP_LDO_SUSPEND 0x02
-#define BPP_LDO_OFF 0x03
-
-/* CD_PAD_CTL */
-#define CD_DISABLE_MASK 0x07
-#define MS_CD_DISABLE 0x04
-#define SD_CD_DISABLE 0x02
-#define XD_CD_DISABLE 0x01
-#define CD_DISABLE 0x07
-#define CD_ENABLE 0x00
-#define MS_CD_EN_ONLY 0x03
-#define SD_CD_EN_ONLY 0x05
-#define XD_CD_EN_ONLY 0x06
-#define FORCE_CD_LOW_MASK 0x38
-#define FORCE_CD_XD_LOW 0x08
-#define FORCE_CD_SD_LOW 0x10
-#define FORCE_CD_MS_LOW 0x20
-#define CD_AUTO_DISABLE 0x40
-
-/* SD_STAT1 */
-#define SD_CRC7_ERR 0x80
-#define SD_CRC16_ERR 0x40
-#define SD_CRC_WRITE_ERR 0x20
-#define SD_CRC_WRITE_ERR_MASK 0x1C
-#define GET_CRC_TIME_OUT 0x02
-#define SD_TUNING_COMPARE_ERR 0x01
-
-/* SD_STAT2 */
-#define SD_RSP_80CLK_TIMEOUT 0x01
-
-/* SD_BUS_STAT */
-#define SD_CLK_TOGGLE_EN 0x80
-#define SD_CLK_FORCE_STOP 0x40
-#define SD_DAT3_STATUS 0x10
-#define SD_DAT2_STATUS 0x08
-#define SD_DAT1_STATUS 0x04
-#define SD_DAT0_STATUS 0x02
-#define SD_CMD_STATUS 0x01
-
-/* SD_PAD_CTL */
-#define SD_IO_USING_1V8 0x80
-#define SD_IO_USING_3V3 0x7F
-#define TYPE_A_DRIVING 0x00
-#define TYPE_B_DRIVING 0x01
-#define TYPE_C_DRIVING 0x02
-#define TYPE_D_DRIVING 0x03
-
-/* SD_SAMPLE_POINT_CTL */
-#define DDR_FIX_RX_DAT 0x00
-#define DDR_VAR_RX_DAT 0x80
-#define DDR_FIX_RX_DAT_EDGE 0x00
-#define DDR_FIX_RX_DAT_14_DELAY 0x40
-#define DDR_FIX_RX_CMD 0x00
-#define DDR_VAR_RX_CMD 0x20
-#define DDR_FIX_RX_CMD_POS_EDGE 0x00
-#define DDR_FIX_RX_CMD_14_DELAY 0x10
-#define SD20_RX_POS_EDGE 0x00
-#define SD20_RX_14_DELAY 0x08
-#define SD20_RX_SEL_MASK 0x08
+#define TRANS_NOT_READY 0
+#define TRANS_RESULT_OK 1
+#define TRANS_RESULT_FAIL 2
+#define TRANS_NO_DEVICE 3
-/* SD_PUSH_POINT_CTL */
-#define DDR_FIX_TX_CMD_DAT 0x00
-#define DDR_VAR_TX_CMD_DAT 0x80
-#define DDR_FIX_TX_DAT_14_TSU 0x00
-#define DDR_FIX_TX_DAT_12_TSU 0x40
-#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
-#define DDR_FIX_TX_CMD_14_AHEAD 0x20
-#define SD20_TX_NEG_EDGE 0x00
-#define SD20_TX_14_AHEAD 0x10
-#define SD20_TX_SEL_MASK 0x10
-#define DDR_VAR_SDCLK_POL_SWAP 0x01
-
-/* SD_TRANSFER */
-#define SD_TRANSFER_START 0x80
-#define SD_TRANSFER_END 0x40
-#define SD_STAT_IDLE 0x20
-#define SD_TRANSFER_ERR 0x10
-/* SD Transfer Mode definition */
-#define SD_TM_NORMAL_WRITE 0x00
-#define SD_TM_AUTO_WRITE_3 0x01
-#define SD_TM_AUTO_WRITE_4 0x02
-#define SD_TM_AUTO_READ_3 0x05
-#define SD_TM_AUTO_READ_4 0x06
-#define SD_TM_CMD_RSP 0x08
-#define SD_TM_AUTO_WRITE_1 0x09
-#define SD_TM_AUTO_WRITE_2 0x0A
-#define SD_TM_NORMAL_READ 0x0C
-#define SD_TM_AUTO_READ_1 0x0D
-#define SD_TM_AUTO_READ_2 0x0E
-#define SD_TM_AUTO_TUNING 0x0F
-
-/* SD_VPTX_CTL / SD_VPRX_CTL */
-#define PHASE_CHANGE 0x80
-#define PHASE_NOT_RESET 0x40
-
-/* SD_DCMPS_TX_CTL / SD_DCMPS_RX_CTL */
-#define DCMPS_CHANGE 0x80
-#define DCMPS_CHANGE_DONE 0x40
-#define DCMPS_ERROR 0x20
-#define DCMPS_CURRENT_PHASE 0x1F
-
-/* SD Configure 1 Register */
-#define SD_CLK_DIVIDE_0 0x00
-#define SD_CLK_DIVIDE_256 0xC0
-#define SD_CLK_DIVIDE_128 0x80
-#define SD_BUS_WIDTH_1BIT 0x00
-#define SD_BUS_WIDTH_4BIT 0x01
-#define SD_BUS_WIDTH_8BIT 0x02
-#define SD_ASYNC_FIFO_NOT_RST 0x10
-#define SD_20_MODE 0x00
-#define SD_DDR_MODE 0x04
-#define SD_30_MODE 0x08
-
-#define SD_CLK_DIVIDE_MASK 0xC0
-
-/* SD_CMD_STATE */
-#define SD_CMD_IDLE 0x80
-
-/* SD_DATA_STATE */
-#define SD_DATA_IDLE 0x80
-
-/* DCM_DRP_CTL */
-#define DCM_RESET 0x08
-#define DCM_LOCKED 0x04
-#define DCM_208M 0x00
-#define DCM_TX 0x01
-#define DCM_RX 0x02
-
-/* DCM_DRP_TRIG */
-#define DRP_START 0x80
-#define DRP_DONE 0x40
-
-/* DCM_DRP_CFG */
-#define DRP_WRITE 0x80
-#define DRP_READ 0x00
-#define DCM_WRITE_ADDRESS_50 0x50
-#define DCM_WRITE_ADDRESS_51 0x51
-#define DCM_READ_ADDRESS_00 0x00
-#define DCM_READ_ADDRESS_51 0x51
-
-/* IRQSTAT0 */
-#define DMA_DONE_INT 0x80
-#define SUSPEND_INT 0x40
-#define LINK_RDY_INT 0x20
-#define LINK_DOWN_INT 0x10
-
-/* DMACTL */
-#define DMA_RST 0x80
-#define DMA_BUSY 0x04
-#define DMA_DIR_TO_CARD 0x00
-#define DMA_DIR_FROM_CARD 0x02
-#define DMA_EN 0x01
-#define DMA_128 (0 << 4)
-#define DMA_256 (1 << 4)
-#define DMA_512 (2 << 4)
-#define DMA_1024 (3 << 4)
-#define DMA_PACK_SIZE_MASK 0x30
-
-/* SSC_CTL1 */
-#define SSC_RSTB 0x80
-#define SSC_8X_EN 0x40
-#define SSC_FIX_FRAC 0x20
-#define SSC_SEL_1M 0x00
-#define SSC_SEL_2M 0x08
-#define SSC_SEL_4M 0x10
-#define SSC_SEL_8M 0x18
-
-/* SSC_CTL2 */
-#define SSC_DEPTH_MASK 0x07
-#define SSC_DEPTH_DISALBE 0x00
-#define SSC_DEPTH_4M 0x01
-#define SSC_DEPTH_2M 0x02
-#define SSC_DEPTH_1M 0x03
-#define SSC_DEPTH_500K 0x04
-#define SSC_DEPTH_250K 0x05
-
-/* System Clock Control Register */
-#define CLK_LOW_FREQ 0x01
-
-/* System Clock Divider Register */
-#define CLK_DIV_1 0x01
-#define CLK_DIV_2 0x02
-#define CLK_DIV_4 0x03
-#define CLK_DIV_8 0x04
-
-/* MS_CFG */
-#define SAMPLE_TIME_RISING 0x00
-#define SAMPLE_TIME_FALLING 0x80
-#define PUSH_TIME_DEFAULT 0x00
-#define PUSH_TIME_ODD 0x40
-#define NO_EXTEND_TOGGLE 0x00
-#define EXTEND_TOGGLE_CHK 0x20
-#define MS_BUS_WIDTH_1 0x00
-#define MS_BUS_WIDTH_4 0x10
-#define MS_BUS_WIDTH_8 0x18
-#define MS_2K_SECTOR_MODE 0x04
-#define MS_512_SECTOR_MODE 0x00
-#define MS_TOGGLE_TIMEOUT_EN 0x00
-#define MS_TOGGLE_TIMEOUT_DISEN 0x01
-#define MS_NO_CHECK_INT 0x02
+#define RTSX_RESV_BUF_LEN 4096
+#define HOST_CMDS_BUF_LEN 1024
+#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
+#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8)
+#define MAX_SG_ITEM_LEN 0x80000
+#define HOST_TO_DEVICE 0
+#define DEVICE_TO_HOST 1
-/* MS_TRANS_CFG */
-#define WAIT_INT 0x80
-#define NO_WAIT_INT 0x00
-#define NO_AUTO_READ_INT_REG 0x00
-#define AUTO_READ_INT_REG 0x40
-#define MS_CRC16_ERR 0x20
-#define MS_RDY_TIMEOUT 0x10
-#define MS_INT_CMDNK 0x08
-#define MS_INT_BREQ 0x04
-#define MS_INT_ERR 0x02
-#define MS_INT_CED 0x01
-
-/* MS_TRANSFER */
-#define MS_TRANSFER_START 0x80
-#define MS_TRANSFER_END 0x40
-#define MS_TRANSFER_ERR 0x20
-#define MS_BS_STATE 0x10
-#define MS_TM_READ_BYTES 0x00
-#define MS_TM_NORMAL_READ 0x01
-#define MS_TM_WRITE_BYTES 0x04
-#define MS_TM_NORMAL_WRITE 0x05
-#define MS_TM_AUTO_READ 0x08
-#define MS_TM_AUTO_WRITE 0x0C
-
-/* SD Configure 2 Register */
-#define SD_CALCULATE_CRC7 0x00
-#define SD_NO_CALCULATE_CRC7 0x80
-#define SD_CHECK_CRC16 0x00
-#define SD_NO_CHECK_CRC16 0x40
-#define SD_NO_CHECK_WAIT_CRC_TO 0x20
-#define SD_WAIT_BUSY_END 0x08
-#define SD_NO_WAIT_BUSY_END 0x00
-#define SD_CHECK_CRC7 0x00
-#define SD_NO_CHECK_CRC7 0x04
-#define SD_RSP_LEN_0 0x00
-#define SD_RSP_LEN_6 0x01
-#define SD_RSP_LEN_17 0x02
-/* SD/MMC Response Type Definition */
-#define SD_RSP_TYPE_R0 0x04
-#define SD_RSP_TYPE_R1 0x01
-#define SD_RSP_TYPE_R1b 0x09
-#define SD_RSP_TYPE_R2 0x02
-#define SD_RSP_TYPE_R3 0x05
-#define SD_RSP_TYPE_R4 0x05
-#define SD_RSP_TYPE_R5 0x01
-#define SD_RSP_TYPE_R6 0x01
-#define SD_RSP_TYPE_R7 0x01
-
-/* SD_CONFIGURE3 */
-#define SD_RSP_80CLK_TIMEOUT_EN 0x01
-
-/* Card Transfer Reset Register */
-#define SPI_STOP 0x01
-#define XD_STOP 0x02
-#define SD_STOP 0x04
-#define MS_STOP 0x08
-#define SPI_CLR_ERR 0x10
-#define XD_CLR_ERR 0x20
-#define SD_CLR_ERR 0x40
-#define MS_CLR_ERR 0x80
-
-/* Card Data Source Register */
-#define PINGPONG_BUFFER 0x01
-#define RING_BUFFER 0x00
-
-/* Card Power Control Register */
-#define PMOS_STRG_MASK 0x10
-#define PMOS_STRG_800mA 0x10
-#define PMOS_STRG_400mA 0x00
-#define SD_POWER_OFF 0x03
-#define SD_PARTIAL_POWER_ON 0x01
-#define SD_POWER_ON 0x00
-#define SD_POWER_MASK 0x03
-#define MS_POWER_OFF 0x0C
-#define MS_PARTIAL_POWER_ON 0x04
-#define MS_POWER_ON 0x00
-#define MS_POWER_MASK 0x0C
-#define BPP_POWER_OFF 0x0F
-#define BPP_POWER_5_PERCENT_ON 0x0E
-#define BPP_POWER_10_PERCENT_ON 0x0C
-#define BPP_POWER_15_PERCENT_ON 0x08
-#define BPP_POWER_ON 0x00
-#define BPP_POWER_MASK 0x0F
-#define SD_VCC_PARTIAL_POWER_ON 0x02
-#define SD_VCC_POWER_ON 0x00
-
-/* PWR_GATE_CTRL */
-#define PWR_GATE_EN 0x01
-#define LDO3318_PWR_MASK 0x06
-#define LDO_ON 0x00
-#define LDO_SUSPEND 0x04
-#define LDO_OFF 0x06
-
-/* CARD_CLK_SOURCE */
-#define CRC_FIX_CLK (0x00 << 0)
-#define CRC_VAR_CLK0 (0x01 << 0)
-#define CRC_VAR_CLK1 (0x02 << 0)
-#define SD30_FIX_CLK (0x00 << 2)
-#define SD30_VAR_CLK0 (0x01 << 2)
-#define SD30_VAR_CLK1 (0x02 << 2)
-#define SAMPLE_FIX_CLK (0x00 << 4)
-#define SAMPLE_VAR_CLK0 (0x01 << 4)
-#define SAMPLE_VAR_CLK1 (0x02 << 4)
-
-/* HOST_SLEEP_STATE */
-#define HOST_ENTER_S1 1
-#define HOST_ENTER_S3 2
+#define OUTPUT_3V3 0
+#define OUTPUT_1V8 1
+
+#define RTSX_PHASE_MAX 32
+#define RX_TUNING_CNT 3
#define MS_CFG 0xFD40
+#define SAMPLE_TIME_RISING 0x00
+#define SAMPLE_TIME_FALLING 0x80
+#define PUSH_TIME_DEFAULT 0x00
+#define PUSH_TIME_ODD 0x40
+#define NO_EXTEND_TOGGLE 0x00
+#define EXTEND_TOGGLE_CHK 0x20
+#define MS_BUS_WIDTH_1 0x00
+#define MS_BUS_WIDTH_4 0x10
+#define MS_BUS_WIDTH_8 0x18
+#define MS_2K_SECTOR_MODE 0x04
+#define MS_512_SECTOR_MODE 0x00
+#define MS_TOGGLE_TIMEOUT_EN 0x00
+#define MS_TOGGLE_TIMEOUT_DISEN 0x01
+#define MS_NO_CHECK_INT 0x02
#define MS_TPC 0xFD41
#define MS_TRANS_CFG 0xFD42
+#define WAIT_INT 0x80
+#define NO_WAIT_INT 0x00
+#define NO_AUTO_READ_INT_REG 0x00
+#define AUTO_READ_INT_REG 0x40
+#define MS_CRC16_ERR 0x20
+#define MS_RDY_TIMEOUT 0x10
+#define MS_INT_CMDNK 0x08
+#define MS_INT_BREQ 0x04
+#define MS_INT_ERR 0x02
+#define MS_INT_CED 0x01
#define MS_TRANSFER 0xFD43
+#define MS_TRANSFER_START 0x80
+#define MS_TRANSFER_END 0x40
+#define MS_TRANSFER_ERR 0x20
+#define MS_BS_STATE 0x10
+#define MS_TM_READ_BYTES 0x00
+#define MS_TM_NORMAL_READ 0x01
+#define MS_TM_WRITE_BYTES 0x04
+#define MS_TM_NORMAL_WRITE 0x05
+#define MS_TM_AUTO_READ 0x08
+#define MS_TM_AUTO_WRITE 0x0C
#define MS_INT_REG 0xFD44
#define MS_BYTE_CNT 0xFD45
#define MS_SECTOR_CNT_L 0xFD46
@@ -549,14 +192,90 @@
#define MS_DBUS_H 0xFD48
#define SD_CFG1 0xFDA0
+#define SD_CLK_DIVIDE_0 0x00
+#define SD_CLK_DIVIDE_256 0xC0
+#define SD_CLK_DIVIDE_128 0x80
+#define SD_BUS_WIDTH_1BIT 0x00
+#define SD_BUS_WIDTH_4BIT 0x01
+#define SD_BUS_WIDTH_8BIT 0x02
+#define SD_ASYNC_FIFO_NOT_RST 0x10
+#define SD_20_MODE 0x00
+#define SD_DDR_MODE 0x04
+#define SD_30_MODE 0x08
+#define SD_CLK_DIVIDE_MASK 0xC0
#define SD_CFG2 0xFDA1
+#define SD_CALCULATE_CRC7 0x00
+#define SD_NO_CALCULATE_CRC7 0x80
+#define SD_CHECK_CRC16 0x00
+#define SD_NO_CHECK_CRC16 0x40
+#define SD_NO_CHECK_WAIT_CRC_TO 0x20
+#define SD_WAIT_BUSY_END 0x08
+#define SD_NO_WAIT_BUSY_END 0x00
+#define SD_CHECK_CRC7 0x00
+#define SD_NO_CHECK_CRC7 0x04
+#define SD_RSP_LEN_0 0x00
+#define SD_RSP_LEN_6 0x01
+#define SD_RSP_LEN_17 0x02
+#define SD_RSP_TYPE_R0 0x04
+#define SD_RSP_TYPE_R1 0x01
+#define SD_RSP_TYPE_R1b 0x09
+#define SD_RSP_TYPE_R2 0x02
+#define SD_RSP_TYPE_R3 0x05
+#define SD_RSP_TYPE_R4 0x05
+#define SD_RSP_TYPE_R5 0x01
+#define SD_RSP_TYPE_R6 0x01
+#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
+#define SD_RSP_80CLK_TIMEOUT_EN 0x01
+
#define SD_STAT1 0xFDA3
+#define SD_CRC7_ERR 0x80
+#define SD_CRC16_ERR 0x40
+#define SD_CRC_WRITE_ERR 0x20
+#define SD_CRC_WRITE_ERR_MASK 0x1C
+#define GET_CRC_TIME_OUT 0x02
+#define SD_TUNING_COMPARE_ERR 0x01
#define SD_STAT2 0xFDA4
+#define SD_RSP_80CLK_TIMEOUT 0x01
+
#define SD_BUS_STAT 0xFDA5
+#define SD_CLK_TOGGLE_EN 0x80
+#define SD_CLK_FORCE_STOP 0x40
+#define SD_DAT3_STATUS 0x10
+#define SD_DAT2_STATUS 0x08
+#define SD_DAT1_STATUS 0x04
+#define SD_DAT0_STATUS 0x02
+#define SD_CMD_STATUS 0x01
#define SD_PAD_CTL 0xFDA6
+#define SD_IO_USING_1V8 0x80
+#define SD_IO_USING_3V3 0x7F
+#define TYPE_A_DRIVING 0x00
+#define TYPE_B_DRIVING 0x01
+#define TYPE_C_DRIVING 0x02
+#define TYPE_D_DRIVING 0x03
#define SD_SAMPLE_POINT_CTL 0xFDA7
+#define DDR_FIX_RX_DAT 0x00
+#define DDR_VAR_RX_DAT 0x80
+#define DDR_FIX_RX_DAT_EDGE 0x00
+#define DDR_FIX_RX_DAT_14_DELAY 0x40
+#define DDR_FIX_RX_CMD 0x00
+#define DDR_VAR_RX_CMD 0x20
+#define DDR_FIX_RX_CMD_POS_EDGE 0x00
+#define DDR_FIX_RX_CMD_14_DELAY 0x10
+#define SD20_RX_POS_EDGE 0x00
+#define SD20_RX_14_DELAY 0x08
+#define SD20_RX_SEL_MASK 0x08
#define SD_PUSH_POINT_CTL 0xFDA8
+#define DDR_FIX_TX_CMD_DAT 0x00
+#define DDR_VAR_TX_CMD_DAT 0x80
+#define DDR_FIX_TX_DAT_14_TSU 0x00
+#define DDR_FIX_TX_DAT_12_TSU 0x40
+#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
+#define DDR_FIX_TX_CMD_14_AHEAD 0x20
+#define SD20_TX_NEG_EDGE 0x00
+#define SD20_TX_14_AHEAD 0x10
+#define SD20_TX_SEL_MASK 0x10
+#define DDR_VAR_SDCLK_POL_SWAP 0x01
#define SD_CMD0 0xFDA9
#define SD_CMD_START 0x40
#define SD_CMD1 0xFDAA
@@ -569,60 +288,203 @@
#define SD_BLOCK_CNT_L 0xFDB1
#define SD_BLOCK_CNT_H 0xFDB2
#define SD_TRANSFER 0xFDB3
+#define SD_TRANSFER_START 0x80
+#define SD_TRANSFER_END 0x40
+#define SD_STAT_IDLE 0x20
+#define SD_TRANSFER_ERR 0x10
+#define SD_TM_NORMAL_WRITE 0x00
+#define SD_TM_AUTO_WRITE_3 0x01
+#define SD_TM_AUTO_WRITE_4 0x02
+#define SD_TM_AUTO_READ_3 0x05
+#define SD_TM_AUTO_READ_4 0x06
+#define SD_TM_CMD_RSP 0x08
+#define SD_TM_AUTO_WRITE_1 0x09
+#define SD_TM_AUTO_WRITE_2 0x0A
+#define SD_TM_NORMAL_READ 0x0C
+#define SD_TM_AUTO_READ_1 0x0D
+#define SD_TM_AUTO_READ_2 0x0E
+#define SD_TM_AUTO_TUNING 0x0F
#define SD_CMD_STATE 0xFDB5
+#define SD_CMD_IDLE 0x80
+
#define SD_DATA_STATE 0xFDB6
+#define SD_DATA_IDLE 0x80
#define SRCTL 0xFC13
-#define DCM_DRP_CTL 0xFC23
-#define DCM_DRP_TRIG 0xFC24
-#define DCM_DRP_CFG 0xFC25
-#define DCM_DRP_WR_DATA_L 0xFC26
-#define DCM_DRP_WR_DATA_H 0xFC27
-#define DCM_DRP_RD_DATA_L 0xFC28
-#define DCM_DRP_RD_DATA_H 0xFC29
+#define DCM_DRP_CTL 0xFC23
+#define DCM_RESET 0x08
+#define DCM_LOCKED 0x04
+#define DCM_208M 0x00
+#define DCM_TX 0x01
+#define DCM_RX 0x02
+#define DCM_DRP_TRIG 0xFC24
+#define DRP_START 0x80
+#define DRP_DONE 0x40
+#define DCM_DRP_CFG 0xFC25
+#define DRP_WRITE 0x80
+#define DRP_READ 0x00
+#define DCM_WRITE_ADDRESS_50 0x50
+#define DCM_WRITE_ADDRESS_51 0x51
+#define DCM_READ_ADDRESS_00 0x00
+#define DCM_READ_ADDRESS_51 0x51
+#define DCM_DRP_WR_DATA_L 0xFC26
+#define DCM_DRP_WR_DATA_H 0xFC27
+#define DCM_DRP_RD_DATA_L 0xFC28
+#define DCM_DRP_RD_DATA_H 0xFC29
#define SD_VPCLK0_CTL 0xFC2A
#define SD_VPCLK1_CTL 0xFC2B
#define SD_DCMPS0_CTL 0xFC2C
#define SD_DCMPS1_CTL 0xFC2D
#define SD_VPTX_CTL SD_VPCLK0_CTL
#define SD_VPRX_CTL SD_VPCLK1_CTL
+#define PHASE_CHANGE 0x80
+#define PHASE_NOT_RESET 0x40
#define SD_DCMPS_TX_CTL SD_DCMPS0_CTL
#define SD_DCMPS_RX_CTL SD_DCMPS1_CTL
+#define DCMPS_CHANGE 0x80
+#define DCMPS_CHANGE_DONE 0x40
+#define DCMPS_ERROR 0x20
+#define DCMPS_CURRENT_PHASE 0x1F
#define CARD_CLK_SOURCE 0xFC2E
-
+#define CRC_FIX_CLK (0x00 << 0)
+#define CRC_VAR_CLK0 (0x01 << 0)
+#define CRC_VAR_CLK1 (0x02 << 0)
+#define SD30_FIX_CLK (0x00 << 2)
+#define SD30_VAR_CLK0 (0x01 << 2)
+#define SD30_VAR_CLK1 (0x02 << 2)
+#define SAMPLE_FIX_CLK (0x00 << 4)
+#define SAMPLE_VAR_CLK0 (0x01 << 4)
+#define SAMPLE_VAR_CLK1 (0x02 << 4)
#define CARD_PWR_CTL 0xFD50
+#define PMOS_STRG_MASK 0x10
+#define PMOS_STRG_800mA 0x10
+#define PMOS_STRG_400mA 0x00
+#define SD_POWER_OFF 0x03
+#define SD_PARTIAL_POWER_ON 0x01
+#define SD_POWER_ON 0x00
+#define SD_POWER_MASK 0x03
+#define MS_POWER_OFF 0x0C
+#define MS_PARTIAL_POWER_ON 0x04
+#define MS_POWER_ON 0x00
+#define MS_POWER_MASK 0x0C
+#define BPP_POWER_OFF 0x0F
+#define BPP_POWER_5_PERCENT_ON 0x0E
+#define BPP_POWER_10_PERCENT_ON 0x0C
+#define BPP_POWER_15_PERCENT_ON 0x08
+#define BPP_POWER_ON 0x00
+#define BPP_POWER_MASK 0x0F
+#define SD_VCC_PARTIAL_POWER_ON 0x02
+#define SD_VCC_POWER_ON 0x00
#define CARD_CLK_SWITCH 0xFD51
#define RTL8411B_PACKAGE_MODE 0xFD51
#define CARD_SHARE_MODE 0xFD52
+#define CARD_SHARE_MASK 0x0F
+#define CARD_SHARE_MULTI_LUN 0x00
+#define CARD_SHARE_NORMAL 0x00
+#define CARD_SHARE_48_SD 0x04
+#define CARD_SHARE_48_MS 0x08
+#define CARD_SHARE_BAROSSA_SD 0x01
+#define CARD_SHARE_BAROSSA_MS 0x02
#define CARD_DRIVE_SEL 0xFD53
+#define MS_DRIVE_8mA (0x01 << 6)
+#define MMC_DRIVE_8mA (0x01 << 4)
+#define XD_DRIVE_8mA (0x01 << 2)
+#define GPIO_DRIVE_8mA 0x01
+#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA)
+#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+
#define CARD_STOP 0xFD54
+#define SPI_STOP 0x01
+#define XD_STOP 0x02
+#define SD_STOP 0x04
+#define MS_STOP 0x08
+#define SPI_CLR_ERR 0x10
+#define XD_CLR_ERR 0x20
+#define SD_CLR_ERR 0x40
+#define MS_CLR_ERR 0x80
#define CARD_OE 0xFD55
+#define SD_OUTPUT_EN 0x04
+#define MS_OUTPUT_EN 0x08
#define CARD_AUTO_BLINK 0xFD56
#define CARD_GPIO_DIR 0xFD57
#define CARD_GPIO 0xFD58
#define CARD_DATA_SOURCE 0xFD5B
+#define PINGPONG_BUFFER 0x01
+#define RING_BUFFER 0x00
#define SD30_CLK_DRIVE_SEL 0xFD5A
+#define DRIVER_TYPE_A 0x05
+#define DRIVER_TYPE_B 0x03
+#define DRIVER_TYPE_C 0x02
+#define DRIVER_TYPE_D 0x01
#define CARD_SELECT 0xFD5C
+#define SD_MOD_SEL 2
+#define MS_MOD_SEL 3
#define SD30_DRIVE_SEL 0xFD5E
+#define CFG_DRIVER_TYPE_A 0x02
+#define CFG_DRIVER_TYPE_B 0x03
+#define CFG_DRIVER_TYPE_C 0x01
+#define CFG_DRIVER_TYPE_D 0x00
#define SD30_CMD_DRIVE_SEL 0xFD5E
#define SD30_DAT_DRIVE_SEL 0xFD5F
#define CARD_CLK_EN 0xFD69
+#define SD_CLK_EN 0x04
+#define MS_CLK_EN 0x08
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
-
+#define CD_DISABLE_MASK 0x07
+#define MS_CD_DISABLE 0x04
+#define SD_CD_DISABLE 0x02
+#define XD_CD_DISABLE 0x01
+#define CD_DISABLE 0x07
+#define CD_ENABLE 0x00
+#define MS_CD_EN_ONLY 0x03
+#define SD_CD_EN_ONLY 0x05
+#define XD_CD_EN_ONLY 0x06
+#define FORCE_CD_LOW_MASK 0x38
+#define FORCE_CD_XD_LOW 0x08
+#define FORCE_CD_SD_LOW 0x10
+#define FORCE_CD_MS_LOW 0x20
+#define CD_AUTO_DISABLE 0x40
#define FPDCTL 0xFC00
+#define SSC_POWER_DOWN 0x01
+#define SD_OC_POWER_DOWN 0x02
+#define ALL_POWER_DOWN 0x07
+#define OC_POWER_DOWN 0x06
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
+#define CHANGE_CLK 0x01
+#define CLK_LOW_FREQ 0x01
+
#define CLK_DIV 0xFC03
+#define CLK_DIV_1 0x01
+#define CLK_DIV_2 0x02
+#define CLK_DIV_4 0x03
+#define CLK_DIV_8 0x04
#define CLK_SEL 0xFC04
#define SSC_DIV_N_0 0xFC0F
#define SSC_DIV_N_1 0xFC10
#define SSC_CTL1 0xFC11
+#define SSC_RSTB 0x80
+#define SSC_8X_EN 0x40
+#define SSC_FIX_FRAC 0x20
+#define SSC_SEL_1M 0x00
+#define SSC_SEL_2M 0x08
+#define SSC_SEL_4M 0x10
+#define SSC_SEL_8M 0x18
#define SSC_CTL2 0xFC12
-
+#define SSC_DEPTH_MASK 0x07
+#define SSC_DEPTH_DISALBE 0x00
+#define SSC_DEPTH_4M 0x01
+#define SSC_DEPTH_2M 0x02
+#define SSC_DEPTH_1M 0x03
+#define SSC_DEPTH_500K 0x04
+#define SSC_DEPTH_250K 0x05
#define RCCTL 0xFC14
#define FPGA_PULL_CTL 0xFC1D
@@ -630,6 +492,24 @@
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
+#define BPP_ASIC_1V7 0x00
+#define BPP_ASIC_1V8 0x01
+#define BPP_ASIC_1V9 0x02
+#define BPP_ASIC_2V0 0x03
+#define BPP_ASIC_2V7 0x04
+#define BPP_ASIC_2V8 0x05
+#define BPP_ASIC_3V2 0x06
+#define BPP_ASIC_3V3 0x07
+#define BPP_REG_TUNED18 0x07
+#define BPP_TUNED18_SHIFT_8402 5
+#define BPP_TUNED18_SHIFT_8411 4
+#define BPP_PAD_MASK 0x04
+#define BPP_PAD_3V3 0x04
+#define BPP_PAD_1V8 0x00
+#define BPP_LDO_POWB 0x03
+#define BPP_LDO_ON 0x00
+#define BPP_LDO_SUSPEND 0x02
+#define BPP_LDO_OFF 0x03
#define SYS_VER 0xFC32
#define CARD_PULL_CTL1 0xFD60
@@ -642,6 +522,10 @@
/* PCI Express Related Registers */
#define IRQEN0 0xFE20
#define IRQSTAT0 0xFE21
+#define DMA_DONE_INT 0x80
+#define SUSPEND_INT 0x40
+#define LINK_RDY_INT 0x20
+#define LINK_DOWN_INT 0x10
#define IRQEN1 0xFE22
#define IRQSTAT1 0xFE23
#define TLPRIEN 0xFE24
@@ -653,6 +537,16 @@
#define DMATC2 0xFE2A
#define DMATC3 0xFE2B
#define DMACTL 0xFE2C
+#define DMA_RST 0x80
+#define DMA_BUSY 0x04
+#define DMA_DIR_TO_CARD 0x00
+#define DMA_DIR_FROM_CARD 0x02
+#define DMA_EN 0x01
+#define DMA_128 (0 << 4)
+#define DMA_256 (1 << 4)
+#define DMA_512 (2 << 4)
+#define DMA_1024 (3 << 4)
+#define DMA_PACK_SIZE_MASK 0x30
#define BCTL 0xFE2D
#define RBBC0 0xFE2E
#define RBBC1 0xFE2F
@@ -678,14 +572,21 @@
#define MSGTXDATA2 0xFE46
#define MSGTXDATA3 0xFE47
#define MSGTXCTL 0xFE48
-#define PETXCFG 0xFE49
#define LTR_CTL 0xFE4A
#define OBFF_CFG 0xFE4C
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
+#define PCLK_CTL 0xFE55
+#define PCLK_MODE_SEL 0x20
#define PME_FORCE_CTL 0xFE56
+
#define ASPM_FORCE_CTL 0xFE57
+#define FORCE_ASPM_CTL0 0x10
+#define FORCE_ASPM_VAL_MASK 0x03
+#define FORCE_ASPM_L1_EN 0x02
+#define FORCE_ASPM_L0_EN 0x01
+#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
#define FUNC_FORCE_CTL 0xFE59
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -693,19 +594,36 @@
#define RESET_LOAD_REG 0xFE5E
#define EFUSE_CONTENT 0xFE5F
#define HOST_SLEEP_STATE 0xFE60
-#define SDIO_CFG 0xFE70
+#define HOST_ENTER_S1 1
+#define HOST_ENTER_S3 2
+#define SDIO_CFG 0xFE70
+#define PM_EVENT_DEBUG 0xFE71
+#define PME_DEBUG_0 0x08
#define NFTS_TX_CTRL 0xFE72
#define PWR_GATE_CTRL 0xFE75
+#define PWR_GATE_EN 0x01
+#define LDO3318_PWR_MASK 0x06
+#define LDO_ON 0x00
+#define LDO_SUSPEND 0x04
+#define LDO_OFF 0x06
#define PWD_SUSPEND_EN 0xFE76
#define LDO_PWR_SEL 0xFE78
+#define L1SUB_CONFIG1 0xFE8D
+#define L1SUB_CONFIG2 0xFE8E
+#define L1SUB_AUTO_CFG 0x02
+#define L1SUB_CONFIG3 0xFE8F
+
#define DUMMY_REG_RESET_0 0xFE90
#define AUTOLOAD_CFG_BASE 0xFF00
+#define PETXCFG 0xFF03
#define PM_CTRL1 0xFF44
+#define CD_RESUME_EN_MASK 0xF0
+
#define PM_CTRL2 0xFF45
#define PM_CTRL3 0xFF46
#define SDIO_SEND_PME_EN 0x80
@@ -726,18 +644,125 @@
#define IMAGE_FLAG_ADDR0 0xCE80
#define IMAGE_FLAG_ADDR1 0xCE81
+#define RREF_CFG 0xFF6C
+#define RREF_VBGSEL_MASK 0x38
+#define RREF_VBGSEL_1V25 0x28
+
+#define OOBS_CONFIG 0xFF6E
+#define OOBS_AUTOK_DIS 0x80
+#define OOBS_VAL_MASK 0x1F
+
+#define LDO_DV18_CFG 0xFF70
+#define LDO_DV18_SR_MASK 0xC0
+#define LDO_DV18_SR_DF 0x40
+
+#define LDO_CONFIG2 0xFF71
+#define LDO_D3318_MASK 0x07
+#define LDO_D3318_33V 0x07
+#define LDO_D3318_18V 0x02
+
+#define LDO_VCC_CFG0 0xFF72
+#define LDO_VCC_LMTVTH_MASK 0x30
+#define LDO_VCC_LMTVTH_2A 0x10
+
+#define LDO_VCC_CFG1 0xFF73
+#define LDO_VCC_REF_TUNE_MASK 0x30
+#define LDO_VCC_REF_1V2 0x20
+#define LDO_VCC_TUNE_MASK 0x07
+#define LDO_VCC_1V8 0x04
+#define LDO_VCC_3V3 0x07
+#define LDO_VCC_LMT_EN 0x08
+
+#define LDO_VIO_CFG 0xFF75
+#define LDO_VIO_SR_MASK 0xC0
+#define LDO_VIO_SR_DF 0x40
+#define LDO_VIO_REF_TUNE_MASK 0x30
+#define LDO_VIO_REF_1V2 0x20
+#define LDO_VIO_TUNE_MASK 0x07
+#define LDO_VIO_1V7 0x03
+#define LDO_VIO_1V8 0x04
+#define LDO_VIO_3V3 0x07
+
+#define LDO_DV12S_CFG 0xFF76
+#define LDO_REF12_TUNE_MASK 0x18
+#define LDO_REF12_TUNE_DF 0x10
+#define LDO_D12_TUNE_MASK 0x07
+#define LDO_D12_TUNE_DF 0x04
+
+#define LDO_AV12S_CFG 0xFF77
+#define LDO_AV12S_TUNE_MASK 0x07
+#define LDO_AV12S_TUNE_DF 0x04
+
+#define SD40_LDO_CTL1 0xFE7D
+#define SD40_VIO_TUNE_MASK 0x70
+#define SD40_VIO_TUNE_1V7 0x30
+#define SD_VIO_LDO_1V8 0x40
+#define SD_VIO_LDO_3V3 0x70
+
/* Phy register */
#define PHY_PCR 0x00
+#define PHY_PCR_FORCE_CODE 0xB000
+#define PHY_PCR_OOBS_CALI_50 0x0800
+#define PHY_PCR_OOBS_VCM_08 0x0200
+#define PHY_PCR_OOBS_SEN_90 0x0040
+#define PHY_PCR_RSSI_EN 0x0002
+#define PHY_PCR_RX10K 0x0001
+
#define PHY_RCR0 0x01
#define PHY_RCR1 0x02
+#define PHY_RCR1_ADP_TIME_4 0x0400
+#define PHY_RCR1_VCO_COARSE 0x001F
+#define PHY_SSCCR2 0x02
+#define PHY_SSCCR2_PLL_NCODE 0x0A00
+#define PHY_SSCCR2_TIME0 0x001C
+#define PHY_SSCCR2_TIME2_WIDTH 0x0003
+
#define PHY_RCR2 0x03
+#define PHY_RCR2_EMPHASE_EN 0x8000
+#define PHY_RCR2_NADJR 0x4000
+#define PHY_RCR2_CDR_SR_2 0x0100
+#define PHY_RCR2_FREQSEL_12 0x0040
+#define PHY_RCR2_CDR_SC_12P 0x0010
+#define PHY_RCR2_CALIB_LATE 0x0002
+#define PHY_SSCCR3 0x03
+#define PHY_SSCCR3_STEP_IN 0x2740
+#define PHY_SSCCR3_CHECK_DELAY 0x0008
+#define _PHY_ANA03 0x03
+#define _PHY_ANA03_TIMER_MAX 0x2700
+#define _PHY_ANA03_OOBS_DEB_EN 0x0040
+#define _PHY_CMU_DEBUG_EN 0x0008
+
#define PHY_RTCR 0x04
#define PHY_RDR 0x05
+#define PHY_RDR_RXDSEL_1_9 0x4000
+#define PHY_SSC_AUTO_PWD 0x0600
#define PHY_TCR0 0x06
#define PHY_TCR1 0x07
#define PHY_TUNE 0x08
+#define PHY_TUNE_TUNEREF_1_0 0x4000
+#define PHY_TUNE_VBGSEL_1252 0x0C00
+#define PHY_TUNE_SDBUS_33 0x0200
+#define PHY_TUNE_TUNED18 0x01C0
+#define PHY_TUNE_TUNED12 0X0020
+#define PHY_TUNE_TUNEA12 0x0004
+#define PHY_TUNE_VOLTAGE_MASK 0xFC3F
+#define PHY_TUNE_VOLTAGE_3V3 0x03C0
+#define PHY_TUNE_D18_1V8 0x0100
+#define PHY_TUNE_D18_1V7 0x0080
+#define PHY_ANA08 0x08
+#define PHY_ANA08_RX_EQ_DCGAIN 0x5000
+#define PHY_ANA08_SEL_RX_EN 0x0400
+#define PHY_ANA08_RX_EQ_VAL 0x03C0
+#define PHY_ANA08_SCP 0x0020
+#define PHY_ANA08_SEL_IPI 0x0004
+
#define PHY_IMR 0x09
#define PHY_BPCR 0x0A
+#define PHY_BPCR_IBRXSEL 0x0400
+#define PHY_BPCR_IBTXSEL 0x0100
+#define PHY_BPCR_IB_FILTER 0x0080
+#define PHY_BPCR_CMIRROR_EN 0x0040
+
#define PHY_BIST 0x0B
#define PHY_RAW_L 0x0C
#define PHY_RAW_H 0x0D
@@ -745,6 +770,7 @@
#define PHY_HOST_CLK_CTRL 0x0F
#define PHY_DMR 0x10
#define PHY_BACR 0x11
+#define PHY_BACR_BASIC_MASK 0xFFF3
#define PHY_IER 0x12
#define PHY_BCSR 0x13
#define PHY_BPR 0x14
@@ -752,80 +778,70 @@
#define PHY_BPNR 0x16
#define PHY_BRNR2 0x17
#define PHY_BENR 0x18
-#define PHY_REG_REV 0x19
+#define PHY_REV 0x19
+#define PHY_REV_RESV 0xE000
+#define PHY_REV_RXIDLE_LATCHED 0x1000
+#define PHY_REV_P1_EN 0x0800
+#define PHY_REV_RXIDLE_EN 0x0400
+#define PHY_REV_CLKREQ_TX_EN 0x0200
+#define PHY_REV_CLKREQ_RX_EN 0x0100
+#define PHY_REV_CLKREQ_DT_1_0 0x0040
+#define PHY_REV_STOP_CLKRD 0x0020
+#define PHY_REV_RX_PWST 0x0008
+#define PHY_REV_STOP_CLKWR 0x0004
+#define _PHY_REV0 0x19
+#define _PHY_REV0_FILTER_OUT 0x3800
+#define _PHY_REV0_CDR_BYPASS_PFD 0x0100
+#define _PHY_REV0_CDR_RX_IDLE_BYPASS 0x0002
+
#define PHY_FLD0 0x1A
+#define PHY_ANA1A 0x1A
+#define PHY_ANA1A_TXR_LOOPBACK 0x2000
+#define PHY_ANA1A_RXT_BIST 0x0500
+#define PHY_ANA1A_TXR_BIST 0x0040
+#define PHY_ANA1A_REV 0x0006
#define PHY_FLD1 0x1B
#define PHY_FLD2 0x1C
#define PHY_FLD3 0x1D
+#define PHY_FLD3_TIMER_4 0x0800
+#define PHY_FLD3_TIMER_6 0x0020
+#define PHY_FLD3_RXDELINK 0x0004
+#define PHY_ANA1D 0x1D
+#define PHY_ANA1D_DEBUG_ADDR 0x0004
+#define _PHY_FLD0 0x1D
+#define _PHY_FLD0_CLK_REQ_20C 0x8000
+#define _PHY_FLD0_RX_IDLE_EN 0x1000
+#define _PHY_FLD0_BIT_ERR_RSTN 0x0800
+#define _PHY_FLD0_BER_COUNT 0x01E0
+#define _PHY_FLD0_BER_TIMER 0x001E
+#define _PHY_FLD0_CHECK_EN 0x0001
+
#define PHY_FLD4 0x1E
+#define PHY_FLD4_FLDEN_SEL 0x4000
+#define PHY_FLD4_REQ_REF 0x2000
+#define PHY_FLD4_RXAMP_OFF 0x1000
+#define PHY_FLD4_REQ_ADDA 0x0800
+#define PHY_FLD4_BER_COUNT 0x00E0
+#define PHY_FLD4_BER_TIMER 0x000A
+#define PHY_FLD4_BER_CHK_EN 0x0001
+#define PHY_DIG1E 0x1E
+#define PHY_DIG1E_REV 0x4000
+#define PHY_DIG1E_D0_X_D1 0x1000
+#define PHY_DIG1E_RX_ON_HOST 0x0800
+#define PHY_DIG1E_RCLK_REF_HOST 0x0400
+#define PHY_DIG1E_RCLK_TX_EN_KEEP 0x0040
+#define PHY_DIG1E_RCLK_TX_TERM_KEEP 0x0020
+#define PHY_DIG1E_RCLK_RX_EIDLE_ON 0x0010
+#define PHY_DIG1E_TX_TERM_KEEP 0x0008
+#define PHY_DIG1E_RX_TERM_KEEP 0x0004
+#define PHY_DIG1E_TX_EN_KEEP 0x0002
+#define PHY_DIG1E_RX_EN_KEEP 0x0001
#define PHY_DUM_REG 0x1F
-#define LCTLR 0x80
-#define LCTLR_EXT_SYNC 0x80
-#define LCTLR_COMMON_CLOCK_CFG 0x40
-#define LCTLR_RETRAIN_LINK 0x20
-#define LCTLR_LINK_DISABLE 0x10
-#define LCTLR_RCB 0x08
-#define LCTLR_RESERVED 0x04
-#define LCTLR_ASPM_CTL_MASK 0x03
-
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
-/* Phy bits */
-#define PHY_PCR_FORCE_CODE 0xB000
-#define PHY_PCR_OOBS_CALI_50 0x0800
-#define PHY_PCR_OOBS_VCM_08 0x0200
-#define PHY_PCR_OOBS_SEN_90 0x0040
-#define PHY_PCR_RSSI_EN 0x0002
-
-#define PHY_RCR1_ADP_TIME 0x0100
-#define PHY_RCR1_VCO_COARSE 0x001F
-
-#define PHY_RCR2_EMPHASE_EN 0x8000
-#define PHY_RCR2_NADJR 0x4000
-#define PHY_RCR2_CDR_CP_10 0x0400
-#define PHY_RCR2_CDR_SR_2 0x0100
-#define PHY_RCR2_FREQSEL_12 0x0040
-#define PHY_RCR2_CPADJEN 0x0020
-#define PHY_RCR2_CDR_SC_8 0x0008
-#define PHY_RCR2_CALIB_LATE 0x0002
-
-#define PHY_RDR_RXDSEL_1_9 0x4000
-
-#define PHY_TUNE_TUNEREF_1_0 0x4000
-#define PHY_TUNE_VBGSEL_1252 0x0C00
-#define PHY_TUNE_SDBUS_33 0x0200
-#define PHY_TUNE_TUNED18 0x01C0
-#define PHY_TUNE_TUNED12 0X0020
-
-#define PHY_BPCR_IBRXSEL 0x0400
-#define PHY_BPCR_IBTXSEL 0x0100
-#define PHY_BPCR_IB_FILTER 0x0080
-#define PHY_BPCR_CMIRROR_EN 0x0040
-
-#define PHY_REG_REV_RESV 0xE000
-#define PHY_REG_REV_RXIDLE_LATCHED 0x1000
-#define PHY_REG_REV_P1_EN 0x0800
-#define PHY_REG_REV_RXIDLE_EN 0x0400
-#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 0x0040
-#define PHY_REG_REV_STOP_CLKRD 0x0020
-#define PHY_REG_REV_RX_PWST 0x0008
-#define PHY_REG_REV_STOP_CLKWR 0x0004
-
-#define PHY_FLD3_TIMER_4 0x7800
-#define PHY_FLD3_TIMER_6 0x00E0
-#define PHY_FLD3_RXDELINK 0x0004
-
-#define PHY_FLD4_FLDEN_SEL 0x4000
-#define PHY_FLD4_REQ_REF 0x2000
-#define PHY_FLD4_RXAMP_OFF 0x1000
-#define PHY_FLD4_REQ_ADDA 0x0800
-#define PHY_FLD4_BER_COUNT 0x00E0
-#define PHY_FLD4_BER_TIMER 0x000A
-#define PHY_FLD4_BER_CHK_EN 0x0001
-
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
struct rtsx_pcr;
@@ -835,6 +851,8 @@ struct pcr_handle {
};
struct pcr_ops {
+ int (*write_phy)(struct rtsx_pcr *pcr, u8 addr, u16 val);
+ int (*read_phy)(struct rtsx_pcr *pcr, u8 addr, u16 *val);
int (*extra_init_hw)(struct rtsx_pcr *pcr);
int (*optimize_phy)(struct rtsx_pcr *pcr);
int (*turn_on_led)(struct rtsx_pcr *pcr);
@@ -856,6 +874,7 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
struct rtsx_pcr {
struct pci_dev *pci;
unsigned int id;
+ int pcie_cap;
/* pci resources */
unsigned long addr;
@@ -928,6 +947,8 @@ struct rtsx_pcr {
const struct pcr_ops *ops;
enum PDEV_STAT state;
+ u16 reg_pm_ctrl3;
+
int num_slots;
struct rtsx_slot *slots;
};
@@ -935,6 +956,10 @@ struct rtsx_pcr {
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
#define PCI_PID(pcr) ((pcr)->pci->device)
+#define is_version(pcr, pid, ver) \
+ (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
+#define pcr_dbg(pcr, fmt, arg...) \
+ dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
#define SDR104_PHASE(val) ((val) & 0xFF)
#define SDR50_PHASE(val) (((val) >> 8) & 0xFF)
@@ -1004,4 +1029,17 @@ static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
}
+static inline int rtsx_pci_update_phy(struct rtsx_pcr *pcr, u8 addr,
+ u16 mask, u16 append)
+{
+ int err;
+ u16 val;
+
+ err = rtsx_pci_read_phy_register(pcr, addr, &val);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_write_phy_register(pcr, addr, (val & mask) | append);
+}
+
#endif
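The reshuffle above folds each register's bit definitions under the register itself, and the new rtsx_pci_update_phy() helper gives chip drivers a one-call read-modify-write on a PHY register. A usage sketch, with the register and field chosen only for illustration:

    /* Illustrative only; the register/field choice is an example, not from the patch. */
    #include <linux/mfd/rtsx_pci.h>

    static int example_select_3v3_tune(struct rtsx_pcr *pcr)
    {
            /* Keep the bits outside the voltage field, then OR in the 3.3 V code. */
            return rtsx_pci_update_phy(pcr, PHY_TUNE,
                                       PHY_TUNE_VOLTAGE_MASK,
                                       PHY_TUNE_VOLTAGE_3V3);
    }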
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 3fdb7cfbffb3..75115384f3fc 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -58,13 +58,7 @@ enum sec_device_type {
* @irq_base: Base IRQ number for device, required for IRQs
* @irq: Generic IRQ number for device
* @irq_data: Runtime data structure for IRQ controller
- * @ono: Power onoff IRQ number for s5m87xx
* @wakeup: Whether or not this is a wakeup device
- * @wtsr_smpl: Whether or not to enable in RTC driver the Watchdog
- * Timer Software Reset (registers set to default value
- * after PWRHOLD falling) and Sudden Momentary Power Loss
- * (PMIC will enter power on sequence after short drop in
- * VBATT voltage).
*/
struct sec_pmic_dev {
struct device *dev;
@@ -77,9 +71,7 @@ struct sec_pmic_dev {
int irq;
struct regmap_irq_chip_data *irq_data;
- int ono;
bool wakeup;
- bool wtsr_smpl;
};
int sec_irq_init(struct sec_pmic_dev *sec_pmic);
@@ -95,7 +87,6 @@ struct sec_platform_data {
int irq_base;
int (*cfg_pmic_irq)(void);
- int ono;
bool wakeup;
bool buck_voltage_lock;
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index f35af7361b60..667aa40486dd 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -74,8 +74,8 @@ enum s2mps11_irq {
S2MPS11_IRQ_MRB,
S2MPS11_IRQ_RTC60S,
- S2MPS11_IRQ_RTCA0,
S2MPS11_IRQ_RTCA1,
+ S2MPS11_IRQ_RTCA0,
S2MPS11_IRQ_SMPL,
S2MPS11_IRQ_RTC1S,
S2MPS11_IRQ_WTSR,
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index b6401e7661c7..29c30ac36020 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -105,6 +105,8 @@ enum s2mps_rtc_reg {
#define S5M_RTC_UDR_MASK (1 << S5M_RTC_UDR_SHIFT)
#define S2MPS_RTC_WUDR_SHIFT 4
#define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT)
+#define S2MPS13_RTC_AUDR_SHIFT 1
+#define S2MPS13_RTC_AUDR_MASK (1 << S2MPS13_RTC_AUDR_SHIFT)
#define S2MPS_RTC_RUDR_SHIFT 0
#define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT)
#define RTC_TCON_SHIFT 1
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
new file mode 100644
index 000000000000..b0925fa3e9ef
--- /dev/null
+++ b/include/linux/mfd/sky81452.h
@@ -0,0 +1,31 @@
+/*
+ * sky81452.h SKY81452 MFD driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SKY81452_H
+#define _SKY81452_H
+
+#include <linux/platform_data/sky81452-backlight.h>
+#include <linux/regulator/machine.h>
+
+struct sky81452_platform_data {
+ struct sky81452_bl_platform_data *bl_pdata;
+ struct regulator_init_data *regulator_init_data;
+};
+
+#endif
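A short sketch of board wiring for the new platform data, assuming backlight and regulator init data defined elsewhere; every name below is hypothetical:

    /* Hypothetical board wiring; both *_pdata objects are assumed to exist. */
    #include <linux/mfd/sky81452.h>

    static struct sky81452_bl_platform_data example_bl_pdata;
    static struct regulator_init_data example_regulator_init;

    static struct sky81452_platform_data example_sky81452_pdata = {
            .bl_pdata            = &example_bl_pdata,
            .regulator_init_data = &example_regulator_init,
    };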
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index c9d869027300..cb83883918a7 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -118,47 +118,6 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
/**
- * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform
- * data
- * @sample_time: ADC converstion time in number of clock.
- * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks,
- * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks),
- * recommended is 4.
- * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC)
- * @ref_sel: ADC reference source
- * (0 -> internal reference, 1 -> external reference)
- * @adc_freq: ADC Clock speed
- * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz)
- * @ave_ctrl: Sample average control
- * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
- * @touch_det_delay: Touch detect interrupt delay
- * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us,
- * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms)
- * recommended is 3
- * @settling: Panel driver settling time
- * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms,
- * 4 -> 5 ms, 5 -> 10 ms, 6 for 50 ms, 7 -> 100 ms)
- * recommended is 2
- * @fraction_z: Length of the fractional part in z
- * (fraction_z ([0..7]) = Count of the fractional part)
- * recommended is 7
- * @i_drive: current limit value of the touchscreen drivers
- * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max)
- *
- * */
-struct stmpe_ts_platform_data {
- u8 sample_time;
- u8 mod_12b;
- u8 ref_sel;
- u8 adc_freq;
- u8 ave_ctrl;
- u8 touch_det_delay;
- u8 settling;
- u8 fraction_z;
- u8 i_drive;
-};
-
-/**
* struct stmpe_platform_data - STMPE platform data
* @id: device id to distinguish between multiple STMPEs on the same board
* @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
@@ -168,7 +127,6 @@ struct stmpe_ts_platform_data {
* @irq_over_gpio: true if gpio is used to get irq
* @irq_gpio: gpio number over which irq will be requested (significant only if
* irq_over_gpio is true)
- * @ts: touchscreen-specific platform data
*/
struct stmpe_platform_data {
int id;
@@ -178,8 +136,6 @@ struct stmpe_platform_data {
bool irq_over_gpio;
int irq_gpio;
int autosleep_timeout;
-
- struct stmpe_ts_platform_data *ts;
};
#endif
diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h
new file mode 100644
index 000000000000..8acf1ec1fa32
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-st.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * System Timer (ST) - System peripherals registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H
+#define _LINUX_MFD_SYSCON_ATMEL_ST_H
+
+#include <linux/bitops.h>
+
+#define AT91_ST_CR 0x00 /* Control Register */
+#define AT91_ST_WDRST BIT(0) /* Watchdog Timer Restart */
+
+#define AT91_ST_PIMR 0x04 /* Period Interval Mode Register */
+#define AT91_ST_PIV 0xffff /* Period Interval Value */
+
+#define AT91_ST_WDMR 0x08 /* Watchdog Mode Register */
+#define AT91_ST_WDV 0xffff /* Watchdog Counter Value */
+#define AT91_ST_RSTEN BIT(16) /* Reset Enable */
+#define AT91_ST_EXTEN BIT(17) /* External Signal Assertion Enable */
+
+#define AT91_ST_RTMR 0x0c /* Real-time Mode Register */
+#define AT91_ST_RTPRES 0xffff /* Real-time Prescalar Value */
+
+#define AT91_ST_SR 0x10 /* Status Register */
+#define AT91_ST_PITS BIT(0) /* Period Interval Timer Status */
+#define AT91_ST_WDOVF BIT(1) /* Watchdog Overflow */
+#define AT91_ST_RTTINC BIT(2) /* Real-time Timer Increment */
+#define AT91_ST_ALMS BIT(3) /* Alarm Status */
+
+#define AT91_ST_IER 0x14 /* Interrupt Enable Register */
+#define AT91_ST_IDR 0x18 /* Interrupt Disable Register */
+#define AT91_ST_IMR 0x1c /* Interrupt Mask Register */
+
+#define AT91_ST_RTAR 0x20 /* Real-time Alarm Register */
+#define AT91_ST_ALMV 0xfffff /* Alarm Value */
+
+#define AT91_ST_CRTR 0x24 /* Current Real-time Register */
+#define AT91_ST_CRTV 0xfffff /* Current Real-Time Value */
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_ST_H */
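With the ST block exposed through syscon, consumers poke these registers via a regmap instead of a private ioremap. A minimal sketch of restarting the watchdog counter, assuming the node is reachable by the "atmel,at91rm9200-st" compatible (the lookup string is an assumption, not taken from this diff):

    /* Illustrative only; the compatible string is an assumption. */
    #include <linux/err.h>
    #include <linux/mfd/syscon.h>
    #include <linux/mfd/syscon/atmel-st.h>
    #include <linux/regmap.h>

    static int example_restart_watchdog(void)
    {
            struct regmap *regmap;

            regmap = syscon_regmap_lookup_by_compatible("atmel,at91rm9200-st");
            if (IS_ERR(regmap))
                    return PTR_ERR(regmap);

            /* Writing WDRST to the control register restarts the watchdog. */
            return regmap_write(regmap, AT91_ST_CR, AT91_ST_WDRST);
    }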
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index c877cad61a13..d16f4c82c568 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -207,6 +207,7 @@
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI1 (0x1 << 6)
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI0 (0x2 << 6)
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI1 (0x3 << 6)
+#define IMX6Q_GPR3_MIPI_MUX_CTL_SHIFT 4
#define IMX6Q_GPR3_MIPI_MUX_CTL_MASK (0x3 << 4)
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI0 (0x0 << 4)
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI1 (0x1 << 4)
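The added IMX6Q_GPR3_MIPI_MUX_CTL_SHIFT lets callers compute the MIPI mux value from an IPU/DI index instead of picking one of the fixed encodings. A hedged sketch through the GPR syscon; the compatible string and helper name are illustrative:

    /* Illustrative only; compatible string and helper name are assumptions. */
    #include <linux/err.h>
    #include <linux/mfd/syscon.h>
    #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
    #include <linux/regmap.h>

    static int example_route_mipi(unsigned int ipu_di)   /* 0..3 */
    {
            struct regmap *gpr;

            gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
            if (IS_ERR(gpr))
                    return PTR_ERR(gpr);

            return regmap_update_bits(gpr, IMX6Q_GPR3,
                                      IMX6Q_GPR3_MIPI_MUX_CTL_MASK,
                                      ipu_di << IMX6Q_GPR3_MIPI_MUX_CTL_SHIFT);
    }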
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index c203c9c56776..468c31a27fcf 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -140,36 +140,13 @@ extern int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val);
#define TC_KPD_DEBOUNCE_PERIOD 0xA3
#define TC_KPD_SETTLE_TIME 0xA3
-/**
- * struct tc35893_platform_data - data structure for platform specific data
- * @keymap_data: matrix scan code table for keycodes
- * @krow: mask for available rows, value is 0xFF
- * @kcol: mask for available columns, value is 0xFF
- * @debounce_period: platform specific debounce time
- * @settle_time: platform specific settle down time
- * @irqtype: type of interrupt, falling or rising edge
- * @enable_wakeup: specifies if keypad event can wake up system from sleep
- * @no_autorepeat: flag for auto repetition
- */
-struct tc3589x_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
- u8 krow;
- u8 kcol;
- u8 debounce_period;
- u8 settle_time;
- unsigned long irqtype;
- bool enable_wakeup;
- bool no_autorepeat;
-};
/**
* struct tc3589x_platform_data - TC3589x platform data
* @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
- * @keypad: keypad-specific platform data
*/
struct tc3589x_platform_data {
unsigned int block;
- const struct tc3589x_keypad_platform_data *keypad;
};
#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 3f4e994ace2b..1fd50dcfe47c 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -128,6 +128,7 @@
/* Sequencer Status */
#define SEQ_STATUS BIT(5)
+#define CHARGE_STEP 0x11
#define ADC_CLK 3000000
#define TOTAL_STEPS 16
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 605812820e48..24b86d538e88 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -111,6 +111,8 @@ struct dma_chan;
* data for the MMC controller
*/
struct tmio_mmc_data {
+ void *chan_priv_tx;
+ void *chan_priv_rx;
unsigned int hclk;
unsigned long capabilities;
unsigned long capabilities2;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 78baed5f2952..cac1c0904d5f 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -69,7 +69,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
-extern bool migrate_ratelimited(int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
@@ -80,10 +79,6 @@ static inline int migrate_misplaced_page(struct page *page,
{
return -EAGAIN; /* can't migrate now */
}
-static inline bool migrate_ratelimited(int node)
-{
- return false;
-}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index ee80dd7d9f60..819077c32690 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -52,6 +52,7 @@
#define MISC_DYNAMIC_MINOR 255
struct device;
+struct attribute_group;
struct miscdevice {
int minor;
@@ -60,6 +61,7 @@ struct miscdevice {
struct list_head list;
struct device *parent;
struct device *this_device;
+ const struct attribute_group **groups;
const char *nodename;
umode_t mode;
};
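The new groups pointer lets a misc driver hand its sysfs attribute groups to the misc core so they are created and removed with the device node, instead of adding files by hand after misc_register(). A minimal sketch, with all names hypothetical:

    /* Hypothetical driver using the new ->groups field; all names are made up. */
    #include <linux/device.h>
    #include <linux/fs.h>
    #include <linux/miscdevice.h>
    #include <linux/module.h>

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "ok\n");
    }
    static DEVICE_ATTR(example, 0444, example_show, NULL);

    static struct attribute *example_attrs[] = {
            &dev_attr_example.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(example);

    static const struct file_operations example_fops = {
            .owner = THIS_MODULE,
    };

    static struct miscdevice example_misc = {
            .minor  = MISC_DYNAMIC_MINOR,
            .name   = "example",
            .fops   = &example_fops,
            .groups = example_groups,  /* sysfs files tied to the device's lifetime */
    };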
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 7b6d4e9ff603..58391f2e0414 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -35,6 +35,8 @@
#include <linux/dma-mapping.h>
#include <linux/if_link.h>
+#include <linux/mlx4/device.h>
+#include <linux/netdevice.h>
enum {
/* initialization and general commands */
@@ -68,6 +70,8 @@ enum {
MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
MLX4_CMD_SET_ICM_SIZE = 0xffd,
MLX4_CMD_ACCESS_REG = 0x3b,
+ MLX4_CMD_ALLOCATE_VPP = 0x80,
+ MLX4_CMD_SET_VPORT_QOS = 0x81,
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
@@ -163,6 +167,9 @@ enum {
MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
MLX4_QP_FLOW_STEERING_DETACH = 0x66,
MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
+
+ /* Update and read QCN parameters */
+ MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68,
};
enum {
@@ -183,7 +190,14 @@ enum {
};
enum {
- /* set port opcode modifiers */
+ /* Set port opcode modifiers */
+ MLX4_SET_PORT_IB_OPCODE = 0x0,
+ MLX4_SET_PORT_ETH_OPCODE = 0x1,
+ MLX4_SET_PORT_BEACON_OPCODE = 0x4,
+};
+
+enum {
+ /* Set port Ethernet input modifiers */
MLX4_SET_PORT_GENERAL = 0x0,
MLX4_SET_PORT_RQP_CALC = 0x1,
MLX4_SET_PORT_MAC_TABLE = 0x2,
@@ -233,6 +247,16 @@ struct mlx4_config_dev_params {
u8 rx_csum_flags_port_2;
};
+enum mlx4_en_congestion_control_algorithm {
+ MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0,
+};
+
+enum mlx4_en_congestion_control_opmod {
+ MLX4_CONGESTION_CONTROL_GET_PARAMS,
+ MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+ MLX4_CONGESTION_CONTROL_SET_PARAMS = 4,
+};
+
struct mlx4_dev;
struct mlx4_cmd_mailbox {
@@ -278,9 +302,15 @@ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_para
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
+int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
+ struct mlx4_counter *counter_stats, int reset);
+int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
+ struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+ int max_tx_rate);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
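Among the new entry points are per-VF rate limiting and VF counter readout. A hedged sketch of calling them; the device/port/VF values and the Mb/s units (following the ndo_set_vf_rate convention) are assumptions of the example, not of this header:

    /* Illustrative call sites; dev/port/vf values and rate units are placeholders. */
    #include <linux/mlx4/cmd.h>

    static int example_cap_vf_rate(struct mlx4_dev *dev, int port, int vf)
    {
            /* Min/max tx rate in Mb/s, mirroring the ndo_set_vf_rate convention. */
            return mlx4_set_vf_rate(dev, port, vf, 100, 1000);
    }

    static int example_read_vf_stats(struct mlx4_dev *dev, int port, int vf,
                                     struct ifla_vf_stats *stats)
    {
            return mlx4_get_vf_stats(dev, port, vf, stats);
    }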
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index e4ebff7e9d02..fd13c1ce3b4a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -46,11 +46,10 @@
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
-#define MSIX_LEGACY_SZ 4
#define MIN_MSIX_P_PORT 5
+#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
+ (dev_cap).num_ports * MIN_MSIX_P_PORT)
-#define MLX4_NUM_UP 8
-#define MLX4_NUM_TC 8
#define MLX4_MAX_100M_UNITS_VAL 255 /*
* work around: can't set values
* greater then this value when
@@ -174,6 +173,7 @@ enum {
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
+ MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52,
MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
@@ -203,7 +203,14 @@ enum {
MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
- MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21
+ MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21,
+ MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22,
+ MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23,
+ MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24,
+ MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25,
+ MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
+ MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
+ MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
};
enum {
@@ -449,6 +456,21 @@ enum mlx4_module_id {
MLX4_MODULE_ID_QSFP28 = 0x11,
};
+enum { /* rl */
+ MLX4_QP_RATE_LIMIT_NONE = 0,
+ MLX4_QP_RATE_LIMIT_KBS = 1,
+ MLX4_QP_RATE_LIMIT_MBS = 2,
+ MLX4_QP_RATE_LIMIT_GBS = 3
+};
+
+struct mlx4_rate_limit_caps {
+ u16 num_rates; /* Number of different rates */
+ u8 min_unit;
+ u16 min_val;
+ u8 max_unit;
+ u16 max_val;
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
@@ -507,7 +529,6 @@ struct mlx4_caps {
int num_eqs;
int reserved_eqs;
int num_comp_vectors;
- int comp_pool;
int num_mpts;
int max_fmr_maps;
int num_mtts;
@@ -564,6 +585,7 @@ struct mlx4_caps {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
u32 vf_caps;
+ struct mlx4_rate_limit_caps rl_caps;
};
struct mlx4_buf_list {
@@ -749,6 +771,14 @@ union mlx4_ext_av {
struct mlx4_eth_av eth;
};
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do { \
+ if ((value) > U32_MAX) \
+ counter = cpu_to_be32(U32_MAX); \
+ else \
+ counter = cpu_to_be32(value); \
+} while (0)
+
struct mlx4_counter {
u8 reserved1[3];
u8 counter_mode;
@@ -807,6 +837,12 @@ struct mlx4_dev {
struct mlx4_vf_dev *dev_vfs;
};
+struct mlx4_clock_params {
+ u64 offset;
+ u8 bar;
+ u8 size;
+};
+
struct mlx4_eqe {
u8 reserved1;
u8 type;
@@ -935,6 +971,7 @@ struct mlx4_mad_ifc {
((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
#define MLX4_INVALID_SLAVE_ID 0xFF
+#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
void handle_port_mgmt_change_event(struct work_struct *work);
@@ -982,6 +1019,11 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev)
return dev->flags & MLX4_FLAG_SLAVE;
}
+static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+ return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf, gfp_t gfp);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -1282,14 +1324,13 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
-void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
-int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
-int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
- u8 *pg, u16 *ratelimit);
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
+ u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
@@ -1306,10 +1347,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
int mlx4_get_phys_port_id(struct mlx4_dev *dev);
@@ -1318,7 +1362,12 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
+void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
+ int port);
+__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port);
+void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port);
int mlx4_flow_attach(struct mlx4_dev *dev,
struct mlx4_net_trans_rule *rule, u64 *reg_id);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
@@ -1455,4 +1504,7 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
enum mlx4_access_reg_method method,
struct mlx4_ptys_reg *ptys_reg);
+int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
+ struct mlx4_clock_params *params);
+
#endif /* MLX4_DEVICE_H */
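
Two of the additions above are small macros worth spelling out: MLX4_IS_LEGACY_EQ_MODE() compares the granted completion-vector budget against the per-port minimum now that MSIX_LEGACY_SZ and comp_pool are gone, and ASSIGN_32BIT_COUNTER() folds a 64-bit software accumulator into a big-endian 32-bit hardware counter field without wrapping. A minimal sketch, using only what this file declares (the variables are hypothetical):

static bool example_uses_shared_eqs(struct mlx4_dev *dev)
{
	/* True when fewer completion vectors were granted than
	 * num_ports * MIN_MSIX_P_PORT, i.e. ports must share EQs. */
	return MLX4_IS_LEGACY_EQ_MODE(dev->caps);
}

static void example_fill_counter(__be32 *hw_counter, u64 sw_packets)
{
	/* Saturates at U32_MAX instead of wrapping, then stores big-endian. */
	ASSIGN_32BIT_COUNTER(*hw_counter, sw_packets);
}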
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 551f85456c11..6fed539e5456 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -207,14 +207,17 @@ struct mlx4_qp_context {
__be32 msn;
__be16 rq_wqe_counter;
__be16 sq_wqe_counter;
- u32 reserved3[2];
+ u32 reserved3;
+ __be16 rate_limit_params;
+ u8 reserved4;
+ u8 qos_vport;
__be32 param3;
__be32 nummmcpeers_basemkey;
u8 log_page_size;
- u8 reserved4[2];
+ u8 reserved5[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
- u32 reserved5[10];
+ u32 reserved6[10];
};
struct mlx4_update_qp_context {
@@ -229,6 +232,8 @@ struct mlx4_update_qp_context {
enum {
MLX4_UPD_QP_MASK_PM_STATE = 32,
MLX4_UPD_QP_MASK_VSD = 33,
+ MLX4_UPD_QP_MASK_QOS_VPP = 34,
+ MLX4_UPD_QP_MASK_RATE_LIMIT = 35,
};
enum {
@@ -428,7 +433,9 @@ struct mlx4_wqe_inline_seg {
enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_SMAC = 1 << 0,
MLX4_UPDATE_QP_VSD = 1 << 1,
- MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
+ MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
+ MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
+ MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1
};
enum mlx4_update_qp_params_flags {
@@ -437,7 +444,10 @@ enum mlx4_update_qp_params_flags {
struct mlx4_update_qp_params {
u8 smac_index;
+ u8 qos_vport;
u32 flags;
+ u16 rate_unit;
+ u16 rate_val;
};
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
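
The new MLX4_UPDATE_QP_RATE_LIMIT and MLX4_UPDATE_QP_QOS_VPORT attributes are consumed through struct mlx4_update_qp_params. A minimal caller sketch, assuming the remaining mlx4_update_qp() parameters (cut off above) are the attribute mask and the params struct; the rate value itself is arbitrary:

/* Hedged sketch: request a per-QP rate limit of 100 Mb/s units. */
static int example_rate_limit_qp(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_update_qp_params params = {
		.rate_unit = MLX4_QP_RATE_LIMIT_MBS,	/* from mlx4/device.h */
		.rate_val  = 100,
	};

	return mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_RATE_LIMIT, &params);
}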
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
index 2826a4b6071e..68cd08f02c2f 100644
--- a/include/linux/mlx5/cmd.h
+++ b/include/linux/mlx5/cmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index f6b17ac601bd..abc4767695e4 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -137,14 +137,15 @@ enum {
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
void __iomem *uar_page,
- spinlock_t *doorbell_lock)
+ spinlock_t *doorbell_lock,
+ u32 cons_index)
{
__be32 doorbell[2];
u32 sn;
u32 ci;
sn = cq->arm_sn & 3;
- ci = cq->cons_index & 0xffffff;
+ ci = cons_index & 0xffffff;
*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
@@ -168,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq, u16 cq_period,
+ u16 cq_max_count);
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
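
mlx5_cq_arm() now takes the consumer index explicitly; the doorbell word packs the 2-bit arm sequence number into bits 31:28 above the command bits and the 24-bit consumer index. The other addition, mlx5_core_modify_cq_moderation(), is the knob behind interrupt coalescing. A minimal usage sketch of the latter; the period and count values are arbitrary examples and the period unit (microseconds) is an assumption:

/* Hedged sketch: coalesce completions to at most one interrupt per
 * ~16 usec or per 64 CQEs, whichever limit is reached first. */
static int example_set_cq_moderation(struct mlx5_core_dev *mdev,
				     struct mlx5_core_cq *cq)
{
	return mlx5_core_modify_cq_moderation(mdev, cq, 16, 64);
}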
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4e5bd813bb9a..b943cd9e2097 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,6 +35,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -58,6 +59,8 @@
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
@@ -70,6 +73,14 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))
@@ -88,6 +99,12 @@ __mlx5_mask(typ, fld))
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
+#define MLX5_GET64_PR(typ, p, fld) ({ \
+ u64 ___t = MLX5_GET64(typ, p, fld); \
+ pr_debug(#fld " = 0x%llx\n", ___t); \
+ ___t; \
+})
+
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -115,6 +132,10 @@ enum {
};
enum {
+ MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
+enum {
MLX5_MIN_PKEY_TABLE_SIZE = 128,
MLX5_MAX_LOG_PKEY_TABLE = 5,
};
@@ -264,6 +285,7 @@ enum {
MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
MLX5_OPCODE_SEND = 0x0a,
MLX5_OPCODE_SEND_IMM = 0x0b,
+ MLX5_OPCODE_LSO = 0x0e,
MLX5_OPCODE_RDMA_READ = 0x10,
MLX5_OPCODE_ATOMIC_CS = 0x11,
MLX5_OPCODE_ATOMIC_FA = 0x12,
@@ -312,13 +334,6 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
-enum {
- HCA_CAP_OPMOD_GET_MAX = 0,
- HCA_CAP_OPMOD_GET_CUR = 1,
- HCA_CAP_OPMOD_GET_ODP_MAX = 4,
- HCA_CAP_OPMOD_GET_ODP_CUR = 5
-};
-
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -541,6 +556,10 @@ struct mlx5_cmd_prot_block {
u8 sig;
};
+enum {
+ MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
struct mlx5_err_cqe {
u8 rsvd0[32];
__be32 srqn;
@@ -554,13 +573,22 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 rsvd0[17];
+ u8 rsvd0[4];
+ u8 lro_tcppsh_abort_dupack;
+ u8 lro_min_ttl;
+ __be16 lro_tcp_win;
+ __be32 lro_ack_seq_num;
+ __be32 rss_hash_result;
+ u8 rss_hash_type;
u8 ml_path;
- u8 rsvd20[4];
+ u8 rsvd20[2];
+ __be16 check_sum;
__be16 slid;
__be32 flags_rqpn;
- u8 rsvd28[4];
- __be32 srqn;
+ u8 hds_ip_ext;
+ u8 l4_hdr_type_etc;
+ __be16 vlan_info;
+ __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
u8 rsvd40[4];
__be32 byte_cnt;
@@ -571,6 +599,40 @@ struct mlx5_cqe64 {
u8 op_own;
};
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+ return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+ CQE_L4_HDR_TYPE_NONE = 0x0,
+ CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
+ CQE_L4_HDR_TYPE_UDP = 0x2,
+ CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
+ CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
+};
+
+enum {
+ CQE_RSS_HTYPE_IP = 0x3 << 6,
+ CQE_RSS_HTYPE_L4 = 0x3 << 2,
+};
+
+enum {
+ CQE_L2_OK = 1 << 0,
+ CQE_L3_OK = 1 << 1,
+ CQE_L4_OK = 1 << 2,
+};
+
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
@@ -996,4 +1058,135 @@ struct mlx5_destroy_psv_out {
u8 rsvd[8];
};
+#define MLX5_CMD_OP_MAX 0x920
+
+enum {
+ VPORT_STATE_DOWN = 0x0,
+ VPORT_STATE_UP = 0x1,
+};
+
+enum {
+ MLX5_L3_PROT_TYPE_IPV4 = 0,
+ MLX5_L3_PROT_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_L4_PROT_TYPE_TCP = 0,
+ MLX5_L4_PROT_TYPE_UDP = 1,
+};
+
+enum {
+ MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
+ MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
+ MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
+ MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
+};
+
+enum {
+ MLX5_MATCH_OUTER_HEADERS = 1 << 0,
+ MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
+ MLX5_MATCH_INNER_HEADERS = 1 << 2,
+
+};
+
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
+ MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
+};
+
+enum {
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
+enum mlx5_cap_type {
+ MLX5_CAP_GENERAL = 0,
+ MLX5_CAP_ETHERNET_OFFLOADS,
+ MLX5_CAP_ODP,
+ MLX5_CAP_ATOMIC,
+ MLX5_CAP_ROCE,
+ MLX5_CAP_IPOIB_OFFLOADS,
+ MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_FLOW_TABLE,
+ /* NUM OF CAP Types */
+ MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
+enum {
+ MLX5_CMD_STAT_OK = 0x0,
+ MLX5_CMD_STAT_INT_ERR = 0x1,
+ MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
+ MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
+ MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
+ MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
+ MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_LIM_ERR = 0x8,
+ MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
+ MLX5_CMD_STAT_IX_ERR = 0xa,
+ MLX5_CMD_STAT_NO_RES_ERR = 0xf,
+ MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
+ MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
+ MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
+ MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
+ MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
+};
+
+static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+{
+ if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
+ return 0;
+ return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+}
+
#endif /* MLX5_DEVICE_H */
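
The rewritten struct mlx5_cqe64 plus the helpers above are enough to classify an Ethernet receive completion, and the new MLX5_CAP_* macros replace the old struct mlx5_caps lookups. A minimal sketch combining both, using only names introduced in this file; the LRO check shown is illustrative, not the driver's actual policy:

static bool example_cqe_is_tcp(struct mlx5_cqe64 *cqe)
{
	u8 l4 = get_cqe_l4_hdr_type(cqe);

	return l4 == CQE_L4_HDR_TYPE_TCP_NO_ACK ||
	       l4 == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA ||
	       l4 == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA;
}

static bool example_hw_lro_usable(struct mlx5_core_dev *mdev)
{
	/* MLX5_CAP_ETH() indexes hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS]
	 * and extracts the named mlx5_ifc bit field via MLX5_GET(). */
	return MLX5_CAP_GEN(mdev, eth_net_offloads) &&
	       MLX5_CAP_ETH(mdev, lro_cap);
}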
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index 163a818411e7..afc78a3f4462 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 166d9315fe4b..5722d88c2429 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -44,7 +44,6 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/mlx5_ifc.h>
enum {
MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@ enum {
};
enum {
- MLX5_MAX_EQ_NAME = 32
+ MLX5_MAX_IRQ_NAME = 32
};
enum {
@@ -108,6 +107,7 @@ enum {
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
+ MLX5_REG_PVLC = 0x500f,
MLX5_REG_PMLP = 0, /* TBD */
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -150,6 +150,11 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_CLIENT_REREG,
};
+enum mlx5_port_status {
+ MLX5_PORT_UP = 1 << 1,
+ MLX5_PORT_DOWN = 1 << 2,
+};
+
struct mlx5_uuar_info {
struct mlx5_uar *uars;
int num_uars;
@@ -232,6 +237,9 @@ struct mlx5_cmd_stats {
};
struct mlx5_cmd {
+ void *cmd_alloc_buf;
+ dma_addr_t alloc_dma;
+ int alloc_size;
void *cmd_buf;
dma_addr_t dma;
u16 cmdif_rev;
@@ -266,56 +274,7 @@ struct mlx5_cmd {
struct mlx5_port_caps {
int gid_table_len;
int pkey_table_len;
-};
-
-struct mlx5_general_caps {
- u8 log_max_eq;
- u8 log_max_cq;
- u8 log_max_qp;
- u8 log_max_mkey;
- u8 log_max_pd;
- u8 log_max_srq;
- u8 log_max_strq;
- u8 log_max_mrw_sz;
- u8 log_max_bsf_list_size;
- u8 log_max_klm_list_size;
- u32 max_cqes;
- int max_wqes;
- u32 max_eqes;
- u32 max_indirection;
- int max_sq_desc_sz;
- int max_rq_desc_sz;
- int max_dc_sq_desc_sz;
- u64 flags;
- u16 stat_rate_support;
- int log_max_msg;
- int num_ports;
- u8 log_max_ra_res_qp;
- u8 log_max_ra_req_qp;
- int max_srq_wqes;
- int bf_reg_size;
- int bf_regs_per_page;
- struct mlx5_port_caps port[MLX5_MAX_PORTS];
- u8 ext_port_cap[MLX5_MAX_PORTS];
- int max_vf;
- u32 reserved_lkey;
- u8 local_ca_ack_delay;
- u8 log_max_mcg;
- u32 max_qp_mcg;
- int min_page_sz;
- int pd_cap;
- u32 max_qp_counters;
- u32 pkey_table_size;
- u8 log_max_ra_req_dc;
- u8 log_max_ra_res_dc;
- u32 uar_sz;
- u8 min_log_pg_sz;
- u8 log_max_xrcd;
- u16 log_uar_page_sz;
-};
-
-struct mlx5_caps {
- struct mlx5_general_caps gen;
+ u8 ext_port_cap;
};
struct mlx5_cmd_mailbox {
@@ -331,8 +290,6 @@ struct mlx5_buf_list {
struct mlx5_buf {
struct mlx5_buf_list direct;
- struct mlx5_buf_list *page_list;
- int nbufs;
int npages;
int size;
u8 page_shift;
@@ -348,7 +305,6 @@ struct mlx5_eq {
u8 eqn;
int nent;
u64 mask;
- char name[MLX5_MAX_EQ_NAME];
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
@@ -384,6 +340,8 @@ struct mlx5_core_mr {
enum mlx5_res_type {
MLX5_RES_QP,
+ MLX5_RES_SRQ,
+ MLX5_RES_XSRQ,
};
struct mlx5_core_rsc_common {
@@ -393,6 +351,7 @@ struct mlx5_core_rsc_common {
};
struct mlx5_core_srq {
+ struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
int max_gs;
@@ -407,11 +366,10 @@ struct mlx5_core_srq {
struct mlx5_eq_table {
void __iomem *update_ci;
void __iomem *update_arm_ci;
- struct list_head *comp_eq_head;
+ struct list_head comp_eqs_list;
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
- struct msix_entry *msix_arr;
int num_comp_vectors;
/* protect EQs list
*/
@@ -464,9 +422,16 @@ struct mlx5_mr_table {
struct radix_tree_root tree;
};
+struct mlx5_irq_info {
+ cpumask_var_t mask;
+ char name[MLX5_MAX_IRQ_NAME];
+};
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
+ struct msix_entry *msix_arr;
+ struct mlx5_irq_info *irq_info;
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
@@ -517,7 +482,9 @@ struct mlx5_core_dev {
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_caps caps;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
void (*event) (struct mlx5_core_dev *dev,
@@ -526,6 +493,7 @@ struct mlx5_core_dev {
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
+ u32 issi;
};
struct mlx5_db {
@@ -546,6 +514,11 @@ enum {
MLX5_COMP_EQ_SIZE = 1024,
};
+enum {
+ MLX5_PTYS_IB = 1 << 0,
+ MLX5_PTYS_EN = 1 << 2,
+};
+
struct mlx5_db_pgdir {
struct list_head list;
DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -581,13 +554,44 @@ struct mlx5_pas {
u8 log_sz;
};
+enum port_state_policy {
+ MLX5_AAA_000
+};
+
+enum phy_port_state {
+ MLX5_AAA_111
+};
+
+struct mlx5_hca_vport_context {
+ u32 field_select;
+ bool sm_virt_aware;
+ bool has_smi;
+ bool has_raw;
+ enum port_state_policy policy;
+ enum phy_port_state phys_state;
+ enum ib_port_state vport_state;
+ u8 port_physical_state;
+ u64 sys_image_guid;
+ u64 port_guid;
+ u64 node_guid;
+ u32 cap_mask1;
+ u32 cap_mask1_perm;
+ u32 cap_mask2;
+ u32 cap_mask2_perm;
+ u16 lid;
+ u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
+ u8 lmc;
+ u8 subnet_timeout;
+ u16 sm_lid;
+ u8 sm_sl;
+ u16 qkey_violation_counter;
+ u16 pkey_violation_counter;
+ bool grh_required;
+};
+
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
- if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
return buf->direct.buf + offset;
- else
- return buf->page_list[offset >> PAGE_SHIFT].buf +
- (offset & (PAGE_SIZE - 1));
}
extern struct workqueue_struct *mlx5_core_wq;
@@ -651,8 +655,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -662,19 +666,21 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen);
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
@@ -693,7 +699,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
@@ -722,6 +728,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -730,7 +737,32 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask);
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port);
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -777,16 +809,25 @@ enum {
MAX_MR_CACHE_ENTRIES = 16,
};
+enum {
+ MLX5_INTERFACE_PROTOCOL_IB = 0,
+ MLX5_INTERFACE_PROTOCOL_ETH = 1,
+};
+
struct mlx5_interface {
void * (*add)(struct mlx5_core_dev *dev);
void (*remove)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
+ void * (*get_dev)(void *context);
+ int protocol;
struct list_head list;
};
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
struct mlx5_profile {
u64 mask;
@@ -797,4 +838,14 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
+static inline int mlx5_get_gid_table_len(u16 param)
+{
+ if (param > 4) {
+ pr_warn("gid table length is zero\n");
+ return 0;
+ }
+
+ return 8 * (1 << param);
+}
+
#endif /* MLX5_DRIVER_H */
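
The block of port helpers added above gives mlx5_core a self-contained way to bring an Ethernet port up: query the PTYS capabilities, program the admin protocol mask, set the MTU, and flip the admin status. A minimal sketch built only from the prototypes above, under the simplifying assumption that the admin mask can just echo the capability mask (real drivers translate ethtool settings instead):

static int example_port_up(struct mlx5_core_dev *mdev, u8 port)
{
	u32 proto_cap;
	int max_mtu;
	int err;

	err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
	if (err)
		return err;

	err = mlx5_set_port_proto(mdev, proto_cap, MLX5_PTYS_EN);
	if (err)
		return err;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
	err = mlx5_set_port_mtu(mdev, max_mtu, port);
	if (err)
		return err;

	return mlx5_set_port_status(mdev, MLX5_PORT_UP);
}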
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
new file mode 100644
index 000000000000..5f922c6d4fc2
--- /dev/null
+++ b/include/linux/mlx5/flow_table.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_FLOW_TABLE_H
+#define MLX5_FLOW_TABLE_H
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_flow_table_group {
+ u8 log_sz;
+ u8 match_criteria_enable;
+ u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group);
+void mlx5_destroy_flow_table(void *flow_table);
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index);
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+u32 mlx5_get_flow_table_id(void *flow_table);
+
+#endif /* MLX5_FLOW_TABLE_H */
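
The new flow-table API is deliberately small: describe the groups (log size plus match-criteria mask), create the table, then add and remove entries by index. A minimal sketch using only the declarations above plus MLX5_FLOW_TABLE_TYPE_NIC_RCV from mlx5/device.h in this same series; the wildcard group (no criteria enabled) and the opaque flow_context are assumptions made purely for illustration:

static void *example_create_catchall_table(struct mlx5_core_dev *mdev)
{
	struct mlx5_flow_table_group group = {
		.log_sz = 1,			/* room for two entries */
		.match_criteria_enable = 0,	/* match nothing: catch-all */
	};

	/* level 0, NIC receive table, one group */
	return mlx5_create_flow_table(mdev, 0, MLX5_FLOW_TABLE_TYPE_NIC_RCV,
				      1, &group);
}

static int example_add_entry(void *ft, void *flow_context, u32 *flow_index)
{
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)] = {0};

	return mlx5_add_flow_table_entry(ft, 0, match_criteria,
					 flow_context, flow_index);
}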
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 5f48b8f592c5..6d2f6fee041c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -28,12 +28,45 @@
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
- */
-
+*/
#ifndef MLX5_IFC_H
#define MLX5_IFC_H
enum {
+ MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0,
+ MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1,
+ MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2,
+ MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3,
+ MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13,
+ MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14,
+ MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c,
+ MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d,
+ MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4,
+ MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5,
+ MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7,
+ MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc,
+ MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10,
+ MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11,
+ MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12,
+ MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8,
+ MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9,
+ MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
+ MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
+ MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b,
+ MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
+ MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
+ MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb
+};
+
+enum {
+ MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
+ MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
+ MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
+ MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
+};
+
+enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -43,6 +76,8 @@ enum {
MLX5_CMD_OP_QUERY_PAGES = 0x107,
MLX5_CMD_OP_MANAGE_PAGES = 0x108,
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
+ MLX5_CMD_OP_QUERY_ISSI = 0x10a,
+ MLX5_CMD_OP_SET_ISSI = 0x10b,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -66,6 +101,7 @@ enum {
MLX5_CMD_OP_2ERR_QP = 0x507,
MLX5_CMD_OP_2RST_QP = 0x50a,
MLX5_CMD_OP_QUERY_QP = 0x50b,
+ MLX5_CMD_OP_SQD_RTS_QP = 0x50c,
MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
MLX5_CMD_OP_CREATE_PSV = 0x600,
MLX5_CMD_OP_DESTROY_PSV = 0x601,
@@ -73,7 +109,10 @@ enum {
MLX5_CMD_OP_DESTROY_SRQ = 0x701,
MLX5_CMD_OP_QUERY_SRQ = 0x702,
MLX5_CMD_OP_ARM_RQ = 0x703,
- MLX5_CMD_OP_RESIZE_SRQ = 0x704,
+ MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706,
+ MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707,
+ MLX5_CMD_OP_ARM_XRC_SRQ = 0x708,
MLX5_CMD_OP_CREATE_DCT = 0x710,
MLX5_CMD_OP_DESTROY_DCT = 0x711,
MLX5_CMD_OP_DRAIN_DCT = 0x712,
@@ -85,8 +124,12 @@ enum {
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
- MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760,
+ MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760,
MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
+ MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -98,7 +141,7 @@ enum {
MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -106,23 +149,22 @@ enum {
MLX5_CMD_OP_NOP = 0x80d,
MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
- MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
- MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813,
- MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
- MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
- MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
- MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
- MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
- MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
+ MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822,
+ MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828,
+ MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
+ MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
MLX5_CMD_OP_QUERY_TIR = 0x903,
- MLX5_CMD_OP_CREATE_TIS = 0x912,
- MLX5_CMD_OP_MODIFY_TIS = 0x913,
- MLX5_CMD_OP_DESTROY_TIS = 0x914,
- MLX5_CMD_OP_QUERY_TIS = 0x915,
MLX5_CMD_OP_CREATE_SQ = 0x904,
MLX5_CMD_OP_MODIFY_SQ = 0x905,
MLX5_CMD_OP_DESTROY_SQ = 0x906,
@@ -135,9 +177,430 @@ enum {
MLX5_CMD_OP_MODIFY_RMP = 0x90d,
MLX5_CMD_OP_DESTROY_RMP = 0x90e,
MLX5_CMD_OP_QUERY_RMP = 0x90f,
- MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
- MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
- MLX5_CMD_OP_MAX = 0x911
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_MODIFY_TIS = 0x913,
+ MLX5_CMD_OP_DESTROY_TIS = 0x914,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_RQT = 0x916,
+ MLX5_CMD_OP_MODIFY_RQT = 0x917,
+ MLX5_CMD_OP_DESTROY_RQT = 0x918,
+ MLX5_CMD_OP_QUERY_RQT = 0x919,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
+ MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934,
+ MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+ u8 outer_dmac[0x1];
+ u8 outer_smac[0x1];
+ u8 outer_ether_type[0x1];
+ u8 reserved_0[0x1];
+ u8 outer_first_prio[0x1];
+ u8 outer_first_cfi[0x1];
+ u8 outer_first_vid[0x1];
+ u8 reserved_1[0x1];
+ u8 outer_second_prio[0x1];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0x1];
+ u8 reserved_2[0x1];
+ u8 outer_sip[0x1];
+ u8 outer_dip[0x1];
+ u8 outer_frag[0x1];
+ u8 outer_ip_protocol[0x1];
+ u8 outer_ip_ecn[0x1];
+ u8 outer_ip_dscp[0x1];
+ u8 outer_udp_sport[0x1];
+ u8 outer_udp_dport[0x1];
+ u8 outer_tcp_sport[0x1];
+ u8 outer_tcp_dport[0x1];
+ u8 outer_tcp_flags[0x1];
+ u8 outer_gre_protocol[0x1];
+ u8 outer_gre_key[0x1];
+ u8 outer_vxlan_vni[0x1];
+ u8 reserved_3[0x5];
+ u8 source_eswitch_port[0x1];
+
+ u8 inner_dmac[0x1];
+ u8 inner_smac[0x1];
+ u8 inner_ether_type[0x1];
+ u8 reserved_4[0x1];
+ u8 inner_first_prio[0x1];
+ u8 inner_first_cfi[0x1];
+ u8 inner_first_vid[0x1];
+ u8 reserved_5[0x1];
+ u8 inner_second_prio[0x1];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0x1];
+ u8 reserved_6[0x1];
+ u8 inner_sip[0x1];
+ u8 inner_dip[0x1];
+ u8 inner_frag[0x1];
+ u8 inner_ip_protocol[0x1];
+ u8 inner_ip_ecn[0x1];
+ u8 inner_ip_dscp[0x1];
+ u8 inner_udp_sport[0x1];
+ u8 inner_udp_dport[0x1];
+ u8 inner_tcp_sport[0x1];
+ u8 inner_tcp_dport[0x1];
+ u8 inner_tcp_flags[0x1];
+ u8 reserved_7[0x9];
+
+ u8 reserved_8[0x40];
+};
+
+struct mlx5_ifc_flow_table_prop_layout_bits {
+ u8 ft_support[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x2];
+ u8 log_max_ft_size[0x6];
+ u8 reserved_2[0x10];
+ u8 max_ft_level[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x18];
+ u8 log_max_ft_num[0x8];
+
+ u8 reserved_5[0x18];
+ u8 log_max_destination[0x8];
+
+ u8 reserved_6[0x18];
+ u8 log_max_flow[0x8];
+
+ u8 reserved_7[0x40];
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+ u8 send[0x1];
+ u8 receive[0x1];
+ u8 write[0x1];
+ u8 read[0x1];
+ u8 reserved_0[0x1];
+ u8 srq_receive[0x1];
+ u8 reserved_1[0x1a];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 ethertype[0x10];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 first_prio[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vid[0xc];
+
+ u8 ip_protocol[0x8];
+ u8 ip_dscp[0x6];
+ u8 ip_ecn[0x2];
+ u8 vlan_tag[0x1];
+ u8 reserved_0[0x1];
+ u8 frag[0x1];
+ u8 reserved_1[0x4];
+ u8 tcp_flags[0x9];
+
+ u8 tcp_sport[0x10];
+ u8 tcp_dport[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 udp_sport[0x10];
+ u8 udp_dport[0x10];
+
+ u8 src_ip[4][0x20];
+
+ u8 dst_ip[4][0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x10];
+ u8 source_port[0x10];
+
+ u8 outer_second_prio[0x3];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0xc];
+ u8 inner_second_prio[0x3];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0xc];
+
+ u8 outer_second_vlan_tag[0x1];
+ u8 inner_second_vlan_tag[0x1];
+ u8 reserved_2[0xe];
+ u8 gre_protocol[0x10];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 vxlan_vni[0x18];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0xc];
+ u8 outer_ipv6_flow_label[0x14];
+
+ u8 reserved_6[0xc];
+ u8 inner_ipv6_flow_label[0x14];
+
+ u8 reserved_7[0xe0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+ u8 pa_h[0x20];
+
+ u8 pa_l[0x14];
+ u8 reserved_0[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+ u8 hi[0x20];
+
+ u8 lo[0x20];
+};
+
+enum {
+ MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0,
+ MLX5_ADS_STAT_RATE_2_5GBPS = 0x7,
+ MLX5_ADS_STAT_RATE_10GBPS = 0x8,
+ MLX5_ADS_STAT_RATE_30GBPS = 0x9,
+ MLX5_ADS_STAT_RATE_5GBPS = 0xa,
+ MLX5_ADS_STAT_RATE_20GBPS = 0xb,
+ MLX5_ADS_STAT_RATE_40GBPS = 0xc,
+ MLX5_ADS_STAT_RATE_60GBPS = 0xd,
+ MLX5_ADS_STAT_RATE_80GBPS = 0xe,
+ MLX5_ADS_STAT_RATE_120GBPS = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+ u8 fl[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_0[0xe];
+ u8 pkey_index[0x10];
+
+ u8 reserved_1[0x8];
+ u8 grh[0x1];
+ u8 mlid[0x7];
+ u8 rlid[0x10];
+
+ u8 ack_timeout[0x5];
+ u8 reserved_2[0x3];
+ u8 src_addr_index[0x8];
+ u8 reserved_3[0x4];
+ u8 stat_rate[0x4];
+ u8 hop_limit[0x8];
+
+ u8 reserved_4[0x4];
+ u8 tclass[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+
+ u8 reserved_5[0x4];
+ u8 f_dscp[0x1];
+ u8 f_ecn[0x1];
+ u8 reserved_6[0x1];
+ u8 f_eth_prio[0x1];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+ u8 udp_sport[0x10];
+
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 sl[0x4];
+ u8 port[0x8];
+ u8 rmac_47_32[0x10];
+
+ u8 rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+ u8 reserved_0[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+ u8 reserved_1[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+ u8 reserved_2[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+ u8 reserved_3[0x7200];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 csum_cap[0x1];
+ u8 vlan_cap[0x1];
+ u8 lro_cap[0x1];
+ u8 lro_psh_flag[0x1];
+ u8 lro_time_stamp[0x1];
+ u8 reserved_0[0x6];
+ u8 max_lso_cap[0x5];
+ u8 reserved_1[0x4];
+ u8 rss_ind_tbl_cap[0x4];
+ u8 reserved_2[0x3];
+ u8 tunnel_lso_const_out_ip_id[0x1];
+ u8 reserved_3[0x2];
+ u8 tunnel_statless_gre[0x1];
+ u8 tunnel_stateless_vxlan[0x1];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x10];
+ u8 lro_min_mss_size[0x10];
+
+ u8 reserved_6[0x120];
+
+ u8 lro_timer_supported_periods[4][0x20];
+
+ u8 reserved_7[0x600];
+};
+
+struct mlx5_ifc_roce_cap_bits {
+ u8 roce_apm[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+
+ u8 reserved_2[0xc];
+ u8 l3_type[0x4];
+ u8 reserved_3[0x8];
+ u8 roce_version[0x8];
+
+ u8 reserved_4[0x10];
+ u8 r_roce_dest_udp_port[0x10];
+
+ u8 r_roce_max_src_udp_port[0x10];
+ u8 r_roce_min_src_udp_port[0x10];
+
+ u8 reserved_5[0x10];
+ u8 roce_address_table_size[0x10];
+
+ u8 reserved_6[0x700];
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100,
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+ u8 reserved_0[0x40];
+
+ u8 atomic_req_endianness[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 atomic_operations[0x10];
+
+ u8 reserved_4[0x10];
+ u8 atomic_size_qp[0x10];
+
+ u8 reserved_5[0x10];
+ u8 atomic_size_dc[0x10];
+
+ u8 reserved_6[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ u8 reserved_0[0x40];
+
+ u8 sig[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+ u8 reserved_3[0x720];
+};
+
+enum {
+ MLX5_WQ_TYPE_LINKED_LIST = 0x0,
+ MLX5_WQ_TYPE_CYCLIC = 0x1,
+ MLX5_WQ_TYPE_STRQ = 0x2,
+};
+
+enum {
+ MLX5_WQ_END_PAD_MODE_NONE = 0x0,
+ MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3,
+};
+
+enum {
+ MLX5_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CAP_PORT_TYPE_ETH = 0x1,
};
struct mlx5_ifc_cmd_hca_cap_bits {
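
Everything added to mlx5_ifc.h follows one convention: a struct of u8 arrays whose element count is the field width in bits, laid out in device (big-endian) order, so sizes and field accesses go through MLX5_ST_SZ_* / MLX5_GET / MLX5_SET rather than normal member access. A minimal sketch against the cmd_hca_cap layout modified just below; the caller owning a raw, device-ordered capability buffer is an assumption for illustration:

static u8 example_read_port_type(void *caps)
{
	/* caps points at MLX5_ST_SZ_BYTES(cmd_hca_cap) bytes laid out
	 * exactly as struct mlx5_ifc_cmd_hca_cap_bits, in device order. */
	return MLX5_GET(cmd_hca_cap, caps, port_type);
}

static void example_clamp_log_max_qp(void *caps, u8 new_log_max_qp)
{
	/* Read-modify-write of a 5-bit field inside the same buffer. */
	if (MLX5_GET(cmd_hca_cap, caps, log_max_qp) > new_log_max_qp)
		MLX5_SET(cmd_hca_cap, caps, log_max_qp, new_log_max_qp);
}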
@@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_1[0xb];
u8 log_max_qp[0x5];
- u8 log_max_strq_sz[0x8];
- u8 reserved_2[0x3];
- u8 log_max_srqs[0x5];
+ u8 reserved_2[0xb];
+ u8 log_max_srq[0x5];
u8 reserved_3[0x10];
u8 reserved_4[0x8];
@@ -185,123 +647,2112 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pad_cap[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
- u8 reserved_15[0x1d];
+ u8 reserved_15[0xd];
+ u8 gid_table_size[0x10];
- u8 reserved_16[0x6];
+ u8 out_of_seq_cnt[0x1];
+ u8 vport_counters[0x1];
+ u8 reserved_16[0x4];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
- u8 eswitch_owner[0x1];
- u8 reserved_17[0xa];
+ u8 vport_group_manager[0x1];
+ u8 vhca_group_manager[0x1];
+ u8 ib_virt[0x1];
+ u8 eth_virt[0x1];
+ u8 reserved_17[0x1];
+ u8 ets[0x1];
+ u8 nic_flow_table[0x1];
+ u8 reserved_18[0x4];
u8 local_ca_ack_delay[0x5];
- u8 reserved_18[0x8];
+ u8 reserved_19[0x6];
+ u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_19[0x3];
+ u8 reserved_20[0x3];
u8 log_max_msg[0x5];
- u8 reserved_20[0x18];
+ u8 reserved_21[0x18];
u8 stat_rate_support[0x10];
- u8 reserved_21[0x10];
+ u8 reserved_22[0xc];
+ u8 cqe_version[0x4];
- u8 reserved_22[0x10];
+ u8 compact_address_vector[0x1];
+ u8 reserved_23[0xe];
+ u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
- u8 reserved_23[0x1];
+ u8 reserved_24[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
- u8 reserved_24[0x1];
+ u8 reserved_25[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
- u8 dc[0x1];
- u8 reserved_25[0x2];
+ u8 dct[0x1];
+ u8 reserved_26[0x1];
+ u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
- u8 rsz_srq[0x1];
+ u8 reserved_27[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 sniffer_rule_flow[0x1];
- u8 sniffer_rule_vport[0x1];
- u8 sniffer_rule_phy[0x1];
- u8 reserved_26[0x1];
+ u8 reserved_28[0x3];
+ u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
- u8 reserved_27[0x3];
+ u8 reserved_29[0x1];
+ u8 scqe_break_moderation[0x1];
+ u8 reserved_30[0x1];
u8 cd[0x1];
- u8 reserved_28[0x1];
+ u8 reserved_31[0x1];
u8 apm[0x1];
- u8 reserved_29[0x7];
+ u8 reserved_32[0x7];
u8 qkv[0x1];
u8 pkv[0x1];
- u8 reserved_30[0x4];
+ u8 reserved_33[0x4];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_31[0xa];
+ u8 reserved_34[0xa];
u8 uar_sz[0x6];
- u8 reserved_32[0x8];
+ u8 reserved_35[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_33[0xa];
+ u8 reserved_36[0x1];
+ u8 pad_tx_eth_packet[0x1];
+ u8 reserved_37[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_34[0x10];
+ u8 reserved_38[0x10];
- u8 reserved_35[0x10];
+ u8 reserved_39[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_36[0x10];
+ u8 reserved_40[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_37[0x10];
+ u8 reserved_41[0x10];
u8 max_wqe_sz_sq_dc[0x10];
- u8 reserved_38[0x7];
+ u8 reserved_42[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_39[0x18];
+ u8 reserved_43[0x18];
u8 log_max_mcg[0x8];
- u8 reserved_40[0xb];
+ u8 reserved_44[0x3];
+ u8 log_max_transport_domain[0x5];
+ u8 reserved_45[0x3];
u8 log_max_pd[0x5];
- u8 reserved_41[0xb];
+ u8 reserved_46[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_42[0x20];
+ u8 reserved_47[0x20];
- u8 reserved_43[0x3];
+ u8 reserved_48[0x3];
u8 log_max_rq[0x5];
- u8 reserved_44[0x3];
+ u8 reserved_49[0x3];
u8 log_max_sq[0x5];
- u8 reserved_45[0x3];
+ u8 reserved_50[0x3];
u8 log_max_tir[0x5];
- u8 reserved_46[0x3];
+ u8 reserved_51[0x3];
u8 log_max_tis[0x5];
- u8 reserved_47[0x13];
- u8 log_max_rq_per_tir[0x5];
- u8 reserved_48[0x3];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_52[0x2];
+ u8 log_max_rmp[0x5];
+ u8 reserved_53[0x3];
+ u8 log_max_rqt[0x5];
+ u8 reserved_54[0x3];
+ u8 log_max_rqt_size[0x5];
+ u8 reserved_55[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_49[0xe0];
+ u8 reserved_56[0x3];
+ u8 log_max_stride_sz_rq[0x5];
+ u8 reserved_57[0x3];
+ u8 log_min_stride_sz_rq[0x5];
+ u8 reserved_58[0x3];
+ u8 log_max_stride_sz_sq[0x5];
+ u8 reserved_59[0x3];
+ u8 log_min_stride_sz_sq[0x5];
- u8 reserved_50[0x10];
+ u8 reserved_60[0x1b];
+ u8 log_max_wq_sz[0x5];
+
+ u8 reserved_61[0xa0];
+
+ u8 reserved_62[0x3];
+ u8 log_max_l2_table[0x5];
+ u8 reserved_63[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_51[0x100];
+ u8 reserved_64[0x100];
- u8 reserved_52[0x1f];
+ u8 reserved_65[0x1f];
u8 cqe_zip[0x1];
u8 cqe_zip_timeout[0x10];
u8 cqe_zip_max_num[0x10];
- u8 reserved_53[0x220];
+ u8 reserved_66[0x220];
+};
+
+enum {
+ MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1,
+ MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2,
+};
+
+struct mlx5_ifc_dest_format_struct_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 reserved_0[0x20];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+
+ u8 reserved_0[0xa00];
+};
+
+enum {
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+ u8 l3_prot_type[0x1];
+ u8 l4_prot_type[0x1];
+ u8 selected_fields[0x1e];
+};
+
+enum {
+ MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0,
+ MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1,
+};
+
+enum {
+ MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0,
+ MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+ u8 wq_type[0x4];
+ u8 wq_signature[0x1];
+ u8 end_padding_mode[0x2];
+ u8 cd_slave[0x1];
+ u8 reserved_0[0x18];
+
+ u8 hds_skip_first_sge[0x1];
+ u8 log2_hds_buf_size[0x3];
+ u8 reserved_1[0x7];
+ u8 page_offset[0x5];
+ u8 lwm[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x8];
+ u8 uar_page[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 hw_counter[0x20];
+
+ u8 sw_counter[0x20];
+
+ u8 reserved_4[0xc];
+ u8 log_wq_stride[0x4];
+ u8 reserved_5[0x3];
+ u8 log_wq_pg_sz[0x5];
+ u8 reserved_6[0x3];
+ u8 log_wq_sz[0x5];
+
+ u8 reserved_7[0x4e0];
+
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+struct mlx5_ifc_rq_num_bits {
+ u8 reserved_0[0x8];
+ u8 rq_num[0x18];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+ u8 reserved_0[0x10];
+ u8 mac_addr_47_32[0x10];
+
+ u8 mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+ u8 reserved_0[0xa0];
+
+ u8 min_time_between_cnps[0x20];
+
+ u8 reserved_1[0x12];
+ u8 cnp_dscp[0x6];
+ u8 reserved_2[0x5];
+ u8 cnp_802p_prio[0x3];
+
+ u8 reserved_3[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+ u8 reserved_0[0x60];
+
+ u8 reserved_1[0x4];
+ u8 clamp_tgt_rate[0x1];
+ u8 reserved_2[0x3];
+ u8 clamp_tgt_rate_after_time_inc[0x1];
+ u8 reserved_3[0x17];
+
+ u8 reserved_4[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_5[0xe0];
+
+ u8 rate_to_set_on_first_cnp[0x20];
+
+ u8 dce_tcp_g[0x20];
+
+ u8 dce_tcp_rtt[0x20];
+
+ u8 rate_reduce_monitor_period[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 initial_alpha_value[0x20];
+
+ u8 reserved_7[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+ u8 reserved_0[0x80];
+
+ u8 rppp_max_rps[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_1[0x640];
+};
+
+enum {
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+ u8 resize_field_select[0x20];
+};
+
+enum {
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+ u8 modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+ u8 field_select_r_roce_np[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+ u8 field_select_r_roce_rp[0x20];
+};
+
+enum {
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+ u8 field_select_8021qaurp[0x20];
+};
+
+struct mlx5_ifc_phys_layer_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 symbol_errors_high[0x20];
+
+ u8 symbol_errors_low[0x20];
+
+ u8 sync_headers_errors_high[0x20];
+
+ u8 sync_headers_errors_low[0x20];
+
+ u8 edpl_bip_errors_lane0_high[0x20];
+
+ u8 edpl_bip_errors_lane0_low[0x20];
+
+ u8 edpl_bip_errors_lane1_high[0x20];
+
+ u8 edpl_bip_errors_lane1_low[0x20];
+
+ u8 edpl_bip_errors_lane2_high[0x20];
+
+ u8 edpl_bip_errors_lane2_low[0x20];
+
+ u8 edpl_bip_errors_lane3_high[0x20];
+
+ u8 edpl_bip_errors_lane3_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+ u8 rs_fec_corrected_blocks_high[0x20];
+
+ u8 rs_fec_corrected_blocks_low[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_high[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_low[0x20];
+
+ u8 rs_fec_no_errors_blocks_high[0x20];
+
+ u8 rs_fec_no_errors_blocks_low[0x20];
+
+ u8 rs_fec_single_error_blocks_high[0x20];
+
+ u8 rs_fec_single_error_blocks_low[0x20];
+
+ u8 rs_fec_corrected_symbols_total_high[0x20];
+
+ u8 rs_fec_corrected_symbols_total_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_low[0x20];
+
+ u8 link_down_events[0x20];
+
+ u8 successful_recovery_events[0x20];
+
+ u8 reserved_0[0x180];
+};
+
+struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+ u8 transmit_queue_high[0x20];
+
+ u8 transmit_queue_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+ u8 rx_octets_high[0x20];
+
+ u8 rx_octets_low[0x20];
+
+ u8 reserved_0[0xc0];
+
+ u8 rx_frames_high[0x20];
+
+ u8 rx_frames_low[0x20];
+
+ u8 tx_octets_high[0x20];
+
+ u8 tx_octets_low[0x20];
+
+ u8 reserved_1[0xc0];
+
+ u8 tx_frames_high[0x20];
+
+ u8 tx_frames_low[0x20];
+
+ u8 rx_pause_high[0x20];
+
+ u8 rx_pause_low[0x20];
+
+ u8 rx_pause_duration_high[0x20];
+
+ u8 rx_pause_duration_low[0x20];
+
+ u8 tx_pause_high[0x20];
+
+ u8 tx_pause_low[0x20];
+
+ u8 tx_pause_duration_high[0x20];
+
+ u8 tx_pause_duration_low[0x20];
+
+ u8 rx_pause_transition_high[0x20];
+
+ u8 rx_pause_transition_low[0x20];
+
+ u8 reserved_2[0x400];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+ u8 port_transmit_wait_high[0x20];
+
+ u8 port_transmit_wait_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+ u8 dot3stats_alignment_errors_high[0x20];
+
+ u8 dot3stats_alignment_errors_low[0x20];
+
+ u8 dot3stats_fcs_errors_high[0x20];
+
+ u8 dot3stats_fcs_errors_low[0x20];
+
+ u8 dot3stats_single_collision_frames_high[0x20];
+
+ u8 dot3stats_single_collision_frames_low[0x20];
+
+ u8 dot3stats_multiple_collision_frames_high[0x20];
+
+ u8 dot3stats_multiple_collision_frames_low[0x20];
+
+ u8 dot3stats_sqe_test_errors_high[0x20];
+
+ u8 dot3stats_sqe_test_errors_low[0x20];
+
+ u8 dot3stats_deferred_transmissions_high[0x20];
+
+ u8 dot3stats_deferred_transmissions_low[0x20];
+
+ u8 dot3stats_late_collisions_high[0x20];
+
+ u8 dot3stats_late_collisions_low[0x20];
+
+ u8 dot3stats_excessive_collisions_high[0x20];
+
+ u8 dot3stats_excessive_collisions_low[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_low[0x20];
+
+ u8 dot3stats_carrier_sense_errors_high[0x20];
+
+ u8 dot3stats_carrier_sense_errors_low[0x20];
+
+ u8 dot3stats_frame_too_longs_high[0x20];
+
+ u8 dot3stats_frame_too_longs_low[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_low[0x20];
+
+ u8 dot3stats_symbol_errors_high[0x20];
+
+ u8 dot3stats_symbol_errors_low[0x20];
+
+ u8 dot3control_in_unknown_opcodes_high[0x20];
+
+ u8 dot3control_in_unknown_opcodes_low[0x20];
+
+ u8 dot3in_pause_frames_high[0x20];
+
+ u8 dot3in_pause_frames_low[0x20];
+
+ u8 dot3out_pause_frames_high[0x20];
+
+ u8 dot3out_pause_frames_low[0x20];
+
+ u8 reserved_0[0x3c0];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+ u8 ether_stats_drop_events_high[0x20];
+
+ u8 ether_stats_drop_events_low[0x20];
+
+ u8 ether_stats_octets_high[0x20];
+
+ u8 ether_stats_octets_low[0x20];
+
+ u8 ether_stats_pkts_high[0x20];
+
+ u8 ether_stats_pkts_low[0x20];
+
+ u8 ether_stats_broadcast_pkts_high[0x20];
+
+ u8 ether_stats_broadcast_pkts_low[0x20];
+
+ u8 ether_stats_multicast_pkts_high[0x20];
+
+ u8 ether_stats_multicast_pkts_low[0x20];
+
+ u8 ether_stats_crc_align_errors_high[0x20];
+
+ u8 ether_stats_crc_align_errors_low[0x20];
+
+ u8 ether_stats_undersize_pkts_high[0x20];
+
+ u8 ether_stats_undersize_pkts_low[0x20];
+
+ u8 ether_stats_oversize_pkts_high[0x20];
+
+ u8 ether_stats_oversize_pkts_low[0x20];
+
+ u8 ether_stats_fragments_high[0x20];
+
+ u8 ether_stats_fragments_low[0x20];
+
+ u8 ether_stats_jabbers_high[0x20];
+
+ u8 ether_stats_jabbers_low[0x20];
+
+ u8 ether_stats_collisions_high[0x20];
+
+ u8 ether_stats_collisions_low[0x20];
+
+ u8 ether_stats_pkts64octets_high[0x20];
+
+ u8 ether_stats_pkts64octets_low[0x20];
+
+ u8 ether_stats_pkts65to127octets_high[0x20];
+
+ u8 ether_stats_pkts65to127octets_low[0x20];
+
+ u8 ether_stats_pkts128to255octets_high[0x20];
+
+ u8 ether_stats_pkts128to255octets_low[0x20];
+
+ u8 ether_stats_pkts256to511octets_high[0x20];
+
+ u8 ether_stats_pkts256to511octets_low[0x20];
+
+ u8 ether_stats_pkts512to1023octets_high[0x20];
+
+ u8 ether_stats_pkts512to1023octets_low[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_high[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_low[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_high[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_low[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_high[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_low[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_high[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_low[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_high[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_low[0x20];
+
+ u8 reserved_0[0x280];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+ u8 if_in_octets_high[0x20];
+
+ u8 if_in_octets_low[0x20];
+
+ u8 if_in_ucast_pkts_high[0x20];
+
+ u8 if_in_ucast_pkts_low[0x20];
+
+ u8 if_in_discards_high[0x20];
+
+ u8 if_in_discards_low[0x20];
+
+ u8 if_in_errors_high[0x20];
+
+ u8 if_in_errors_low[0x20];
+
+ u8 if_in_unknown_protos_high[0x20];
+
+ u8 if_in_unknown_protos_low[0x20];
+
+ u8 if_out_octets_high[0x20];
+
+ u8 if_out_octets_low[0x20];
+
+ u8 if_out_ucast_pkts_high[0x20];
+
+ u8 if_out_ucast_pkts_low[0x20];
+
+ u8 if_out_discards_high[0x20];
+
+ u8 if_out_discards_low[0x20];
+
+ u8 if_out_errors_high[0x20];
+
+ u8 if_out_errors_low[0x20];
+
+ u8 if_in_multicast_pkts_high[0x20];
+
+ u8 if_in_multicast_pkts_low[0x20];
+
+ u8 if_in_broadcast_pkts_high[0x20];
+
+ u8 if_in_broadcast_pkts_low[0x20];
+
+ u8 if_out_multicast_pkts_high[0x20];
+
+ u8 if_out_multicast_pkts_low[0x20];
+
+ u8 if_out_broadcast_pkts_high[0x20];
+
+ u8 if_out_broadcast_pkts_low[0x20];
+
+ u8 reserved_0[0x480];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+ u8 a_frames_transmitted_ok_high[0x20];
+
+ u8 a_frames_transmitted_ok_low[0x20];
+
+ u8 a_frames_received_ok_high[0x20];
+
+ u8 a_frames_received_ok_low[0x20];
+
+ u8 a_frame_check_sequence_errors_high[0x20];
+
+ u8 a_frame_check_sequence_errors_low[0x20];
+
+ u8 a_alignment_errors_high[0x20];
+
+ u8 a_alignment_errors_low[0x20];
+
+ u8 a_octets_transmitted_ok_high[0x20];
+
+ u8 a_octets_transmitted_ok_low[0x20];
+
+ u8 a_octets_received_ok_high[0x20];
+
+ u8 a_octets_received_ok_low[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_high[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_low[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_high[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_low[0x20];
+
+ u8 a_multicast_frames_received_ok_high[0x20];
+
+ u8 a_multicast_frames_received_ok_low[0x20];
+
+ u8 a_broadcast_frames_received_ok_high[0x20];
+
+ u8 a_broadcast_frames_received_ok_low[0x20];
+
+ u8 a_in_range_length_errors_high[0x20];
+
+ u8 a_in_range_length_errors_low[0x20];
+
+ u8 a_out_of_range_length_field_high[0x20];
+
+ u8 a_out_of_range_length_field_low[0x20];
+
+ u8 a_frame_too_long_errors_high[0x20];
+
+ u8 a_frame_too_long_errors_low[0x20];
+
+ u8 a_symbol_error_during_carrier_high[0x20];
+
+ u8 a_symbol_error_during_carrier_low[0x20];
+
+ u8 a_mac_control_frames_transmitted_high[0x20];
+
+ u8 a_mac_control_frames_transmitted_low[0x20];
+
+ u8 a_mac_control_frames_received_high[0x20];
+
+ u8 a_mac_control_frames_received_low[0x20];
+
+ u8 a_unsupported_opcodes_received_high[0x20];
+
+ u8 a_unsupported_opcodes_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+ u8 reserved_0[0x300];
+};
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+ u8 command_completion_vector[0x20];
+
+ u8 reserved_0[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+ u8 reserved_0[0x18];
+ u8 port_num[0x1];
+ u8 reserved_1[0x3];
+ u8 vl[0x4];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+ u8 event_subtype[0x8];
+ u8 reserved_0[0x8];
+ u8 congestion_level[0x8];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+ u8 reserved_0[0x60];
+
+ u8 gpio_event_hi[0x20];
+
+ u8 gpio_event_lo[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+ u8 reserved_0[0x40];
+
+ u8 port_num[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+ u8 reserved_0[0xe0];
+};
+
+enum {
+ MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
+ MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+ u8 reserved_0[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 syndrome[0x8];
+
+ u8 reserved_3[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 r_key[0x20];
+
+ u8 reserved_0[0x10];
+ u8 packet_len[0x10];
+
+ u8 rdma_op_len[0x20];
+
+ u8 rdma_va[0x40];
+
+ u8 reserved_1[0x5];
+ u8 rdma[0x1];
+ u8 write[0x1];
+ u8 requestor[0x1];
+ u8 qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 reserved_0[0x10];
+ u8 wqe_index[0x10];
+
+ u8 reserved_1[0x10];
+ u8 len[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x5];
+ u8 rdma[0x1];
+ u8 write_read[0x1];
+ u8 requestor[0x1];
+ u8 qpn[0x18];
+};
+
+struct mlx5_ifc_qp_events_bits {
+ u8 reserved_0[0xa0];
+
+ u8 type[0x8];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 cq_number[0x18];
+};
+
+enum {
+ MLX5_QPC_STATE_RST = 0x0,
+ MLX5_QPC_STATE_INIT = 0x1,
+ MLX5_QPC_STATE_RTR = 0x2,
+ MLX5_QPC_STATE_RTS = 0x3,
+ MLX5_QPC_STATE_SQER = 0x4,
+ MLX5_QPC_STATE_ERR = 0x6,
+ MLX5_QPC_STATE_SQD = 0x7,
+ MLX5_QPC_STATE_SUSPENDED = 0x9,
+};
+
+enum {
+ MLX5_QPC_ST_RC = 0x0,
+ MLX5_QPC_ST_UC = 0x1,
+ MLX5_QPC_ST_UD = 0x2,
+ MLX5_QPC_ST_XRC = 0x3,
+ MLX5_QPC_ST_DCI = 0x5,
+ MLX5_QPC_ST_QP0 = 0x7,
+ MLX5_QPC_ST_QP1 = 0x8,
+ MLX5_QPC_ST_RAW_DATAGRAM = 0x9,
+ MLX5_QPC_ST_REG_UMR = 0xc,
+};
+
+enum {
+ MLX5_QPC_PM_STATE_ARMED = 0x0,
+ MLX5_QPC_PM_STATE_REARM = 0x1,
+ MLX5_QPC_PM_STATE_RESERVED = 0x2,
+ MLX5_QPC_PM_STATE_MIGRATED = 0x3,
+};
+
+enum {
+ MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
+ MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
+};
+
+enum {
+ MLX5_QPC_MTU_256_BYTES = 0x1,
+ MLX5_QPC_MTU_512_BYTES = 0x2,
+ MLX5_QPC_MTU_1K_BYTES = 0x3,
+ MLX5_QPC_MTU_2K_BYTES = 0x4,
+ MLX5_QPC_MTU_4K_BYTES = 0x5,
+ MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7,
+};
+
+enum {
+ MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1,
+ MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8,
+};
+
+enum {
+ MLX5_QPC_CS_REQ_DISABLE = 0x0,
+ MLX5_QPC_CS_REQ_UP_TO_32B = 0x11,
+ MLX5_QPC_CS_REQ_UP_TO_64B = 0x22,
+};
+
+enum {
+ MLX5_QPC_CS_RES_DISABLE = 0x0,
+ MLX5_QPC_CS_RES_UP_TO_32B = 0x1,
+ MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
+};
+
+struct mlx5_ifc_qpc_bits {
+ u8 state[0x4];
+ u8 reserved_0[0x4];
+ u8 st[0x8];
+ u8 reserved_1[0x3];
+ u8 pm_state[0x2];
+ u8 reserved_2[0x7];
+ u8 end_padding_mode[0x2];
+ u8 reserved_3[0x2];
+
+ u8 wq_signature[0x1];
+ u8 block_lb_mc[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 reserved_4[0x1];
+ u8 drain_sigerr[0x1];
+ u8 reserved_5[0x2];
+ u8 pd[0x18];
+
+ u8 mtu[0x3];
+ u8 log_msg_max[0x5];
+ u8 reserved_6[0x1];
+ u8 log_rq_size[0x4];
+ u8 log_rq_stride[0x3];
+ u8 no_sq[0x1];
+ u8 log_sq_size[0x4];
+ u8 reserved_7[0x6];
+ u8 rlky[0x1];
+ u8 reserved_8[0x4];
+
+ u8 counter_set_id[0x8];
+ u8 uar_page[0x18];
+
+ u8 reserved_9[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_10[0x3];
+ u8 log_page_size[0x5];
+ u8 remote_qpn[0x18];
+
+ struct mlx5_ifc_ads_bits primary_address_path;
+
+ struct mlx5_ifc_ads_bits secondary_address_path;
+
+ u8 log_ack_req_freq[0x4];
+ u8 reserved_11[0x4];
+ u8 log_sra_max[0x3];
+ u8 reserved_12[0x2];
+ u8 retry_count[0x3];
+ u8 rnr_retry[0x3];
+ u8 reserved_13[0x1];
+ u8 fre[0x1];
+ u8 cur_rnr_retry[0x3];
+ u8 cur_retry_count[0x3];
+ u8 reserved_14[0x5];
+
+ u8 reserved_15[0x20];
+
+ u8 reserved_16[0x8];
+ u8 next_send_psn[0x18];
+
+ u8 reserved_17[0x8];
+ u8 cqn_snd[0x18];
+
+ u8 reserved_18[0x40];
+
+ u8 reserved_19[0x8];
+ u8 last_acked_psn[0x18];
+
+ u8 reserved_20[0x8];
+ u8 ssn[0x18];
+
+ u8 reserved_21[0x8];
+ u8 log_rra_max[0x3];
+ u8 reserved_22[0x1];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 reserved_23[0x1];
+ u8 page_offset[0x6];
+ u8 reserved_24[0x3];
+ u8 cd_slave_receive[0x1];
+ u8 cd_slave_send[0x1];
+ u8 cd_master[0x1];
+
+ u8 reserved_25[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 next_rcv_psn[0x18];
+
+ u8 reserved_26[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_27[0x8];
+ u8 cqn_rcv[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 q_key[0x20];
+
+ u8 reserved_28[0x5];
+ u8 rq_type[0x3];
+ u8 srqn_rmpn[0x18];
+
+ u8 reserved_29[0x8];
+ u8 rmsn[0x18];
+
+ u8 hw_sq_wqebb_counter[0x10];
+ u8 sw_sq_wqebb_counter[0x10];
+
+ u8 hw_rq_counter[0x20];
+
+ u8 sw_rq_counter[0x20];
+
+ u8 reserved_30[0x20];
+
+ u8 reserved_31[0xf];
+ u8 cgs[0x1];
+ u8 cs_req[0x8];
+ u8 cs_res[0x8];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_32[0xc0];
+};
+
+struct mlx5_ifc_roce_addr_layout_bits {
+ u8 source_l3_address[16][0x8];
+
+ u8 reserved_0[0x3];
+ u8 vlan_valid[0x1];
+ u8 vlan_id[0xc];
+ u8 source_mac_47_32[0x10];
+
+ u8 source_mac_31_0[0x20];
+
+ u8 reserved_1[0x14];
+ u8 roce_l3_type[0x4];
+ u8 roce_version[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+ struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_odp_cap_bits odp_cap;
+ struct mlx5_ifc_atomic_caps_bits atomic_caps;
+ struct mlx5_ifc_roce_cap_bits roce_cap;
+ struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+ struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+ u8 reserved_0[0x8000];
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+};
+
+struct mlx5_ifc_flow_context_bits {
+ u8 reserved_0[0x20];
+
+ u8 group_id[0x20];
+
+ u8 reserved_1[0x8];
+ u8 flow_tag[0x18];
+
+ u8 reserved_2[0x10];
+ u8 action[0x10];
+
+ u8 reserved_3[0x8];
+ u8 destination_list_size[0x18];
+
+ u8 reserved_4[0x160];
+
+ struct mlx5_ifc_fte_match_param_bits match_value;
+
+ u8 reserved_5[0x600];
+
+ struct mlx5_ifc_dest_format_struct_bits destination[0];
+};
+
+enum {
+ MLX5_XRC_SRQC_STATE_GOOD = 0x0,
+ MLX5_XRC_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+ u8 state[0x4];
+ u8 log_xrc_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_2[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 user_index_equal_xrc_srqn[0x1];
+ u8 reserved_4[0x1];
+ u8 log_page_size[0x6];
+ u8 user_index[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_7[0x40];
+
+ u8 db_record_addr_h[0x20];
+
+ u8 db_record_addr_l[0x1e];
+ u8 reserved_8[0x2];
+
+ u8 reserved_9[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+ u8 packets[0x40];
+
+ u8 octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+ u8 reserved_0[0xc];
+ u8 prio[0x4];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x100];
+
+ u8 reserved_3[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_4[0x3c0];
+};
+
+enum {
+ MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
+ MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
+};
+
+enum {
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+};
+
+enum {
+ MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
+ MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+};
+
+enum {
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
+};
+
+struct mlx5_ifc_tirc_bits {
+ u8 reserved_0[0x20];
+
+ u8 disp_type[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x40];
+
+ u8 reserved_3[0x4];
+ u8 lro_timeout_period_usecs[0x10];
+ u8 lro_enable_mask[0x4];
+ u8 lro_max_ip_payload_size[0x8];
+
+ u8 reserved_4[0x40];
+
+ u8 reserved_5[0x8];
+ u8 inline_rqn[0x18];
+
+ u8 rx_hash_symmetric[0x1];
+ u8 reserved_6[0x1];
+ u8 tunneled_offload_en[0x1];
+ u8 reserved_7[0x5];
+ u8 indirect_table[0x18];
+
+ u8 rx_hash_fn[0x4];
+ u8 reserved_8[0x2];
+ u8 self_lb_block[0x2];
+ u8 transport_domain[0x18];
+
+ u8 rx_hash_toeplitz_key[10][0x20];
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+ u8 reserved_9[0x4c0];
+};
+
+enum {
+ MLX5_SRQC_STATE_GOOD = 0x0,
+ MLX5_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+ u8 state[0x4];
+ u8 log_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 reserved_2[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_3[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x2];
+ u8 log_page_size[0x6];
+ u8 reserved_6[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_9[0x40];
+
+ u8 dbr_addr[0x40];
+
+ u8 reserved_10[0x80];
+};
+
+enum {
+ MLX5_SQC_STATE_RST = 0x0,
+ MLX5_SQC_STATE_RDY = 0x1,
+ MLX5_SQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+ u8 rlky[0x1];
+ u8 cd_master[0x1];
+ u8 fre[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0xa0];
+
+ u8 tis_lst_sz[0x10];
+ u8 reserved_5[0x10];
+
+ u8 reserved_6[0x40];
+
+ u8 reserved_7[0x8];
+ u8 tis_num_0[0x18];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_rqtc_bits {
+ u8 reserved_0[0xa0];
+
+ u8 reserved_1[0x10];
+ u8 rqt_max_size[0x10];
+
+ u8 reserved_2[0x10];
+ u8 rqt_actual_size[0x10];
+
+ u8 reserved_3[0x6a0];
+
+ struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
+};
+
+enum {
+ MLX5_RQC_STATE_RST = 0x0,
+ MLX5_RQC_STATE_RDY = 0x1,
+ MLX5_RQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+ u8 rlky[0x1];
+ u8 reserved_0[0x2];
+ u8 vsd[0x1];
+ u8 mem_rq_type[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_2[0x12];
+
+ u8 reserved_3[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 reserved_5[0x18];
+
+ u8 reserved_6[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_7[0xe0];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_RMPC_STATE_RDY = 0x1,
+ MLX5_RMPC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+ u8 reserved_0[0x8];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x140];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+ u8 reserved_0[0x1f];
+ u8 roce_en[0x1];
+
+ u8 reserved_1[0x760];
+
+ u8 reserved_2[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_3[0xc];
+ u8 allowed_list_size[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+ u8 reserved_4[0x20];
+
+ u8 current_uc_mac_address[0][0x40];
+};
+
+enum {
+ MLX5_MKC_ACCESS_MODE_PA = 0x0,
+ MLX5_MKC_ACCESS_MODE_MTT = 0x1,
+ MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
+};
+
+struct mlx5_ifc_mkc_bits {
+ u8 reserved_0[0x1];
+ u8 free[0x1];
+ u8 reserved_1[0xd];
+ u8 small_fence_on_rdma_read_response[0x1];
+ u8 umr_en[0x1];
+ u8 a[0x1];
+ u8 rw[0x1];
+ u8 rr[0x1];
+ u8 lw[0x1];
+ u8 lr[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_2[0x8];
+
+ u8 qpn[0x18];
+ u8 mkey_7_0[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 length64[0x1];
+ u8 bsf_en[0x1];
+ u8 sync_umr[0x1];
+ u8 reserved_4[0x2];
+ u8 expected_sigerr_count[0x1];
+ u8 reserved_5[0x1];
+ u8 en_rinval[0x1];
+ u8 pd[0x18];
+
+ u8 start_addr[0x40];
+
+ u8 len[0x40];
+
+ u8 bsf_octword_size[0x20];
+
+ u8 reserved_6[0x80];
+
+ u8 translations_octword_size[0x20];
+
+ u8 reserved_7[0x1b];
+ u8 log_page_size[0x5];
+
+ u8 reserved_8[0x20];
+};
+
+struct mlx5_ifc_pkey_bits {
+ u8 reserved_0[0x10];
+ u8 pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+ u8 array128_auto[16][0x8];
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+ u8 field_select[0x20];
+
+ u8 reserved_0[0xe0];
+
+ u8 sm_virt_aware[0x1];
+ u8 has_smi[0x1];
+ u8 has_raw[0x1];
+ u8 grh_required[0x1];
+ u8 reserved_1[0xc];
+ u8 port_physical_state[0x4];
+ u8 vport_state_policy[0x4];
+ u8 port_state[0x4];
+ u8 vport_state[0x4];
+
+ u8 reserved_2[0x20];
+
+ u8 system_image_guid[0x40];
+
+ u8 port_guid[0x40];
+
+ u8 node_guid[0x40];
+
+ u8 cap_mask1[0x20];
+
+ u8 cap_mask1_field_select[0x20];
+
+ u8 cap_mask2[0x20];
+
+ u8 cap_mask2_field_select[0x20];
+
+ u8 reserved_3[0x80];
+
+ u8 lid[0x10];
+ u8 reserved_4[0x4];
+ u8 init_type_reply[0x4];
+ u8 lmc[0x3];
+ u8 subnet_timeout[0x5];
+
+ u8 sm_lid[0x10];
+ u8 sm_sl[0x4];
+ u8 reserved_5[0xc];
+
+ u8 qkey_violation_counter[0x10];
+ u8 pkey_violation_counter[0x10];
+
+ u8 reserved_6[0xca0];
+};
+
+enum {
+ MLX5_EQC_STATUS_OK = 0x0,
+ MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
+};
+
+enum {
+ MLX5_EQC_ST_ARMED = 0x9,
+ MLX5_EQC_ST_FIRED = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x9];
+ u8 ec[0x1];
+ u8 oi[0x1];
+ u8 reserved_1[0x5];
+ u8 st[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_5[0x6];
+
+ u8 reserved_6[0x3];
+ u8 log_eq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x18];
+ u8 intr[0x8];
+
+ u8 reserved_9[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_10[0x18];
+
+ u8 reserved_11[0x60];
+
+ u8 reserved_12[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_13[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_14[0x80];
+};
+
+enum {
+ MLX5_DCTC_STATE_ACTIVE = 0x0,
+ MLX5_DCTC_STATE_DRAINING = 0x1,
+ MLX5_DCTC_STATE_DRAINED = 0x2,
+};
+
+enum {
+ MLX5_DCTC_CS_RES_DISABLE = 0x0,
+ MLX5_DCTC_CS_RES_NA = 0x1,
+ MLX5_DCTC_CS_RES_UP_TO_64B = 0x2,
+};
+
+enum {
+ MLX5_DCTC_MTU_256_BYTES = 0x1,
+ MLX5_DCTC_MTU_512_BYTES = 0x2,
+ MLX5_DCTC_MTU_1K_BYTES = 0x3,
+ MLX5_DCTC_MTU_2K_BYTES = 0x4,
+ MLX5_DCTC_MTU_4K_BYTES = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 rlky[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_4[0xd];
+
+ u8 reserved_5[0x8];
+ u8 cs_res[0x8];
+ u8 reserved_6[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 reserved_7[0x8];
+
+ u8 reserved_8[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_9[0x8];
+ u8 pd[0x18];
+
+ u8 tclass[0x8];
+ u8 reserved_10[0x4];
+ u8 flow_label[0x14];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_11[0x5];
+ u8 mtu[0x3];
+ u8 port[0x8];
+ u8 pkey_index[0x10];
+
+ u8 reserved_12[0x8];
+ u8 my_addr_index[0x8];
+ u8 reserved_13[0x8];
+ u8 hop_limit[0x8];
+
+ u8 dc_access_key_violation_count[0x20];
+
+ u8 reserved_14[0x14];
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+
+ u8 reserved_15[0x40];
+};
+
+enum {
+ MLX5_CQC_STATUS_OK = 0x0,
+ MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9,
+ MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa,
+};
+
+enum {
+ MLX5_CQC_CQE_SZ_64_BYTES = 0x0,
+ MLX5_CQC_CQE_SZ_128_BYTES = 0x1,
+};
+
+enum {
+ MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6,
+ MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9,
+ MLX5_CQC_ST_FIRED = 0xa,
+};
+
+struct mlx5_ifc_cqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x4];
+ u8 cqe_sz[0x3];
+ u8 cc[0x1];
+ u8 reserved_1[0x1];
+ u8 scqe_break_moderation_en[0x1];
+ u8 oi[0x1];
+ u8 reserved_2[0x2];
+ u8 cqe_zip_en[0x1];
+ u8 mini_cqe_res_format[0x2];
+ u8 st[0x4];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_6[0x6];
+
+ u8 reserved_7[0x3];
+ u8 log_cq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_8[0x4];
+ u8 cq_period[0xc];
+ u8 cq_max_count[0x10];
+
+ u8 reserved_9[0x18];
+ u8 c_eqn[0x8];
+
+ u8 reserved_10[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_11[0x18];
+
+ u8 reserved_12[0x20];
+
+ u8 reserved_13[0x8];
+ u8 last_notified_index[0x18];
+
+ u8 reserved_14[0x8];
+ u8 last_solicit_index[0x18];
+
+ u8 reserved_15[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_16[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_17[0x40];
+
+ u8 dbr_addr[0x40];
+};
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+ struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ u8 reserved_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 ieee_vendor_id[0x18];
+
+ u8 reserved_2[0x10];
+ u8 vsd_vendor_id[0x10];
+
+ u8 vsd[208][0x8];
+
+ u8 vsd_contd_psid[16][0x8];
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+ struct mlx5_ifc_modify_field_select_bits modify_field_select;
+ struct mlx5_ifc_resize_field_select_bits resize_field_select;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+ struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+ struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+ struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ u8 reserved_0[0x7c0];
+};
+
+union mlx5_ifc_event_auto_bits {
+ struct mlx5_ifc_comp_event_bits comp_event;
+ struct mlx5_ifc_dct_events_bits dct_events;
+ struct mlx5_ifc_qp_events_bits qp_events;
+ struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+ struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+ struct mlx5_ifc_cq_error_bits cq_error;
+ struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+ struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+ struct mlx5_ifc_gpio_event_bits gpio_event;
+ struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+ struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+ struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+ u8 reserved_0[0xe0];
+};
+
+struct mlx5_ifc_health_buffer_bits {
+ u8 reserved_0[0x100];
+
+ u8 assert_existptr[0x20];
+
+ u8 assert_callra[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 fw_version[0x20];
+
+ u8 hw_id[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 irisc_index[0x8];
+ u8 synd[0x8];
+ u8 ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+ u8 no_lb[0x1];
+ u8 reserved_0[0x7];
+ u8 port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_teardown_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 profile[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0,
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x6];
+ u8 demux_mode[0x2];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_6[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
};
struct mlx5_ifc_set_hca_cap_in_bits {
@@ -313,10 +2764,653 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_2[0x40];
- struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+ union mlx5_ifc_hca_cap_union_bits capability;
};
-struct mlx5_ifc_query_hca_cap_in_bits {
+struct mlx5_ifc_set_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
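(Illustrative sketch, not part of the patch: the set_fte_in layout above embeds a full flow_context, and driver code typically composes such command blocks with the MLX5_SET/MLX5_ADDR_OF/MLX5_ST_SZ_* accessor macros from include/linux/mlx5/device.h. The opcode constant MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY and mlx5_cmd_exec() are assumed from the command-opcode enum and the mlx5_core driver; all field names come from the structures shown above.)

/* Illustrative sketch only -- not part of this patch. */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/slab.h>

/* Install a DROP entry at flow_index; no destination list is needed. */
static int set_drop_fte(struct mlx5_core_dev *dev, u8 table_type,
			u32 table_id, u32 flow_index, u32 group_id)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(set_fte_in);
	void *in, *fc;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);	/* too large for the stack */
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);
	MLX5_SET(set_fte_in, in, flow_index, flow_index);

	fc = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, fc, group_id, group_id);
	MLX5_SET(flow_context, fc, action, MLX5_FLOW_CONTEXT_ACTION_DROP);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	kfree(in);
	return err;
}

(The command buffer is heap-allocated here because the fte_match_param block inside flow_context makes set_fte_in considerably larger than is comfortable on a kernel stack.)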
+struct mlx5_ifc_rts2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_query_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0,
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 admin_state[0x4];
+ u8 state[0x4];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
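(Illustrative sketch, not part of the patch: reading the vport state out of the two layouts above follows the usual MLX5_SET/MLX5_GET pattern. MLX5_CMD_OP_QUERY_VPORT_STATE and mlx5_cmd_exec() are assumed from the mlx5_core driver; every field and enum value used below appears in the structures shown here.)

/* Illustrative sketch only -- not part of this patch. */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

static u8 query_vport_oper_state(struct mlx5_core_dev *mdev, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod,
		 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)	/* querying a vport other than our own */
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)))
		return MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN;

	return MLX5_GET(query_vport_state_out, out, state);
}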
+struct mlx5_ifc_query_vport_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_traffic_counter_bits received_errors;
+
+ struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+ u8 reserved_2[0xa00];
+};
+
+enum {
+ MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x60];
+
+ u8 clear[0x1];
+ u8 reserved_4[0x1f];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 resd_lkey[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_3[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 rx_write_requests[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 rx_read_requests[0x20];
+
+ u8 reserved_3[0x20];
+
+ u8 rx_atomic_requests[0x20];
+
+ u8 reserved_4[0x20];
+
+ u8 rx_dct_connect[0x20];
+
+ u8 reserved_5[0x20];
+
+ u8 out_of_buffer[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 out_of_sequence[0x20];
+
+ u8 reserved_7[0x620];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x80];
+
+ u8 clear[0x1];
+ u8 reserved_3[0x1f];
+
+ u8 reserved_4[0x18];
+ u8 counter_set_id[0x8];
+};
+
+struct mlx5_ifc_query_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+};
+
+enum {
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1,
+ MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2,
+ MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3,
+};
+
+struct mlx5_ifc_query_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
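(Illustrative sketch, not part of the patch: a QUERY_PAGES request/response built on the two layouts above. MLX5_CMD_OP_QUERY_PAGES and mlx5_cmd_exec() are assumed from the mlx5_core driver; num_pages is treated here as a signed count, since page counts in the paging machinery can go negative when the device wants pages returned.)

/* Illustrative sketch only -- not part of this patch. */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

static int query_boot_pages(struct mlx5_core_dev *dev,
			    u16 *func_id, s32 *npages)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod,
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*func_id = MLX5_GET(query_pages_out, out, function_id);
	*npages  = MLX5_GET(query_pages_out, out, num_pages);
	return 0;
}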
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_query_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 bsf0_klm0_pas_mtt0_1[16][0x8];
+
+ u8 bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 mad_dumux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -326,6 +3420,146 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 reserved_2[0x40];
};
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 reserved_2[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_3[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_query_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_2[0xa0];
+
+ u8 supported_issi_reserved[76][0x8];
+ u8 supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 pkey_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 gids_num[0x10];
+ u8 reserved_2[0x10];
+
+ struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
struct mlx5_ifc_query_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -334,16 +3568,3216 @@ struct mlx5_ifc_query_hca_cap_out_bits {
u8 reserved_1[0x40];
- u8 capability_struct[256][0x8];
+ union mlx5_ifc_hca_cap_union_bits capability;
};
-struct mlx5_ifc_set_hca_cap_out_bits {
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x80];
+
+ u8 reserved_2[0x8];
+ u8 level[0x8];
+ u8 reserved_3[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_4[0x120];
+};
+
+struct mlx5_ifc_query_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x1c0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+};
+
+enum {
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_3[0xa0];
+
+ u8 reserved_4[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_5[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_5[0x120];
+};
+
+struct mlx5_ifc_query_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_2[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_3[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_2[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_2[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 cur_flows[0x20];
+
+ u8 sum_flows[0x20];
+
+ u8 cnp_ignored_high[0x20];
+
+ u8 cnp_ignored_low[0x20];
+
+ u8 cnp_handled_high[0x20];
+
+ u8 cnp_handled_low[0x20];
+
+ u8 reserved_2[0x100];
+
+ u8 time_stamp_high[0x20];
+
+ u8 time_stamp_low[0x20];
+
+ u8 accumulators_period[0x20];
+
+ u8 ecn_marked_roce_packets_high[0x20];
+
+ u8 ecn_marked_roce_packets_low[0x20];
+
+ u8 cnps_sent_high[0x20];
+
+ u8 cnps_sent_low[0x20];
+
+ u8 reserved_3[0x560];
+};
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 clear[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 error[0x1];
+ u8 reserved_2[0x4];
+ u8 rdma[0x1];
+ u8 read_write[0x1];
+ u8 req_res[0x1];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_nop_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x18];
+ u8 admin_state[0x4];
+ u8 reserved_4[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 sq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rmp_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 lwm[0x1];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rmp_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_rmp_bitmask_bits bitmask;
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+ u8 reserved_0[0x1c];
+ u8 permanent_address[0x1];
+ u8 addresses_list[0x1];
+ u8 roce_en[0x1];
+ u8 reserved_1[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+ u8 reserved_3[0x780];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0,
+ MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_3[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+ u8 reserved_3[0x80];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 output_num_entries[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 pas[0][0x40];
+};
+
+enum {
+ MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0,
+ MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1,
+ MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 input_num_entries[0x20];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_mad_ifc_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 remote_lid[0x10];
+ u8 reserved_2[0x8];
+ u8 port[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 mad[256][0x8];
+};
+
+struct mlx5_ifc_init_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 packet_headers_log[128][0x8];
+
+ u8 packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_drain_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 psvn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_5[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_4[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 reserved_2[0x8];
+ u8 psv0_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 psv1_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 psv2_index[0x18];
+
+ u8 reserved_5[0x8];
+ u8 psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 num_psv[0x4];
+ u8 reserved_2[0x4];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_4[0x80];
+
+ u8 translations_octword_actual_size[0x20];
+
+ u8 reserved_5[0x560];
+
+ u8 klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x8];
+ u8 level[0x8];
+ u8 reserved_6[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_7[0x120];
+};
+
+struct mlx5_ifc_create_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 group_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_create_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_7[0xa0];
+
+ u8 reserved_8[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_9[0xe00];
+};
+
+struct mlx5_ifc_create_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_3[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_4[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_3[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0,
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srq_number[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dct_number[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 register_data[0][0x20];
+};
+
+enum {
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0,
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 register_id[0x10];
+
+ u8 argument[0x20];
+
+ u8 register_data[0][0x20];
+};
+
+struct mlx5_ifc_sltp_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x7];
+ u8 polarity[0x1];
+ u8 ob_tap0[0x8];
+ u8 ob_tap1[0x8];
+ u8 ob_tap2[0x8];
+
+ u8 reserved_4[0xc];
+ u8 ob_preemp_mode[0x4];
+ u8 ob_reg[0x8];
+ u8 ob_bias[0x8];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 time_to_link_up[0x10];
+ u8 reserved_2[0xc];
+ u8 grade_lane_speed[0x4];
+
+ u8 grade_version[0x8];
+ u8 grade[0x18];
+
+ u8 reserved_3[0x4];
+ u8 height_grade_type[0x4];
+ u8 height_grade[0x18];
+
+ u8 height_dz[0x10];
+ u8 height_dv[0x10];
+
+ u8 reserved_4[0x10];
+ u8 height_sigma[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x4];
+ u8 phase_grade_type[0x4];
+ u8 phase_grade[0x18];
+
+ u8 reserved_7[0x8];
+ u8 phase_eo_pos[0x8];
+ u8 reserved_8[0x8];
+ u8 phase_eo_neg[0x8];
+
+ u8 ffe_set_tested[0x10];
+ u8 test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 vl_hw_cap[0x4];
+
+ u8 reserved_3[0x1c];
+ u8 vl_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 proto_mask[0x3];
+
+ u8 reserved_2[0x40];
+
+ u8 eth_proto_capability[0x20];
+
+ u8 ib_link_width_capability[0x10];
+ u8 ib_proto_capability[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 eth_proto_admin[0x20];
+
+ u8 ib_link_width_admin[0x10];
+ u8 ib_proto_admin[0x10];
+
+ u8 reserved_4[0x20];
+
+ u8 eth_proto_oper[0x20];
+
+ u8 ib_link_width_oper[0x10];
+ u8 ib_proto_oper[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 eth_proto_lp_advertise[0x20];
+
+ u8 reserved_6[0x60];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+ u8 reserved_0[0x20];
+
+ u8 algorithm_options[0x10];
+ u8 reserved_1[0x4];
+ u8 repetitions_mode[0x4];
+ u8 num_of_repetitions[0x8];
+
+ u8 grade_version[0x8];
+ u8 height_grade_type[0x4];
+ u8 phase_grade_type[0x4];
+ u8 height_grade_weight[0x8];
+ u8 phase_grade_weight[0x8];
+
+ u8 gisim_measure_bits[0x10];
+ u8 adaptive_tap_measure_bits[0x10];
+
+ u8 ber_bath_high_error_threshold[0x10];
+ u8 ber_bath_mid_error_threshold[0x10];
+
+ u8 ber_bath_low_error_threshold[0x10];
+ u8 one_ratio_high_threshold[0x10];
+
+ u8 one_ratio_high_mid_threshold[0x10];
+ u8 one_ratio_low_mid_threshold[0x10];
+
+ u8 one_ratio_low_threshold[0x10];
+ u8 ndeo_error_threshold[0x10];
+
+ u8 mixer_offset_step_size[0x10];
+ u8 reserved_2[0x8];
+ u8 mix90_phase_for_voltage_bath[0x8];
+
+ u8 mixer_offset_start[0x10];
+ u8 mixer_offset_end[0x10];
+
+ u8 reserved_3[0x15];
+ u8 ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 sub_port[0x8];
+ u8 reserved_0[0x8];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_pqdr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x5];
+ u8 prio[0x3];
+ u8 reserved_2[0x6];
+ u8 mode[0x2];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x10];
+ u8 min_threshold[0x10];
+
+ u8 reserved_5[0x10];
+ u8 max_threshold[0x10];
+
+ u8 reserved_6[0x10];
+ u8 mark_probability_denominator[0x10];
+
+ u8 reserved_7[0x60];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x1c];
+ u8 wrps_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 wrps_status[0x4];
+
+ u8 reserved_5[0x8];
+ u8 up_threshold[0x8];
+ u8 reserved_6[0x8];
+ u8 down_threshold[0x8];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x1c];
+ u8 srps_admin[0x4];
+
+ u8 reserved_9[0x1c];
+ u8 srps_status[0x4];
+
+ u8 reserved_10[0x40];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x8];
+ u8 lb_cap[0x8];
+ u8 reserved_3[0x8];
+ u8 lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_3[0x8];
+
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
+
+ u8 reserved_4[0x20];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x8];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_1[0x1c];
+ u8 prio_tc[0x3];
+
+ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+ u8 reserved_0[0x3];
+ u8 single_mac[0x1];
+ u8 reserved_1[0x4];
+ u8 local_port[0x8];
+ u8 mac_47_32[0x10];
+
+ u8 mac_31_0[0x20];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 max_mtu[0x10];
+ u8 reserved_2[0x10];
+
+ u8 admin_mtu[0x10];
+ u8 reserved_3[0x10];
+
+ u8 oper_mtu[0x10];
+ u8 reserved_4[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x18];
+ u8 attenuation_5g[0x8];
+
+ u8 reserved_3[0x18];
+ u8 attenuation_7g[0x8];
+
+ u8 reserved_4[0x18];
+ u8 attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0xc];
+ u8 module_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+ u8 module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+ u8 reserved_0[0x4];
+ u8 mlpn_status[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 e[0x1];
+ u8 reserved_2[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+ u8 rxtx[0x1];
+ u8 reserved_0[0x7];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 width[0x8];
+
+ u8 lane0_module_mapping[0x20];
+
+ u8 lane1_module_mapping[0x20];
+
+ u8 lane2_module_mapping[0x20];
+
+ u8 lane3_module_mapping[0x20];
+
+ u8 reserved_2[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_2[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_3[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_4[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+ u8 reserved_0[0x4];
+ u8 profile_id[0xc];
+ u8 reserved_1[0x4];
+ u8 proto_mask[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x10];
+ u8 lane_speed[0x10];
+
+ u8 reserved_4[0x17];
+ u8 lpbf[0x1];
+ u8 fec_mode_policy[0x8];
+
+ u8 retransmission_capability[0x8];
+ u8 fec_mode_capability[0x18];
+
+ u8 retransmission_support_admin[0x8];
+ u8 fec_mode_support_admin[0x18];
+
+ u8 retransmission_request_admin[0x8];
+ u8 fec_mode_request_admin[0x18];
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 ib_port[0x8];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 lbf_mode[0x3];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 dic[0x1];
+ u8 reserved_2[0x19];
+ u8 ipg[0x4];
+ u8 reserved_3[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xe0];
+
+ u8 port_filter[8][0x20];
+
+ u8 port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 ppan[0x4];
+ u8 reserved_2[0x4];
+ u8 prio_mask_tx[0x8];
+ u8 reserved_3[0x8];
+ u8 prio_mask_rx[0x8];
+
+ u8 pptx[0x1];
+ u8 aptx[0x1];
+ u8 reserved_4[0x6];
+ u8 pfctx[0x8];
+ u8 reserved_5[0x10];
+
+ u8 pprx[0x1];
+ u8 aprx[0x1];
+ u8 reserved_6[0x6];
+ u8 pfcrx[0x8];
+ u8 reserved_7[0x10];
+
+ u8 reserved_8[0x80];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+ u8 op[0x4];
+ u8 reserved_0[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 op_admin[0x8];
+ u8 op_capability[0x8];
+ u8 op_request[0x8];
+ u8 op_active[0x8];
+
+ u8 admin[0x40];
+
+ u8 capability[0x40];
+
+ u8 request[0x40];
+
+ u8 active[0x40];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xc];
+ u8 error_count[0x4];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0xc];
+ u8 lane[0x4];
+ u8 reserved_5[0x8];
+ u8 error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_2[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_3[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+ u8 reserved_0[0x8];
+ u8 opamp_group[0x8];
+ u8 reserved_1[0xc];
+ u8 opamp_group_type[0x4];
+
+ u8 start_index[0x10];
+ u8 reserved_2[0x4];
+ u8 num_of_indices[0xc];
+
+ u8 index_data[18][0x10];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+ u8 reserved_0[0x6];
+ u8 rx_lane[0x2];
+ u8 reserved_1[0x6];
+ u8 tx_lane[0x2];
+ u8 reserved_2[0x8];
+ u8 module[0x8];
+};
+
+struct mlx5_ifc_bufferx_reg_bits {
+ u8 reserved_0[0x6];
+ u8 lossy[0x1];
+ u8 epsb[0x1];
+ u8 reserved_1[0xc];
+ u8 size[0xc];
+
+ u8 xoff_threshold[0x10];
+ u8 xon_threshold[0x10];
+};
+
+struct mlx5_ifc_set_node_in_bits {
+ u8 node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+ u8 reserved_0[0x18];
+ u8 power_settings_level[0x8];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_host_endianness_bits {
+ u8 he[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+ u8 reserved_0[0x20];
+
+ u8 mkey[0x20];
+
+ u8 addressh_63_32[0x20];
+
+ u8 addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+ u8 dc_key[0x40];
+
+ u8 ext[0x1];
+ u8 reserved_0[0x7];
+ u8 destination_qp_dct[0x18];
+
+ u8 static_rate[0x4];
+ u8 sl_eth_prio[0x4];
+ u8 fl[0x1];
+ u8 mlid[0x7];
+ u8 rlid_udp_sport[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 rmac_47_16[0x20];
+
+ u8 rmac_15_0[0x10];
+ u8 tclass[0x8];
+ u8 hop_limit[0x8];
+
+ u8 reserved_2[0x1];
+ u8 grh[0x1];
+ u8 reserved_3[0x2];
+ u8 src_addr_index[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+ u8 reserved_0[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+
+ u8 reserved_1[0xa0];
+};
+
+struct mlx5_ifc_eqe_bits {
+ u8 reserved_0[0x8];
+ u8 event_type[0x8];
+ u8 reserved_1[0x8];
+ u8 event_sub_type[0x8];
+
+ u8 reserved_2[0xe0];
+
+ union mlx5_ifc_event_auto_bits event_data;
+
+ u8 reserved_3[0x10];
+ u8 signature[0x8];
+ u8 reserved_4[0x7];
+ u8 owner[0x1];
+};
+
+enum {
+ MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7,
+};
+
+struct mlx5_ifc_cmd_queue_entry_bits {
+ u8 type[0x8];
+ u8 reserved_0[0x18];
+
+ u8 input_length[0x20];
+
+ u8 input_mailbox_pointer_63_32[0x20];
+
+ u8 input_mailbox_pointer_31_9[0x17];
+ u8 reserved_1[0x9];
+
+ u8 command_input_inline_data[16][0x8];
+
+ u8 command_output_inline_data[16][0x8];
+
+ u8 output_mailbox_pointer_63_32[0x20];
+
+ u8 output_mailbox_pointer_31_9[0x17];
+ u8 reserved_2[0x9];
+
+ u8 output_length[0x20];
+
+ u8 token[0x8];
+ u8 signature[0x8];
+ u8 reserved_3[0x8];
+ u8 status[0x7];
+ u8 ownership[0x1];
+};
+
+struct mlx5_ifc_cmd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+ u8 mailbox_data[512][0x8];
+
+ u8 reserved_0[0x180];
+
+ u8 next_pointer_63_32[0x20];
+
+ u8 next_pointer_31_10[0x16];
+ u8 reserved_1[0xa];
+
+ u8 block_number[0x20];
+
+ u8 reserved_2[0x8];
+ u8 token[0x8];
+ u8 ctrl_signature[0x8];
+ u8 signature[0x8];
+};
+
+struct mlx5_ifc_mtt_bits {
+ u8 ptag_63_32[0x20];
+
+ u8 ptag_31_8[0x18];
+ u8 reserved_0[0x6];
+ u8 wr_en[0x1];
+ u8 rd_en[0x1];
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+};
+
+struct mlx5_ifc_initial_seg_bits {
+ u8 fw_rev_minor[0x10];
+ u8 fw_rev_major[0x10];
+
+ u8 cmd_interface_rev[0x10];
+ u8 fw_rev_subminor[0x10];
+
+ u8 reserved_0[0x40];
+
+ u8 cmdq_phy_addr_63_32[0x20];
+
+ u8 cmdq_phy_addr_31_12[0x14];
+ u8 reserved_1[0x2];
+ u8 nic_interface[0x2];
+ u8 log_cmdq_size[0x4];
+ u8 log_cmdq_stride[0x4];
+
+ u8 command_doorbell_vector[0x20];
+
+ u8 reserved_2[0xf00];
+
+ u8 initializing[0x1];
+ u8 reserved_3[0x4];
+ u8 nic_interface_supported[0x3];
+ u8 reserved_4[0x18];
+
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+
+ u8 no_dram_nic_offset[0x20];
+
+ u8 reserved_5[0x6e40];
+
+ u8 reserved_6[0x1f];
+ u8 clear_int[0x1];
+
+ u8 health_syndrome[0x8];
+ u8 health_counter[0x18];
+
+ u8 reserved_7[0x17fc0];
+};
+
+union mlx5_ifc_ports_control_registers_document_bits {
+ struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+ struct mlx5_ifc_pamp_reg_bits pamp_reg;
+ struct mlx5_ifc_paos_reg_bits paos_reg;
+ struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_peir_reg_bits peir_reg;
+ struct mlx5_ifc_pelc_reg_bits pelc_reg;
+ struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_pifr_reg_bits pifr_reg;
+ struct mlx5_ifc_pipg_reg_bits pipg_reg;
+ struct mlx5_ifc_plbf_reg_bits plbf_reg;
+ struct mlx5_ifc_plib_reg_bits plib_reg;
+ struct mlx5_ifc_plpc_reg_bits plpc_reg;
+ struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+ struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+ struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+ struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+ struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+ struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+ struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+ struct mlx5_ifc_ppad_reg_bits ppad_reg;
+ struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_pplm_reg_bits pplm_reg;
+ struct mlx5_ifc_pplr_reg_bits pplr_reg;
+ struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+ struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
+ struct mlx5_ifc_pspa_reg_bits pspa_reg;
+ struct mlx5_ifc_ptas_reg_bits ptas_reg;
+ struct mlx5_ifc_ptys_reg_bits ptys_reg;
+ struct mlx5_ifc_pude_reg_bits pude_reg;
+ struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+ struct mlx5_ifc_slrg_reg_bits slrg_reg;
+ struct mlx5_ifc_sltp_reg_bits sltp_reg;
+ u8 reserved_0[0x60e0];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+ u8 reserved_0[0x200];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+ struct mlx5_ifc_initial_seg_bits initial_seg;
+ u8 reserved_0[0x20060];
};
#endif /* MLX5_IFC_H */
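The mlx5_ifc_*_bits layouts above are not meant to be used as C structs directly: each u8 name[0xN] entry only records a field's width in bits, and the driver reads and writes command mailboxes through the MLX5_SET()/MLX5_GET()/MLX5_ST_SZ_DW() accessor macros. A minimal sketch of that pattern follows, assuming the accessors and mlx5_cmd_exec() exported by the mlx5 core; the function name is hypothetical and status/syndrome checking is elided.

/*
 * Sketch: driving one of the commands described by the generated layouts.
 * Assumes MLX5_SET/MLX5_GET/MLX5_ST_SZ_DW and MLX5_CMD_OP_ALLOC_PD from
 * <linux/mlx5/device.h> and mlx5_cmd_exec() from <linux/mlx5/driver.h>.
 */
#include <linux/string.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

static int example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)];
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	/* Field names come from mlx5_ifc_alloc_pd_in_bits above. */
	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* "pd" is the 24-bit field in mlx5_ifc_alloc_pd_out_bits. */
	*pdn = MLX5_GET(alloc_pd_out, out, pd);
	return 0;
}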
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 61f7a342d1bf..f079fb1a31f7 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -134,13 +134,21 @@ enum {
enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
+ MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
};
enum {
+ MLX5_SEND_WQE_DS = 16,
MLX5_SEND_WQE_BB = 64,
};
+#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+ MLX5_SEND_WQE_MAX_WQEBBS = 16,
+};
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+enum {
+ MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
+ MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
+ MLX5_ETH_WQE_L3_CSUM = 1 << 6,
+ MLX5_ETH_WQE_L4_CSUM = 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+ u8 rsvd0[4];
+ u8 cs_flags;
+ u8 rsvd1;
+ __be16 mss;
+ __be32 rsvd2;
+ __be16 inline_hdr_sz;
+ u8 inline_hdr_start[2];
+};
+
struct mlx5_wqe_xrc_seg {
__be32 xrc_srqn;
u8 rsvd[12];
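The new mlx5_wqe_eth_seg and MLX5_ETH_WQE_*_CSUM flags let an Ethernet send WQE request checksum offload per packet. A minimal sketch of filling the segment, assuming a caller that has already laid out the rest of the WQE (helper name hypothetical):

#include <linux/skbuff.h>
#include <linux/mlx5/qp.h>

static void example_fill_eth_seg(struct mlx5_wqe_eth_seg *eseg,
				 struct sk_buff *skb)
{
	/* Ask the HCA to generate outer L3 and L4 checksums when the stack
	 * left them for hardware offload. */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;

	/* mss is only meaningful for TSO; leave it zero for plain sends.
	 * inline_hdr_sz/inline_hdr_start are filled by the caller once the
	 * packet headers have been copied inline. */
	eseg->mss = 0;
}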
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index e1a363a33663..f43ed054a3e0 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
new file mode 100644
index 000000000000..967e0fd06e89
--- /dev/null
+++ b/include/linux/mlx5/vport.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <linux/mlx5/driver.h>
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid);
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey);
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep);
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid);
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid);
+
+#endif /* __MLX5_VPORT_H__ */
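A rough sketch of how a consumer might use the new vport API, here fetching the permanent MAC address for a net_device; only the prototypes declared above are assumed, and the helper name is hypothetical:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

static void example_set_netdev_mac(struct mlx5_core_dev *mdev,
				   struct net_device *netdev)
{
	u8 mac[ETH_ALEN];

	/* Permanent MAC address from the NIC vport context. */
	mlx5_query_nic_vport_mac_address(mdev, mac);
	ether_addr_copy(netdev->dev_addr, mac);
}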
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
new file mode 100644
index 000000000000..4efc3f56e6df
--- /dev/null
+++ b/include/linux/mm-arch-hooks.h
@@ -0,0 +1,25 @@
+/*
+ * Generic mm no-op hooks.
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_MM_ARCH_HOOKS_H
+#define _LINUX_MM_ARCH_HOOKS_H
+
+#include <asm/mm-arch-hooks.h>
+
+#ifndef arch_remap
+static inline void arch_remap(struct mm_struct *mm,
+ unsigned long old_start, unsigned long old_end,
+ unsigned long new_start, unsigned long new_end)
+{
+}
+#define arch_remap arch_remap
+#endif
+
+#endif /* _LINUX_MM_ARCH_HOOKS_H */
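An architecture opts out of the generic no-op by providing its own arch_remap() in asm/mm-arch-hooks.h and defining the arch_remap macro before the header above tests it. An illustrative sketch of such an architecture header (hypothetical arch, hypothetical body):

#ifndef _ASM_EXAMPLE_MM_ARCH_HOOKS_H
#define _ASM_EXAMPLE_MM_ARCH_HOOKS_H

static inline void arch_remap(struct mm_struct *mm,
			      unsigned long old_start, unsigned long old_end,
			      unsigned long new_start, unsigned long new_end)
{
	/* e.g., track a per-mm special mapping (such as a VDSO) that
	 * mremap() has just moved from old_start to new_start. */
}
#define arch_remap arch_remap

#endif /* _ASM_EXAMPLE_MM_ARCH_HOOKS_H */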
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 47a93928b90f..99959a34f4f1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -27,6 +27,7 @@ struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
+struct bdi_writeback;
#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -251,6 +252,9 @@ struct vm_operations_struct {
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
+ int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
*/
@@ -494,18 +498,9 @@ static inline int page_count(struct page *page)
return atomic_read(&compound_head(page)->_count);
}
-#ifdef CONFIG_HUGETLB_PAGE
-extern int PageHeadHuge(struct page *page_head);
-#else /* CONFIG_HUGETLB_PAGE */
-static inline int PageHeadHuge(struct page *page_head)
-{
- return 0;
-}
-#endif /* CONFIG_HUGETLB_PAGE */
-
static inline bool __compound_tail_refcounted(struct page *page)
{
- return !PageSlab(page) && !PageHeadHuge(page);
+ return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page);
}
/*
@@ -571,53 +566,6 @@ static inline void init_page_count(struct page *page)
atomic_set(&page->_count, 1);
}
-/*
- * PageBuddy() indicate that the page is free and in the buddy system
- * (see mm/page_alloc.c).
- *
- * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
- * -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
- * efficiently by most CPU architectures.
- */
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-
-static inline int PageBuddy(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBuddy(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBuddy(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBuddy(page), page);
- atomic_set(&page->_mapcount, -1);
-}
-
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-
-static inline int PageBalloon(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBalloon(page), page);
- atomic_set(&page->_mapcount, -1);
-}
-
void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
@@ -1006,34 +954,10 @@ void page_address_init(void);
#define page_address_init() do { } while(0)
#endif
-/*
- * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
- *
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
- *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
- *
- * Please note that, confusingly, "page_mapping" refers to the inode
- * address_space which maps the page from disk; whereas "page_mapped"
- * refers to user virtual address space into which the page is mapped.
- */
-#define PAGE_MAPPING_ANON 1
-#define PAGE_MAPPING_KSM 2
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
-
+extern void *page_rmapping(struct page *page);
+extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);
-/* Neutral page->mapping pointer to address_space or anon_vma or other */
-static inline void *page_rmapping(struct page *page)
-{
- return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
-}
-
extern struct address_space *__page_file_mapping(struct page *);
static inline
@@ -1045,11 +969,6 @@ struct address_space *page_file_mapping(struct page *page)
return page->mapping;
}
-static inline int PageAnon(struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
-}
-
/*
* Return the pagecache index of the passed page. Regular pagecache pages
* use ->index whereas swapcache pages use ->private
@@ -1293,10 +1212,15 @@ int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_dirtied(struct page *page, struct address_space *mapping,
+ struct mem_cgroup *memcg);
+void account_page_cleaned(struct page *page, struct address_space *mapping,
+ struct mem_cgroup *memcg, struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
+void cancel_dirty_page(struct page *page);
int clear_page_dirty_for_io(struct page *page);
+
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
/* Is the vma a continuation of the stack vma above it? */
@@ -1973,10 +1897,10 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
- if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
- return unmapped_area(info);
- else
+ if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
return unmapped_area_topdown(info);
+ else
+ return unmapped_area(info);
}
/* truncate.c */
@@ -2109,7 +2033,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
* and return without waiting upon it */
-#define FOLL_MLOCK 0x40 /* mark page as mlocked */
+#define FOLL_POPULATE 0x40 /* fault in page */
#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
@@ -2226,12 +2150,47 @@ enum mf_flags {
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
+extern int get_hwpoison_page(struct page *page);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);
+
+/*
+ * Error handlers for various types of pages.
+ */
+enum mf_result {
+ MF_IGNORED, /* Error: cannot be handled */
+ MF_FAILED, /* Error: handling failed */
+ MF_DELAYED, /* Will be handled later */
+ MF_RECOVERED, /* Successfully recovered */
+};
+
+enum mf_action_page_type {
+ MF_MSG_KERNEL,
+ MF_MSG_KERNEL_HIGH_ORDER,
+ MF_MSG_SLAB,
+ MF_MSG_DIFFERENT_COMPOUND,
+ MF_MSG_POISONED_HUGE,
+ MF_MSG_HUGE,
+ MF_MSG_FREE_HUGE,
+ MF_MSG_UNMAP_FAILED,
+ MF_MSG_DIRTY_SWAPCACHE,
+ MF_MSG_CLEAN_SWAPCACHE,
+ MF_MSG_DIRTY_MLOCKED_LRU,
+ MF_MSG_CLEAN_MLOCKED_LRU,
+ MF_MSG_DIRTY_UNEVICTABLE_LRU,
+ MF_MSG_CLEAN_UNEVICTABLE_LRU,
+ MF_MSG_DIRTY_LRU,
+ MF_MSG_CLEAN_LRU,
+ MF_MSG_TRUNCATED_LRU,
+ MF_MSG_BUDDY,
+ MF_MSG_BUDDY_2ND,
+ MF_MSG_UNKNOWN,
+};
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
unsigned long addr,
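The pfn_mkwrite() hook added to vm_operations_struct in this patch is called on write faults over VM_PFNMAP/VM_MIXEDMAP mappings. A minimal sketch of a driver wiring it up (driver and function names hypothetical; the callback signature is the one added above):

#include <linux/mm.h>

static int example_pfn_mkwrite(struct vm_area_struct *vma,
			       struct vm_fault *vmf)
{
	/* Runs before the PTE is made writable, giving the driver a chance
	 * to mark its backing store dirty.  Returning 0 lets the fault
	 * handler proceed. */
	return 0;
}

/* Assigned to vma->vm_ops in the driver's mmap() handler. */
static const struct vm_operations_struct example_vm_ops = {
	.pfn_mkwrite	= example_pfn_mkwrite,
};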
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 199a03aab8dc..0038ac7466fd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -226,6 +226,24 @@ struct page_frag {
#endif
};
+#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+ void * va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ __u16 offset;
+ __u16 size;
+#else
+ __u32 offset;
+#endif
+ /* we maintain a pagecount bias, so that we dont dirty cache line
+ * containing page->_count every time we allocate a fragment.
+ */
+ unsigned int pagecnt_bias;
+ bool pfmemalloc;
+};
+
typedef unsigned long __nocast vm_flags_t;
/*
@@ -364,7 +382,9 @@ struct mm_struct {
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
atomic_long_t nr_ptes; /* PTE page table pages */
+#if CONFIG_PGTABLE_LEVELS > 2
atomic_long_t nr_pmds; /* PMD page table pages */
+#endif
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some counters */
@@ -427,7 +447,7 @@ struct mm_struct {
#endif
/* store ref to file /proc/<pid>/exe symlink points to */
- struct file *exe_file;
+ struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
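
struct page_frag_cache carves small fragments out of one larger allocation and keeps a pagecnt_bias so the shared page's reference count (page->_count) only has to be touched when the cache is refilled, not on every fragment. A minimal userspace sketch of that bookkeeping follows, with malloc() standing in for the page allocator; everything beyond the field names shown above is invented for the example.

/* Sketch of the fragment-cache bookkeeping only; malloc() stands in for
 * page allocation and "refcount" for page->_count. */
#include <stdio.h>
#include <stdlib.h>

#define FRAG_CACHE_SIZE 32768u	/* analogue of PAGE_FRAG_CACHE_MAX_SIZE */

struct frag_cache {
	void *va;
	unsigned int offset;
	unsigned int pagecnt_bias;	/* references handed out up front */
	unsigned int refcount;		/* stand-in for page->_count */
};

static void *frag_alloc(struct frag_cache *fc, unsigned int size)
{
	if (!fc->va || fc->offset + size > FRAG_CACHE_SIZE) {
		free(fc->va);
		fc->va = malloc(FRAG_CACHE_SIZE);
		if (!fc->va)
			return NULL;
		fc->offset = 0;
		/*
		 * Take all the references we will ever need in one go, so
		 * handing out a fragment never has to touch refcount.
		 */
		fc->refcount = FRAG_CACHE_SIZE;
		fc->pagecnt_bias = FRAG_CACHE_SIZE;
	}

	void *frag = (char *)fc->va + fc->offset;

	fc->offset += size;
	fc->pagecnt_bias--;	/* cheap: private to this cache */
	return frag;
}

int main(void)
{
	struct frag_cache fc = { 0 };
	void *a = frag_alloc(&fc, 256);
	void *b = frag_alloc(&fc, 512);

	printf("fragments at %p and %p, bias now %u\n", a, b, fc.pagecnt_bias);
	free(fc.va);
	return 0;
}
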
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index a6cf4c063e4e..4d3776d25925 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -97,6 +97,7 @@ struct mmc_ext_csd {
u8 raw_erased_mem_count; /* 181 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
+ u8 raw_driver_strength; /* 197 */
u8 out_of_int_time; /* 198 */
u8 raw_pwr_cl_52_195; /* 200 */
u8 raw_pwr_cl_26_195; /* 201 */
@@ -305,6 +306,7 @@ struct mmc_card {
unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
unsigned int mmc_avail_type; /* supported device type by both host and card */
+ unsigned int drive_strength; /* for UHS-I, HS200 or HS400 */
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
@@ -512,8 +514,18 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
-extern int mmc_register_driver(struct device_driver *);
-extern void mmc_unregister_driver(struct device_driver *);
+/*
+ * MMC device driver (e.g., Flash card, I/O card...)
+ */
+struct mmc_driver {
+ struct device_driver drv;
+ int (*probe)(struct mmc_card *);
+ void (*remove)(struct mmc_card *);
+ void (*shutdown)(struct mmc_card *);
+};
+
+extern int mmc_register_driver(struct mmc_driver *);
+extern void mmc_unregister_driver(struct mmc_driver *);
extern void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table);
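
mmc_register_driver()/mmc_unregister_driver() now take the new struct mmc_driver wrapper, so card drivers supply probe/remove/shutdown alongside the embedded device_driver. A hypothetical driver skeleton against the new interface might look like this (driver name and callback bodies are invented):

#include <linux/module.h>
#include <linux/device.h>
#include <linux/mmc/card.h>

static int example_probe(struct mmc_card *card)
{
	dev_info(&card->dev, "example: card bound\n");
	return 0;
}

static void example_remove(struct mmc_card *card)
{
	dev_info(&card->dev, "example: card removed\n");
}

static struct mmc_driver example_mmc_driver = {
	.drv = {
		.name	= "example_mmc",
	},
	.probe	= example_probe,
	.remove	= example_remove,
	/* .shutdown is optional */
};

static int __init example_init(void)
{
	return mmc_register_driver(&example_mmc_driver);
}

static void __exit example_exit(void)
{
	mmc_unregister_driver(&example_mmc_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
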
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index de722d4e9d61..258daf914c6d 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -121,6 +121,7 @@ struct mmc_data {
struct mmc_request *mrq; /* associated request */
unsigned int sg_len; /* size of scatter list */
+ int sg_count; /* mapped sg entries */
struct scatterlist *sg; /* I/O scatter list */
s32 host_cookie; /* host private data */
};
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 12111993a317..5be97676f1fa 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -226,12 +226,6 @@ struct dw_mci_dma_ops {
#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
/* Unreliable card detection */
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
-/* No write protect */
-#define DW_MCI_QUIRK_NO_WRITE_PROTECT BIT(4)
-
-/* Slot level quirks */
-/* This slot has no write protect */
-#define DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT BIT(0)
struct dma_pdata;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index b5bedaec6223..1369e54faeb7 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -12,6 +12,7 @@
#include <linux/leds.h>
#include <linux/mutex.h>
+#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
@@ -131,7 +132,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
- int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
+ int (*select_drive_strength)(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
@@ -285,6 +288,7 @@ struct mmc_host {
MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
+#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -321,10 +325,18 @@ struct mmc_host {
#ifdef CONFIG_MMC_DEBUG
unsigned int removed:1; /* host is being removed */
#endif
+ unsigned int can_retune:1; /* re-tuning can be used */
+ unsigned int doing_retune:1; /* re-tuning in progress */
+ unsigned int retune_now:1; /* do re-tuning at next req */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
+ int need_retune; /* re-tuning is needed */
+ int hold_retune; /* hold off re-tuning */
+ unsigned int retune_period; /* re-tuning period in secs */
+ struct timer_list retune_timer; /* for periodic re-tuning */
+
bool trigger_card_event; /* card_event necessary */
struct mmc_card *card; /* device attached to this host */
@@ -513,4 +525,18 @@ static inline bool mmc_card_hs400(struct mmc_card *card)
return card->host->ios.timing == MMC_TIMING_MMC_HS400;
}
+void mmc_retune_timer_stop(struct mmc_host *host);
+
+static inline void mmc_retune_needed(struct mmc_host *host)
+{
+ if (host->can_retune)
+ host->need_retune = 1;
+}
+
+static inline void mmc_retune_recheck(struct mmc_host *host)
+{
+ if (host->hold_retune <= 1)
+ host->retune_now = 1;
+}
+
#endif /* LINUX_MMC_HOST_H */
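
The re-tuning fields and helpers added here let a host driver tell the core that tuning has gone stale, for example after a CRC error, so it can be redone before the next request; mmc_retune_timer_stop() is the hook for tearing the periodic timer down. The sketch below shows plausible call sites in a hypothetical host driver; everything outside the host.h names is invented.

#include <linux/errno.h>
#include <linux/mmc/host.h>

/* Hypothetical host-driver state. */
struct example_host {
	struct mmc_host *mmc;
};

/* Called from the (invented) data-error path of the host driver. */
static void example_handle_data_error(struct example_host *host, int error)
{
	/*
	 * A CRC error on a previously tuned bus usually means the sampling
	 * point has drifted: ask the core to re-tune before the next request.
	 */
	if (error == -EILSEQ)
		mmc_retune_needed(host->mmc);
}

/* Called when the card is powered down or removed. */
static void example_power_off(struct example_host *host)
{
	mmc_retune_timer_stop(host->mmc);
}
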
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 124f562118b8..15f2c4a0a62c 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -302,6 +302,7 @@ struct _mmc_csd {
#define EXT_CSD_REV 192 /* RO */
#define EXT_CSD_STRUCTURE 194 /* RO */
#define EXT_CSD_CARD_TYPE 196 /* RO */
+#define EXT_CSD_DRIVER_STRENGTH 197 /* RO */
#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */
#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
#define EXT_CSD_PWR_CL_52_195 200 /* RO */
@@ -390,6 +391,7 @@ struct _mmc_csd {
#define EXT_CSD_TIMING_HS 1 /* High speed */
#define EXT_CSD_TIMING_HS200 2 /* HS200 */
#define EXT_CSD_TIMING_HS400 3 /* HS400 */
+#define EXT_CSD_DRV_STR_SHIFT 4 /* Driver Strength shift */
#define EXT_CSD_SEC_ER_EN BIT(0)
#define EXT_CSD_SEC_BD_BLK_EN BIT(2)
@@ -441,4 +443,6 @@ struct _mmc_csd {
#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
+#define mmc_driver_type_mask(n) (1 << (n))
+
#endif /* LINUX_MMC_MMC_H */
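
EXT_CSD_DRIVER_STRENGTH, EXT_CSD_DRV_STR_SHIFT and mmc_driver_type_mask() back the new drive_strength selection: the card advertises supported types as a bitmask in EXT_CSD byte 197, and the chosen type is shifted into the upper bits of the HS_TIMING switch value. A standalone sketch of that packing, with the byte-197 contents and the chosen type made up for the example:

/* Standalone illustration; the defines mirror the header above. */
#include <stdio.h>

#define EXT_CSD_TIMING_HS400	3
#define EXT_CSD_DRV_STR_SHIFT	4
#define mmc_driver_type_mask(n)	(1 << (n))

int main(void)
{
	unsigned char card_strengths = mmc_driver_type_mask(0) |
				       mmc_driver_type_mask(4);	/* from byte 197 */
	int chosen = 4;				/* e.g. host picked type 4 */
	unsigned char hs_timing;

	if (!(card_strengths & mmc_driver_type_mask(chosen)))
		chosen = 0;			/* type 0 is always supported */

	hs_timing = (chosen << EXT_CSD_DRV_STR_SHIFT) | EXT_CSD_TIMING_HS400;
	printf("EXT_CSD HS_TIMING value: 0x%02x\n", hs_timing);
	return 0;
}
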
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
index 8959604a13d3..fda15b6d4135 100644
--- a/include/linux/mmc/sdhci-pci-data.h
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -15,4 +15,6 @@ struct sdhci_pci_data {
extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
int slotno);
+extern int sdhci_pci_spt_drive_strength;
+
#endif
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 996807963716..83430f2ea757 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -33,6 +33,8 @@
#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_VENDOR_ID_INTEL 0x0089
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index da77e5e2041d..95d6f0314a7d 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -7,14 +7,4 @@
#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
#define SH_MOBILE_SDHI_IRQ_SDIO "sdio"
-struct sh_mobile_sdhi_info {
- int dma_slave_tx;
- int dma_slave_rx;
- unsigned long tmio_flags;
- unsigned long tmio_caps;
- unsigned long tmio_caps2;
- u32 tmio_ocr_mask; /* available MMC voltages */
- unsigned int cd_gpio;
-};
-
#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 95243d28a0ee..61cd67f4d788 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -324,25 +324,25 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
___pte; \
})
-#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
+#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
struct mm_struct *___mm = (__vma)->vm_mm; \
pmd_t ___pmd; \
\
- ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
+ ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
mmu_notifier_invalidate_range(___mm, ___haddr, \
___haddr + HPAGE_PMD_SIZE); \
\
___pmd; \
})
-#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
+#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
pmd_t ___pmd; \
\
- ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
+ ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
mmu_notifier_invalidate_range(__mm, ___haddr, \
___haddr + HPAGE_PMD_SIZE); \
\
@@ -428,8 +428,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_flush_notify ptep_clear_flush
-#define pmdp_clear_flush_notify pmdp_clear_flush
-#define pmdp_get_and_clear_notify pmdp_get_and_clear
+#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
+#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2782df47101e..54d74f6eb233 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -842,16 +842,16 @@ static inline int populated_zone(struct zone *zone)
extern int movable_zone;
+#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
return movable_zone == ZONE_HIGHMEM;
-#elif defined(CONFIG_HIGHMEM)
- return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#else
- return 0;
+ return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
+#endif
static inline int is_highmem_idx(enum zone_type idx)
{
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index e530533b94be..3bfd56778c29 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -546,6 +546,14 @@ struct amba_id {
void *data;
};
+/**
+ * struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus
+ * @type: Device type identifier.
+ */
+struct mips_cdmm_device_id {
+ __u8 type;
+};
+
/*
* Match x86 CPUs for CPU specific drivers.
* See documentation of "x86_match_cpu" for details.
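
struct mips_cdmm_device_id gives MIPS CDMM drivers the usual sentinel-terminated ID table for matching and module autoloading. A hypothetical table might look like the following; the type value and driver prefix are invented, and the MODULE_DEVICE_TABLE() usage assumes CDMM aliases are wired up like the other bus types in this file.

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Hypothetical CDMM device type; real values come from the hardware. */
static const struct mips_cdmm_device_id example_cdmm_ids[] = {
	{ .type = 0x46 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(mips_cdmm, example_cdmm_ids);
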
diff --git a/include/linux/module.h b/include/linux/module.h
index b03485bcb82a..1e5436042eb0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -338,6 +338,8 @@ struct module {
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call **trace_events;
unsigned int num_trace_events;
+ struct trace_enum_map **trace_enums;
+ unsigned int num_trace_enums;
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
unsigned int num_ftrace_callsites;
@@ -653,4 +655,16 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_MODULE_SIG
+static inline bool module_sig_ok(struct module *module)
+{
+ return module->sig_ok;
+}
+#else /* !CONFIG_MODULE_SIG */
+static inline bool module_sig_ok(struct module *module)
+{
+ return true;
+}
+#endif /* CONFIG_MODULE_SIG */
+
#endif /* _LINUX_MODULE_H */
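
module_sig_ok() gives code outside the module loader a uniform way to ask whether a module carried a valid signature, collapsing to true when CONFIG_MODULE_SIG is disabled. A hedged sketch of the kind of gate a consumer might apply (the surrounding function and policy are invented):

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/printk.h>

/*
 * Hypothetical consumer: only trust a payload shipped by a module whose
 * signature verified.  With CONFIG_MODULE_SIG off the helper returns true
 * and the check compiles away.
 */
static int example_trust_module_payload(struct module *mod)
{
	if (!module_sig_ok(mod)) {
		pr_warn("%s: unsigned module, payload rejected\n",
			module_name(mod));
		return -EKEYREJECTED;
	}
	return 0;
}
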
diff --git a/include/linux/mount.h b/include/linux/mount.h
index c2c561dc0114..f822c3c11377 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -61,6 +61,7 @@ struct mnt_namespace;
#define MNT_DOOMED 0x1000000
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
+#define MNT_UMOUNT 0x8000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
@@ -92,6 +93,6 @@ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
-extern dev_t name_to_dev_t(char *name);
+extern dev_t name_to_dev_t(const char *name);
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 5af1b81def49..641b7d6fd096 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -81,6 +81,8 @@ MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
int mpi_fromstr(MPI val, const char *str);
u32 mpi_get_keyid(MPI a, u32 *keyid);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
+int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ int *sign);
void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign);
@@ -142,4 +144,17 @@ int mpi_rshift(MPI x, MPI a, unsigned n);
/*-- mpi-inv.c --*/
int mpi_invm(MPI x, MPI u, MPI v);
+/* inline functions */
+
+/**
+ * mpi_get_size() - returns max size required to store the number
+ *
+ * @a: A multi-precision integer for which we want to allocate a buffer
+ *
+ * Return: size required to store the number
+ */
+static inline unsigned int mpi_get_size(MPI a)
+{
+ return a->nlimbs * BYTES_PER_MPI_LIMB;
+}
#endif /*G10_MPI_H */
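
mpi_read_buffer() plus the new mpi_get_size() helper let callers serialize an MPI into a caller-provided buffer instead of having the library allocate one for them. A hedged usage sketch, with error handling trimmed and the caller name invented:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mpi.h>

/* Hypothetical helper: serialize an MPI into a freshly allocated buffer. */
static u8 *example_mpi_to_bytes(MPI a, unsigned int *out_len)
{
	unsigned int buf_len = mpi_get_size(a);	/* worst-case size */
	unsigned int nbytes;
	int sign;
	u8 *buf = kmalloc(buf_len, GFP_KERNEL);

	if (!buf)
		return NULL;

	if (mpi_read_buffer(a, buf, buf_len, &nbytes, &sign)) {
		kfree(buf);
		return NULL;
	}

	*out_len = nbytes;	/* actual number of bytes written */
	return buf;
}
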
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 299d7d31fe53..9b57a9b1b081 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -296,183 +296,19 @@ struct cfi_private {
struct flchip chips[0]; /* per-chip data structure for each chip */
};
-/*
- * Returns the command address according to the given geometry.
- */
-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
- struct map_info *map, struct cfi_private *cfi)
-{
- unsigned bankwidth = map_bankwidth(map);
- unsigned interleave = cfi_interleave(cfi);
- unsigned type = cfi->device_type;
- uint32_t addr;
-
- addr = (cmd_ofs * type) * interleave;
-
- /* Modify the unlock address if we are in compatibility mode.
- * For 16bit devices on 8 bit busses
- * and 32bit devices on 16 bit busses
- * set the low bit of the alternating bit sequence of the address.
- */
- if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
- addr |= (type >> 1)*interleave;
-
- return addr;
-}
-
-/*
- * Transforms the CFI command for the given geometry (bus width & interleave).
- * It looks too long to be inline, but in the common case it should almost all
- * get optimised away.
- */
-static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
-{
- map_word val = { {0} };
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onecmd;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- /* First, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- default: BUG();
- case 1:
- onecmd = cmd;
- break;
- case 2:
- onecmd = cpu_to_cfi16(map, cmd);
- break;
- case 4:
- onecmd = cpu_to_cfi32(map, cmd);
- break;
- }
-
- /* Now replicate it across the size of an unsigned long, or
- just to the bus width as appropriate */
- switch (chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- onecmd |= (onecmd << (chip_mode * 32));
-#endif
- case 4:
- onecmd |= (onecmd << (chip_mode * 16));
- case 2:
- onecmd |= (onecmd << (chip_mode * 8));
- case 1:
- ;
- }
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi);
- /* And finally, for the multi-word case, replicate it
- in all words in the structure */
- for (i=0; i < words_per_bus; i++) {
- val.x[i] = onecmd;
- }
-
- return val;
-}
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
#define CMD(x) cfi_build_cmd((x), map, cfi)
-
-static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
- struct cfi_private *cfi)
-{
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onestat, res = 0;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- onestat = val.x[0];
- /* Or all status words together */
- for (i=1; i < words_per_bus; i++) {
- onestat |= val.x[i];
- }
-
- res = onestat;
- switch(chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- res |= (onestat >> (chip_mode * 32));
-#endif
- case 4:
- res |= (onestat >> (chip_mode * 16));
- case 2:
- res |= (onestat >> (chip_mode * 8));
- case 1:
- ;
- }
-
- /* Last, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- case 1:
- break;
- case 2:
- res = cfi16_to_cpu(map, res);
- break;
- case 4:
- res = cfi32_to_cpu(map, res);
- break;
- default: BUG();
- }
- return res;
-}
-
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi);
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
-
-/*
- * Sends a CFI command to a bank of flash for the given geometry.
- *
- * Returns the offset in flash where the command was written.
- * If prev_val is non-null, it will be set to the value at the command address,
- * before the command was written.
- */
-static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
struct map_info *map, struct cfi_private *cfi,
- int type, map_word *prev_val)
-{
- map_word val;
- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
- val = cfi_build_cmd(cmd, map, cfi);
-
- if (prev_val)
- *prev_val = map_read(map, addr);
-
- map_write(map, val, addr);
-
- return addr - base;
-}
+ int type, map_word *prev_val);
static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
@@ -506,15 +342,7 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
}
}
-static inline void cfi_udelay(int us)
-{
- if (us >= 1000) {
- msleep((us+999)/1000);
- } else {
- udelay(us);
- cond_resched();
- }
-}
+void cfi_udelay(int us);
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi);
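
cfi_build_cmd_addr(), cfi_build_cmd(), cfi_merge_status() and cfi_send_gen_cmd() keep their prototypes but are no longer inline, so existing chip drivers continue to call them unchanged. For reference, the classic unlock-then-command sequence built on cfi_send_gen_cmd() looks roughly like the sketch below; the wrapper function is invented and error handling is omitted.

#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

/* Sketch of the usual three-cycle unlock + command sequence. */
static void example_send_cmd(struct map_info *map, struct cfi_private *cfi,
			     uint32_t base, u_char cmd)
{
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, base, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(cmd, cfi->addr_unlock1, base, map, cfi,
			 cfi->device_type, NULL);
}
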
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 5f487d776411..29975c73a953 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -77,7 +77,7 @@
/* ensure we never evaluate anything shorter than an unsigned long
 * to zero, and ensure we'll never miss the end of a comparison (bjd) */
-#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1))/ sizeof(unsigned long))
+#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
# ifdef map_bankwidth
@@ -181,7 +181,7 @@ static inline int map_bankwidth_supported(int w)
}
}
-#define MAX_MAP_LONGS ( ((MAX_MAP_BANKWIDTH*8) + BITS_PER_LONG - 1) / BITS_PER_LONG )
+#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)
typedef union {
unsigned long x[MAX_MAP_LONGS];
@@ -264,20 +264,22 @@ void unregister_mtd_chip_driver(struct mtd_chip_driver *);
struct mtd_info *do_map_probe(const char *name, struct map_info *map);
void map_destroy(struct mtd_info *mtd);
-#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0)
-#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0)
+#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
+#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)
#define INVALIDATE_CACHED_RANGE(map, from, size) \
- do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
+ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
{
int i;
- for (i=0; i<map_words(map); i++) {
+
+ for (i = 0; i < map_words(map); i++) {
if (val1.x[i] != val2.x[i])
return 0;
}
+
return 1;
}
@@ -286,9 +288,9 @@ static inline map_word map_word_and(struct map_info *map, map_word val1, map_wor
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] & val2.x[i];
- }
+
return r;
}
@@ -297,9 +299,9 @@ static inline map_word map_word_clr(struct map_info *map, map_word val1, map_wor
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] & ~val2.x[i];
- }
+
return r;
}
@@ -308,22 +310,33 @@ static inline map_word map_word_or(struct map_info *map, map_word val1, map_word
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] | val2.x[i];
- }
+
return r;
}
-#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b))
+static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
+{
+ int i;
+
+ for (i = 0; i < map_words(map); i++) {
+ if ((val1.x[i] & val2.x[i]) != val3.x[i])
+ return 0;
+ }
+
+ return 1;
+}
static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
{
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++) {
if (val1.x[i] & val2.x[i])
return 1;
}
+
return 0;
}
@@ -355,14 +368,16 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
if (map_bankwidth_is_large(map)) {
char *dest = (char *)&orig;
+
memcpy(dest+start, buf, len);
} else {
- for (i=start; i < start+len; i++) {
+ for (i = start; i < start+len; i++) {
int bitpos;
+
#ifdef __LITTLE_ENDIAN
- bitpos = i*8;
+ bitpos = i * 8;
#else /* __BIG_ENDIAN */
- bitpos = (map_bankwidth(map)-1-i)*8;
+ bitpos = (map_bankwidth(map) - 1 - i) * 8;
#endif
orig.x[0] &= ~(0xff << bitpos);
orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
@@ -384,9 +399,10 @@ static inline map_word map_word_ff(struct map_info *map)
if (map_bankwidth(map) < MAP_FF_LIMIT) {
int bw = 8 * map_bankwidth(map);
+
r.x[0] = (1UL << bw) - 1;
} else {
- for (i=0; i<map_words(map); i++)
+ for (i = 0; i < map_words(map); i++)
r.x[i] = ~0UL;
}
return r;
@@ -407,7 +423,7 @@ static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
r.x[0] = __raw_readq(map->virt + ofs);
#endif
else if (map_bankwidth_is_large(map))
- memcpy_fromio(r.x, map->virt+ofs, map->bankwidth);
+ memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
else
BUG();
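
map_word_andequal() is promoted from a macro to a real inline with the same semantics: it returns true when (val1 & val2) == val3 in every word of the bus-wide value. Its typical use is the status-ready poll in chip drivers, roughly as sketched below (the wrapper, timeout handling and names are invented):

#include <linux/errno.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

/* Sketch: poll until the masked status bits read back as "ready". */
static int example_wait_ready(struct map_info *map, unsigned long adr,
			      map_word status_OK, unsigned long timeout)
{
	for (; timeout; timeout--) {
		map_word status = map_read(map, adr);

		if (map_word_andequal(map, status, status_OK, status_OK))
			return 0;
		cfi_udelay(1);
	}
	return -ETIMEDOUT;
}
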
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 3d4ea7eb2b68..f25e2bdd188c 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -26,6 +26,8 @@
struct mtd_info;
struct nand_flash_dev;
+struct device_node;
+
/* Scan and identify a NAND device */
extern int nand_scan(struct mtd_info *mtd, int max_chips);
/*
@@ -542,6 +544,7 @@ struct nand_buffers {
* flash device
* @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
* flash device.
+ * @dn: [BOARDSPECIFIC] device node describing this instance
* @read_byte: [REPLACEABLE] read one byte from the chip
* @read_word: [REPLACEABLE] read one word from the chip
* @write_byte: [REPLACEABLE] write a single byte to the chip on the
@@ -644,6 +647,8 @@ struct nand_chip {
void __iomem *IO_ADDR_R;
void __iomem *IO_ADDR_W;
+ struct device_node *dn;
+
uint8_t (*read_byte)(struct mtd_info *mtd);
u16 (*read_word)(struct mtd_info *mtd);
void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
@@ -833,7 +838,6 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_default_bbt(struct mtd_info *mtd);
extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 4720b86ee73d..e5409524bb0a 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -155,6 +155,8 @@ enum spi_nor_option_flags {
* @write: [DRIVER-SPECIFIC] write data to the SPI NOR
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
* at the offset @offs
+ * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
* @priv: the private data
*/
struct spi_nor {
@@ -189,6 +191,9 @@ struct spi_nor {
size_t len, size_t *retlen, const u_char *write_buf);
int (*erase)(struct spi_nor *nor, loff_t offs);
+ int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+
void *priv;
};
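
The new flash_lock/flash_unlock hooks let a flash-specific driver supply its own block-protection handling. A hypothetical setup path assigning them might look like this; the handler bodies are placeholders only.

#include <linux/mtd/spi-nor.h>

/* Hypothetical flash-specific protection handlers. */
static int example_flash_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	/* program the block-protection bits covering ofs..ofs+len */
	return 0;
}

static int example_flash_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	/* clear the block-protection bits covering ofs..ofs+len */
	return 0;
}

static void example_setup(struct spi_nor *nor)
{
	nor->flash_lock = example_flash_lock;
	nor->flash_unlock = example_flash_unlock;
}
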
diff --git a/include/linux/namei.h b/include/linux/namei.h
index c8990779f0c3..d8c6334cd150 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -1,16 +1,15 @@
#ifndef _LINUX_NAMEI_H
#define _LINUX_NAMEI_H
-#include <linux/dcache.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
#include <linux/path.h>
-
-struct vfsmount;
-struct nameidata;
+#include <linux/fcntl.h>
+#include <linux/errno.h>
enum { MAX_NESTED_LINKS = 8 };
+#define MAXSYMLINKS 40
+
/*
* Type of the last component on LOOKUP_PARENT
*/
@@ -45,13 +44,29 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_ROOT 0x2000
#define LOOKUP_EMPTY 0x4000
-extern int user_path_at(int, const char __user *, unsigned, struct path *);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
-#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
-#define user_path_dir(name, path) \
- user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
+static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
+ struct path *path)
+{
+ return user_path_at_empty(dfd, name, flags, path, NULL);
+}
+
+static inline int user_path(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL);
+}
+
+static inline int user_lpath(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name, 0, path, NULL);
+}
+
+static inline int user_path_dir(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL);
+}
extern int kern_path(const char *, unsigned, struct path *);
@@ -70,9 +85,7 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern void nd_jump_link(struct nameidata *nd, struct path *path);
-extern void nd_set_link(struct nameidata *nd, char *path);
-extern char *nd_get_link(struct nameidata *nd);
+extern void nd_jump_link(struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
deleted file mode 100644
index f62f78aef4ac..000000000000
--- a/include/linux/nbd.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * 1999 Copyright (C) Pavel Machek, pavel@ucw.cz. This code is GPL.
- * 1999/11/04 Copyright (C) 1999 VMware, Inc. (Regis "HPReg" Duchesne)
- * Made nbd_end_request() use the io_request_lock
- * 2001 Copyright (C) Steven Whitehouse
- * New nbd_end_request() for compatibility with new linux block
- * layer code.
- * 2003/06/24 Louis D. Langholtz <ldl@aros.net>
- * Removed unneeded blksize_bits field from nbd_device struct.
- * Cleanup PARANOIA usage & code.
- * 2004/02/19 Paul Clements
- * Removed PARANOIA, plus various cleanup and comments
- */
-#ifndef LINUX_NBD_H
-#define LINUX_NBD_H
-
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <uapi/linux/nbd.h>
-
-struct request;
-
-struct nbd_device {
- int flags;
- int harderror; /* Code of hard error */
- struct socket * sock; /* If == NULL, device is not ready, yet */
- int magic;
-
- spinlock_t queue_lock;
- struct list_head queue_head; /* Requests waiting result */
- struct request *active_req;
- wait_queue_head_t active_wq;
- struct list_head waiting_queue; /* Requests to be sent */
- wait_queue_head_t waiting_wq;
-
- struct mutex tx_lock;
- struct gendisk *disk;
- int blksize;
- u64 bytesize;
- pid_t pid; /* pid of nbd-client, if attached */
- int xmit_timeout;
- int disconnect; /* a disconnect has been requested by user */
-};
-
-#endif
diff --git a/include/linux/net.h b/include/linux/net.h
index 17d83393afcc..04aa06852771 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -38,7 +38,6 @@ struct net;
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
-#define SOCK_EXTERNALLY_ALLOCATED 5
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -120,7 +119,6 @@ struct socket {
struct vm_area_struct;
struct page;
-struct kiocb;
struct sockaddr;
struct msghdr;
struct module;
@@ -162,8 +160,8 @@ struct proto_ops {
int (*compat_getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
#endif
- int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len);
+ int (*sendmsg) (struct socket *sock, struct msghdr *m,
+ size_t total_len);
/* Notes for implementing recvmsg:
* ===============================
* msg->msg_namelen should get updated by the recvmsg handlers
@@ -172,9 +170,8 @@ struct proto_ops {
* handlers can assume that msg.msg_name is either NULL or has
* a minimum size of sizeof(struct sockaddr_storage).
*/
- int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len,
- int flags);
+ int (*recvmsg) (struct socket *sock, struct msghdr *m,
+ size_t total_len, int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
ssize_t (*sendpage) (struct socket *sock, struct page *page,
@@ -210,10 +207,10 @@ void sock_unregister(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
-int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
int sock_create_lite(int family, int type, int proto, struct socket **res);
void sock_release(struct socket *sock);
-int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
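
Two of the socket-layer signature changes ripple out to callers: sock_create_kern() now takes the struct net the socket should live in, and the proto_ops sendmsg/recvmsg methods (like sock_sendmsg itself) lose their struct kiocb argument. A hedged sketch of an updated in-kernel caller; the function is invented, the symbols it uses are standard kernel ones.

#include <linux/err.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/net_namespace.h>	/* init_net */
#include <net/sock.h>

/* Hypothetical in-kernel TCP socket creation after the API change. */
static struct socket *example_open_kernel_socket(void)
{
	struct socket *sock;
	int err;

	/* The owning network namespace is now passed explicitly. */
	err = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (err)
		return ERR_PTR(err);

	return sock;
}
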
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 7d59dc6ab789..9672781c593d 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -66,7 +66,6 @@ enum {
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_BUSY_POLL_BIT, /* Busy poll */
- NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
/*
* Add your fresh new feature above and remember to update
@@ -125,7 +124,6 @@ enum {
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
-#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
@@ -161,8 +159,7 @@ enum {
*/
#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
NETIF_F_SG | NETIF_F_HIGHDMA | \
- NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
- NETIF_F_HW_SWITCH_OFFLOAD)
+ NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
/*
* If one device doesn't support one of these features, then disable it
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 278738873703..e20979dfd6a9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -25,7 +25,6 @@
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
-#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
@@ -60,6 +59,7 @@ struct phy_device;
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
+struct mpls_dev;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -261,7 +261,6 @@ struct header_ops {
unsigned short type, const void *daddr,
const void *saddr, unsigned int len);
int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
- int (*rebuild)(struct sk_buff *skb);
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
@@ -588,6 +587,7 @@ struct netdev_queue {
#ifdef CONFIG_BQL
struct dql dql;
#endif
+ unsigned long tx_maxrate;
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -795,7 +795,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
* struct net_device *dev);
* Called when a packet needs to be transmitted.
- * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
+ * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
+ * the queue before that can happen; it's for obsolete devices and weird
+ * corner cases, but the stack really does a non-trivial amount
+ * of useless work if you return NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required can not be NULL.
*
@@ -875,6 +878,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
+ *
+ * Enable or disable the VF's ability to query its RSS Redirection Table and
+ * Hash Key. This is needed since on some devices the VF shares this
+ * information with the PF, and querying it may pose a theoretical security risk.
+ * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
* Called to setup 'tc' number of traffic classes in the net device. This
@@ -968,7 +976,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- * struct net_device *dev, u32 filter_mask)
+ * struct net_device *dev, u32 filter_mask,
+ * int nlflags)
* int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags);
*
@@ -1026,15 +1035,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* be otherwise expressed by feature flags. The check is called with
* the set of features that the stack has calculated and it returns
* those the driver believes to be appropriate.
- *
- * int (*ndo_switch_parent_id_get)(struct net_device *dev,
- * struct netdev_phys_item_id *psid);
- * Called to get an ID of the switch chip this port is part of.
- * If driver implements this, it indicates that it represents a port
- * of a switch chip.
- * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
- * Called to notify switch device port of bridge port STP
- * state change.
+ * int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ * int queue_index, u32 maxrate);
+ * Called when a user wants to set a max-rate limitation of specific
+ * TX queue.
+ * int (*ndo_get_iflink)(const struct net_device *dev);
+ * Called to get the iflink value of this device.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1094,11 +1100,18 @@ struct net_device_ops {
struct ifla_vf_info *ivf);
int (*ndo_set_vf_link_state)(struct net_device *dev,
int vf, int link_state);
+ int (*ndo_get_vf_stats)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_stats
+ *vf_stats);
int (*ndo_set_vf_port)(struct net_device *dev,
int vf,
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_set_vf_rss_query_en)(
+ struct net_device *dev,
+ int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
@@ -1164,7 +1177,8 @@ struct net_device_ops {
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask);
+ u32 filter_mask,
+ int nlflags);
int (*ndo_bridge_dellink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags);
@@ -1172,6 +1186,8 @@ struct net_device_ops {
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+ int (*ndo_get_phys_port_name)(struct net_device *dev,
+ char *name, size_t len);
void (*ndo_add_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
__be16 port);
@@ -1191,12 +1207,10 @@ struct net_device_ops {
netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
-#ifdef CONFIG_NET_SWITCHDEV
- int (*ndo_switch_parent_id_get)(struct net_device *dev,
- struct netdev_phys_item_id *psid);
- int (*ndo_switch_port_stp_update)(struct net_device *dev,
- u8 state);
-#endif
+ int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ int queue_index,
+ u32 maxrate);
+ int (*ndo_get_iflink)(const struct net_device *dev);
};
/**
@@ -1305,6 +1319,8 @@ enum netdev_priv_flags {
* @base_addr: Device I/O address
* @irq: Device IRQ number
*
+ * @carrier_changes: Stats to monitor carrier on<->off transitions
+ *
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
* @napi_list: List entry, that is used for polling napi devices
@@ -1328,7 +1344,7 @@ enum netdev_priv_flags {
* @mpls_features: Mask of features inheritable by MPLS
*
* @ifindex: interface index
- * @iflink: unique device identifier
+ * @group: The group, that the device belongs to
*
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
@@ -1338,8 +1354,6 @@ enum netdev_priv_flags {
* @tx_dropped: Dropped packets by core network,
* do not use this in drivers
*
- * @carrier_changes: Stats to monitor carrier on<->off transitions
- *
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
* see <net/iw_handler.h> for details.
@@ -1348,8 +1362,7 @@ enum netdev_priv_flags {
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @ethtool_ops: Management operations
- * @fwd_ops: Management operations
- * @header_ops: Includes callbacks for creating,parsing,rebuilding,etc
+ * @header_ops: Includes callbacks for creating,parsing,caching,etc
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
@@ -1383,14 +1396,14 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
- * @uc: unicast mac addresses
- * @mc: multicast mac addresses
- * @dev_addrs: list of device hw addresses
- * @queues_kset: Group of all Kobjects in the Tx and RX queues
* @uc_promisc: Counter, that indicates, that promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
* does not implement ndo_set_rx_mode()
+ * @uc: unicast mac addresses
+ * @mc: multicast mac addresses
+ * @dev_addrs: list of device hw addresses
+ * @queues_kset: Group of all Kobjects in the Tx and RX queues
* @promiscuity: Number of times, the NIC is told to work in
* Promiscuous mode, if it becomes 0 the NIC will
* exit from working in Promiscuous mode
@@ -1420,6 +1433,12 @@ enum netdev_priv_flags {
* @ingress_queue: XXX: need comments on this one
* @broadcast: hw bcast address
*
+ * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
+ * indexed by RX queue number. Assigned by driver.
+ * This must only be set if the ndo_rx_flow_steer
+ * operation is defined
+ * @index_hlist: Device index hash chain
+ *
* @_tx: Array of TX queues
* @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
* @real_num_tx_queues: Number of TX queues currently active in device
@@ -1429,11 +1448,6 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
- * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
- * indexed by RX queue number. Assigned by driver.
- * This must only be set if the ndo_rx_flow_steer
- * operation is defined
- *
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog ( see dev_watchdog() )
@@ -1441,7 +1455,6 @@ enum netdev_priv_flags {
*
* @pcpu_refcnt: Number of references to this device
* @todo_list: Delayed register/unregister
- * @index_hlist: Device index hash chain
* @link_watch_list: XXX: need comments on this one
*
* @reg_state: Register/unregister state machine
@@ -1489,9 +1502,6 @@ enum netdev_priv_flags {
*
* @qdisc_tx_busylock: XXX: need comments on this one
*
- * @group: The group, that the device belongs to
- * @pm_qos_req: Power Management QoS object
- *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1509,6 +1519,8 @@ struct net_device {
unsigned long base_addr;
int irq;
+ atomic_t carrier_changes;
+
/*
* Some hardware also needs these fields (state,dev_list,
* napi_list,unreg_list,close_list) but they are not
@@ -1542,22 +1554,22 @@ struct net_device {
netdev_features_t mpls_features;
int ifindex;
- int iflink;
+ int group;
struct net_device_stats stats;
atomic_long_t rx_dropped;
atomic_long_t tx_dropped;
- atomic_t carrier_changes;
-
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def * wireless_handlers;
struct iw_public_data * wireless_data;
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
- const struct forwarding_accel_ops *fwd_ops;
+#ifdef CONFIG_NET_SWITCHDEV
+ const struct switchdev_ops *switchdev_ops;
+#endif
const struct header_ops *header_ops;
@@ -1588,6 +1600,8 @@ struct net_device {
unsigned short dev_id;
unsigned short dev_port;
spinlock_t addr_list_lock;
+ unsigned char name_assign_type;
+ bool uc_promisc;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
@@ -1595,10 +1609,6 @@ struct net_device {
#ifdef CONFIG_SYSFS
struct kset *queues_kset;
#endif
-
- unsigned char name_assign_type;
-
- bool uc_promisc;
unsigned int promiscuity;
unsigned int allmulti;
@@ -1621,6 +1631,9 @@ struct net_device {
void *ax25_ptr;
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
+#if IS_ENABLED(CONFIG_MPLS_ROUTING)
+ struct mpls_dev __rcu *mpls_ptr;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -1643,9 +1656,19 @@ struct net_device {
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_proto __rcu *ingress_cl_list;
+#endif
struct netdev_queue __rcu *ingress_queue;
- unsigned char broadcast[MAX_ADDR_LEN];
+#ifdef CONFIG_NETFILTER_INGRESS
+ struct list_head nf_hooks_ingress;
+#endif
+ unsigned char broadcast[MAX_ADDR_LEN];
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rx_cpu_rmap;
+#endif
+ struct hlist_node index_hlist;
/*
* Cache lines mostly used on transmit path
@@ -1656,13 +1679,11 @@ struct net_device {
struct Qdisc *qdisc;
unsigned long tx_queue_len;
spinlock_t tx_global_lock;
+ int watchdog_timeo;
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_maps;
#endif
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rx_cpu_rmap;
-#endif
/* These may be needed for future network-power-down code. */
@@ -1672,13 +1693,11 @@ struct net_device {
*/
unsigned long trans_start;
- int watchdog_timeo;
struct timer_list watchdog_timer;
int __percpu *pcpu_refcnt;
struct list_head todo_list;
- struct hlist_node index_hlist;
struct list_head link_watch_list;
enum { NETREG_UNINITIALIZED=0,
@@ -1702,9 +1721,7 @@ struct net_device {
struct netpoll_info __rcu *npinfo;
#endif
-#ifdef CONFIG_NET_NS
- struct net *nd_net;
-#endif
+ possible_net_t nd_net;
/* mid-layer private */
union {
@@ -1745,8 +1762,6 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
- int group;
- struct pm_qos_request pm_qos_req;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1844,10 +1859,7 @@ struct net *dev_net(const struct net_device *dev)
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
-#ifdef CONFIG_NET_NS
- release_net(dev->nd_net);
- dev->nd_net = hold_net(net);
-#endif
+ write_pnet(&dev->nd_net, net);
}
static inline bool netdev_uses_dsa(struct net_device *dev)
@@ -1989,6 +2001,7 @@ struct offload_callbacks {
struct packet_offload {
__be16 type; /* This is really htons(ether_type). */
+ u16 priority;
struct offload_callbacks callbacks;
struct list_head list;
};
@@ -2023,10 +2036,10 @@ struct pcpu_sw_netstats {
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
if (pcpu_stats) { \
- int i; \
- for_each_possible_cpu(i) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
- stat = per_cpu_ptr(pcpu_stats, i); \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
@@ -2159,6 +2172,7 @@ void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
+int dev_get_iflink(const struct net_device *dev);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -2167,9 +2181,14 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
+int dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
-int dev_loopback_xmit(struct sk_buff *newskb);
-int dev_queue_xmit(struct sk_buff *skb);
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb);
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb);
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return dev_queue_xmit_sk(skb->sk, skb);
+}
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2409,15 +2428,6 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
-static inline int dev_rebuild_header(struct sk_buff *skb)
-{
- const struct net_device *dev = skb->dev;
-
- if (!dev->header_ops || !dev->header_ops->rebuild)
- return 0;
- return dev->header_ops->rebuild(skb);
-}
-
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -2554,10 +2564,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
- if (WARN_ON(!dev_queue)) {
- pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
- return;
- }
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -2573,15 +2579,7 @@ static inline void netif_stop_queue(struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
-static inline void netif_tx_stop_all_queues(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- netif_tx_stop_queue(txq);
- }
-}
+void netif_tx_stop_all_queues(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
@@ -2842,6 +2840,9 @@ static inline int netif_set_xps_queue(struct net_device *dev,
}
#endif
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+ unsigned int num_tx_queues);
+
/*
* Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
* as a distribution range limit for the returned value.
@@ -2939,7 +2940,11 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
-int netif_receive_skb(struct sk_buff *skb);
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
+static inline int netif_receive_skb(struct sk_buff *skb)
+{
+ return netif_receive_skb_sk(skb->sk, skb);
+}
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -2975,6 +2980,8 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+int dev_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3679,6 +3686,9 @@ void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
+netdev_features_t passthru_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
@@ -3709,7 +3719,7 @@ static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
+static inline bool netif_needs_gso(struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
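
Several of the new net_device_ops entries are simple accessors a driver can wire up directly: ndo_get_iflink() reports the lower device's ifindex and ndo_get_phys_port_name() exposes a stable port name. A hypothetical stacked-device driver might implement them as below; the private struct and field names outside netdevice.h are invented.

#include <linux/netdevice.h>

/* Hypothetical private data for a device stacked on a lower netdev. */
struct example_priv {
	struct net_device *lowerdev;
	int port_id;
};

static int example_get_iflink(const struct net_device *dev)
{
	const struct example_priv *priv = netdev_priv(dev);

	return priv->lowerdev->ifindex;
}

static int example_get_phys_port_name(struct net_device *dev,
				      char *name, size_t len)
{
	struct example_priv *priv = netdev_priv(dev);
	int n = snprintf(name, len, "p%d", priv->port_id);

	if (n < 0 || (size_t)n >= len)
		return -EINVAL;
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_iflink		= example_get_iflink,
	.ndo_get_phys_port_name	= example_get_phys_port_name,
};
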
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2517ece98820..00050dfd9f23 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -10,7 +10,8 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
-#include <uapi/linux/netfilter.h>
+#include <linux/netfilter_defs.h>
+
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
@@ -38,29 +39,58 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
int netfilter_init(void);
-/* Largest hook number + 1 */
-#define NF_MAX_HOOKS 8
-
struct sk_buff;
struct nf_hook_ops;
+
+struct sock;
+
+struct nf_hook_state {
+ unsigned int hook;
+ int thresh;
+ u_int8_t pf;
+ struct net_device *in;
+ struct net_device *out;
+ struct sock *sk;
+ struct list_head *hook_list;
+ int (*okfn)(struct sock *, struct sk_buff *);
+};
+
+static inline void nf_hook_state_init(struct nf_hook_state *p,
+ struct list_head *hook_list,
+ unsigned int hook,
+ int thresh, u_int8_t pf,
+ struct net_device *indev,
+ struct net_device *outdev,
+ struct sock *sk,
+ int (*okfn)(struct sock *, struct sk_buff *))
+{
+ p->hook = hook;
+ p->thresh = thresh;
+ p->pf = pf;
+ p->in = indev;
+ p->out = outdev;
+ p->sk = sk;
+ p->hook_list = hook_list;
+ p->okfn = okfn;
+}
+
typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *));
+ const struct nf_hook_state *state);
struct nf_hook_ops {
- struct list_head list;
+ struct list_head list;
/* User fills in from here down. */
- nf_hookfn *hook;
- struct module *owner;
- void *priv;
- u_int8_t pf;
- unsigned int hooknum;
+ nf_hookfn *hook;
+ struct net_device *dev;
+ struct module *owner;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
- int priority;
+ int priority;
};
struct nf_sockopt_ops {
@@ -103,48 +133,61 @@ extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+ u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
return static_key_false(&nf_hooks_needed[pf][hook]);
- return !list_empty(&nf_hooks[pf][hook]);
+ return !list_empty(nf_hook_list);
}
#else
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+ u_int8_t pf, unsigned int hook)
{
- return !list_empty(&nf_hooks[pf][hook]);
+ return !list_empty(nf_hook_list);
}
#endif
-int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh);
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+ return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
+}
+
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
* nf_hook_thresh - call a netfilter hook
- *
+ *
* Returns 1 if the hook has allowed the packet to pass. The function
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
*/
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct sock *sk,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh)
+ int (*okfn)(struct sock *, struct sk_buff *),
+ int thresh)
{
- if (nf_hooks_active(pf, hook))
- return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+ if (nf_hooks_active(pf, hook)) {
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+ pf, indev, outdev, sk, okfn);
+ return nf_hook_slow(skb, &state);
+ }
return 1;
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *))
{
- return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN);
+ return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
}
/* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -165,35 +208,36 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
*/
static inline int
-NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *), int thresh)
+NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in,
+ struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *), int thresh)
{
- int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh);
+ int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
if (ret == 1)
- ret = okfn(skb);
+ ret = okfn(sk, skb);
return ret;
}
static inline int
-NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *), bool cond)
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *), bool cond)
{
int ret;
if (!cond ||
- ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
- ret = okfn(skb);
+ ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
+ ret = okfn(sk, skb);
return ret;
}
static inline int
-NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb,
+NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ int (*okfn)(struct sock *, struct sk_buff *))
{
- return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN);
+ return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
}
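
The hunks above thread a struct sock * through every hook entry point and widen okfn to take (sk, skb). A minimal sketch of a caller adapted to the new convention, assuming a hypothetical output path (function names are illustrative, not from this patch):

#include <linux/netfilter.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical continuation: run only when the verdict is "accept". */
static int example_finish_output(struct sock *sk, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}

/* Hypothetical caller: note the extra sk argument and two-argument okfn. */
static int example_output(struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
		       NULL, skb->dev, example_finish_output);
}
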
/* Call setsockopt() */
@@ -293,19 +337,21 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
}
#else /* !CONFIG_NETFILTER */
-#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
-#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb)
+#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
+#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct sock *sk,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh)
+ int (*okfn)(struct sock *sk, struct sk_buff *), int thresh)
{
- return okfn(skb);
+ return okfn(sk, skb);
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *))
{
return 1;
}
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index f1606fa6132d..48bb01edcf30 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -108,8 +108,13 @@ struct ip_set_counter {
atomic64_t packets;
};
+struct ip_set_comment_rcu {
+ struct rcu_head rcu;
+ char str[0];
+};
+
struct ip_set_comment {
- char *str;
+ struct ip_set_comment_rcu __rcu *c;
};
struct ip_set_skbinfo {
@@ -122,13 +127,13 @@ struct ip_set_skbinfo {
struct ip_set;
#define ext_timeout(e, s) \
-(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s) \
-(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
#define ext_comment(e, s) \
-(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
#define ext_skbinfo(e, s) \
-(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
+((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
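
The only change to these accessor macros is the added outer parentheses, which let the result be used directly as an operand (a trailing -> or [] would otherwise bind before the cast). A hedged sketch of the kind of expression this enables, assuming a hypothetical helper with an element e from a set s created with the timeout and comment extensions:

#include <linux/netfilter/ipset/ip_set.h>

static bool example_elem_usable(void *e, struct ip_set *s)
{
	return !ip_set_timeout_expired(ext_timeout(e, s)) &&
	       rcu_access_pointer(ext_comment(e, s)->c) != NULL;
}
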
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
const struct ip_set_ext *ext,
@@ -176,6 +181,9 @@ struct ip_set_type_variant {
/* List elements */
int (*list)(const struct ip_set *set, struct sk_buff *skb,
struct netlink_callback *cb);
+	/* Keep listing private when a resize runs in parallel */
+ void (*uref)(struct ip_set *set, struct netlink_callback *cb,
+ bool start);
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
@@ -223,7 +231,7 @@ struct ip_set {
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
- rwlock_t lock;
+ spinlock_t lock;
/* References to the set */
u32 ref;
/* The core set type */
@@ -341,12 +349,11 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
cpu_to_be64((u64)skbinfo->skbmark << 32 |
skbinfo->skbmarkmask))) ||
(skbinfo->skbprio &&
- nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
cpu_to_be32(skbinfo->skbprio))) ||
(skbinfo->skbqueue &&
- nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
cpu_to_be16(skbinfo->skbqueue)));
-
}
static inline void
@@ -380,12 +387,12 @@ ip_set_init_counter(struct ip_set_counter *counter,
/* Netlink CB args */
enum {
- IPSET_CB_NET = 0,
- IPSET_CB_DUMP,
- IPSET_CB_INDEX,
- IPSET_CB_ARG0,
+ IPSET_CB_NET = 0, /* net namespace */
+ IPSET_CB_DUMP, /* dump single set/all sets */
+ IPSET_CB_INDEX, /* set index */
+ IPSET_CB_PRIVATE, /* set private data */
+ IPSET_CB_ARG0, /* type specific */
IPSET_CB_ARG1,
- IPSET_CB_ARG2,
};
/* register and unregister set references */
@@ -483,7 +490,7 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
if (!__nested)
return -EMSGSIZE;
- ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
+ ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
if (!ret)
ipset_nest_end(skb, __nested);
return ret;
@@ -497,8 +504,7 @@ static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
if (!__nested)
return -EMSGSIZE;
- ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6,
- sizeof(struct in6_addr), ipaddrptr);
+ ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
if (!ret)
ipset_nest_end(skb, __nested);
return ret;
@@ -534,29 +540,9 @@ bitmap_bytes(u32 a, u32 b)
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>
-static inline int
+int
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
- const void *e, bool active)
-{
- if (SET_WITH_TIMEOUT(set)) {
- unsigned long *timeout = ext_timeout(e, set);
-
- if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(active ? ip_set_timeout_get(timeout)
- : *timeout)))
- return -EMSGSIZE;
- }
- if (SET_WITH_COUNTER(set) &&
- ip_set_put_counter(skb, ext_counter(e, set)))
- return -EMSGSIZE;
- if (SET_WITH_COMMENT(set) &&
- ip_set_put_comment(skb, ext_comment(e, set)))
- return -EMSGSIZE;
- if (SET_WITH_SKBINFO(set) &&
- ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
- return -EMSGSIZE;
- return 0;
-}
+ const void *e, bool active);
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
@@ -566,8 +552,6 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
.timeout = (set)->timeout }
-#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
-
#define IPSET_CONCAT(a, b) a##b
#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 21217ea008d7..8d0248525957 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -16,41 +16,57 @@ ip_set_comment_uget(struct nlattr *tb)
return nla_data(tb);
}
+/* Called from uadd only, protected by the set spinlock.
+ * The kadt functions don't use the comment extensions in any way.
+ */
static inline void
ip_set_init_comment(struct ip_set_comment *comment,
const struct ip_set_ext *ext)
{
+ struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
size_t len = ext->comment ? strlen(ext->comment) : 0;
- if (unlikely(comment->str)) {
- kfree(comment->str);
- comment->str = NULL;
+ if (unlikely(c)) {
+ kfree_rcu(c, rcu);
+ rcu_assign_pointer(comment->c, NULL);
}
if (!len)
return;
if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
len = IPSET_MAX_COMMENT_SIZE;
- comment->str = kzalloc(len + 1, GFP_ATOMIC);
- if (unlikely(!comment->str))
+ c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
+ if (unlikely(!c))
return;
- strlcpy(comment->str, ext->comment, len + 1);
+ strlcpy(c->str, ext->comment, len + 1);
+ rcu_assign_pointer(comment->c, c);
}
+/* Used only when dumping a set, protected by rcu_read_lock_bh() */
static inline int
ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
{
- if (!comment->str)
+ struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+
+ if (!c)
return 0;
- return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+ return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
}
+/* Called from uadd/udel, flush or the garbage collectors, protected
+ * by the set spinlock.
+ * Called when the set is destroyed and when there can't be any user
+ * of the set data anymore.
+ */
static inline void
ip_set_comment_free(struct ip_set_comment *comment)
{
- if (unlikely(!comment->str))
+ struct ip_set_comment_rcu *c;
+
+ c = rcu_dereference_protected(comment->c, 1);
+ if (unlikely(!c))
return;
- kfree(comment->str);
- comment->str = NULL;
+ kfree_rcu(c, rcu);
+ rcu_assign_pointer(comment->c, NULL);
}
#endif
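
The comment string now sits behind an RCU-managed wrapper: writers publish a new buffer with rcu_assign_pointer() under the set spinlock, readers dereference it under rcu_read_lock_bh(), and old buffers are reclaimed with kfree_rcu(). A hedged sketch of the update pattern in isolation (hypothetical helper; the caller is assumed to hold the set spinlock):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/netfilter/ipset/ip_set.h>

static void example_comment_replace(struct ip_set_comment *comment,
				    const char *str, size_t len)
{
	struct ip_set_comment_rcu *old, *new;

	new = kzalloc(sizeof(*new) + len + 1, GFP_ATOMIC);
	if (!new)
		return;
	memcpy(new->str, str, len);	/* kzalloc provided the trailing NUL */

	old = rcu_dereference_protected(comment->c, 1);
	rcu_assign_pointer(comment->c, new);
	if (old)
		kfree_rcu(old, rcu);	/* readers may still see the old string */
}
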
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 83c2f9e0886c..1d6a935c1ac5 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,38 +40,33 @@ ip_set_timeout_uget(struct nlattr *tb)
}
static inline bool
-ip_set_timeout_test(unsigned long timeout)
+ip_set_timeout_expired(unsigned long *t)
{
- return timeout == IPSET_ELEM_PERMANENT ||
- time_is_after_jiffies(timeout);
-}
-
-static inline bool
-ip_set_timeout_expired(unsigned long *timeout)
-{
- return *timeout != IPSET_ELEM_PERMANENT &&
- time_is_before_jiffies(*timeout);
+ return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
}
static inline void
-ip_set_timeout_set(unsigned long *timeout, u32 t)
+ip_set_timeout_set(unsigned long *timeout, u32 value)
{
- if (!t) {
+ unsigned long t;
+
+ if (!value) {
*timeout = IPSET_ELEM_PERMANENT;
return;
}
- *timeout = msecs_to_jiffies(t * 1000) + jiffies;
- if (*timeout == IPSET_ELEM_PERMANENT)
+ t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
+ if (t == IPSET_ELEM_PERMANENT)
/* Bingo! :-) */
- (*timeout)--;
+ t--;
+ *timeout = t;
}
static inline u32
ip_set_timeout_get(unsigned long *timeout)
{
return *timeout == IPSET_ELEM_PERMANENT ? 0 :
- jiffies_to_msecs(*timeout - jiffies)/1000;
+ jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
}
#endif /* __KERNEL__ */
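
The stored value remains an absolute jiffies deadline, with IPSET_ELEM_PERMANENT reserved for "no timeout" (hence the decrement when a computed deadline collides with it). A short hedged round-trip sketch with hypothetical names:

#include <linux/printk.h>
#include <linux/netfilter/ipset/ip_set.h>

static void example_timeout_roundtrip(void)
{
	unsigned long timeout;

	ip_set_timeout_set(&timeout, 10);	/* 10 s from now, in jiffies */
	pr_debug("remaining %u s, expired %d\n",
		 ip_set_timeout_get(&timeout),
		 ip_set_timeout_expired(&timeout));
}
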
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index a3e215bb0241..286098a5667f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -62,6 +62,7 @@ struct xt_mtchk_param {
void *matchinfo;
unsigned int hook_mask;
u_int8_t family;
+ bool nft_compat;
};
/**
@@ -92,6 +93,7 @@ struct xt_tgchk_param {
void *targinfo;
unsigned int hook_mask;
u_int8_t family;
+ bool nft_compat;
};
/* Target destructor parameters */
@@ -222,13 +224,10 @@ struct xt_table_info {
unsigned int stacksize;
unsigned int __percpu *stackptr;
void ***jumpstack;
- /* ipt_entry tables: one per CPU */
- /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
- void *entries[1];
+
+ unsigned char entries[0] __aligned(8);
};
-#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
- + nr_cpu_ids * sizeof(char *))
int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
@@ -351,6 +350,57 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}
+
+/* On SMP, ip(6)t_entry->counters.pcnt holds the address of the
+ * real (percpu) counter. On !SMP, it's just the packet count,
+ * so nothing needs to be done there.
+ *
+ * xt_percpu_counter_alloc returns the address of the percpu
+ * counter, or 0 on !SMP. We force an alignment of 16 bytes
+ * so that bytes/packets share a common cache line.
+ *
+ * Hence the caller must use IS_ERR_VALUE to check for error; this
+ * allows us to return 0 for single core systems without forcing
+ * callers to deal with SMP vs. NONSMP issues.
+ */
+static inline u64 xt_percpu_counter_alloc(void)
+{
+ if (nr_cpu_ids > 1) {
+ void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
+ sizeof(struct xt_counters));
+
+ if (res == NULL)
+ return (u64) -ENOMEM;
+
+ return (u64) (__force unsigned long) res;
+ }
+
+ return 0;
+}
+static inline void xt_percpu_counter_free(u64 pcnt)
+{
+ if (nr_cpu_ids > 1)
+ free_percpu((void __percpu *) (unsigned long) pcnt);
+}
+
+static inline struct xt_counters *
+xt_get_this_cpu_counter(struct xt_counters *cnt)
+{
+ if (nr_cpu_ids > 1)
+ return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);
+
+ return cnt;
+}
+
+static inline struct xt_counters *
+xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
+{
+ if (nr_cpu_ids > 1)
+ return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
+
+ return cnt;
+}
+
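
A hedged sketch of the alloc/check/bump/free protocol described in the comment above, using a hypothetical ipt_entry whose counters.pcnt stashes the per-cpu address on SMP (illustrative helpers, not part of this patch):

#include <linux/err.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>

static int example_counter_init(struct ipt_entry *e)
{
	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;
	return 0;
}

static void example_counter_bump(struct ipt_entry *e, unsigned int bytes)
{
	struct xt_counters *counter = xt_get_this_cpu_counter(&e->counters);

	counter->bcnt += bytes;		/* byte count */
	counter->pcnt++;		/* packet count */
}

static void example_counter_fini(struct ipt_entry *e)
{
	xt_percpu_counter_free(e->counters.pcnt);
}
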
struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index cfb7191e6efa..c22a7fb8d0df 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -54,8 +54,7 @@ extern struct xt_table *arpt_register_table(struct net *net,
extern void arpt_unregister_table(struct xt_table *table);
extern unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
#ifdef CONFIG_COMPAT
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index c755e4971fa3..6d80fc686323 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -2,7 +2,7 @@
#define __LINUX_BRIDGE_NETFILTER_H
#include <uapi/linux/netfilter_bridge.h>
-
+#include <linux/skbuff.h>
enum nf_br_hook_priorities {
NF_BR_PRI_FIRST = INT_MIN,
@@ -17,110 +17,53 @@ enum nf_br_hook_priorities {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-#define BRNF_PKT_TYPE 0x01
#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_BRIDGED 0x04
#define BRNF_NF_BRIDGE_PREROUTING 0x08
-#define BRNF_8021Q 0x10
-#define BRNF_PPPoE 0x20
-static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
-{
- switch (skb->protocol) {
- case __cpu_to_be16(ETH_P_8021Q):
- return VLAN_HLEN;
- case __cpu_to_be16(ETH_P_PPP_SES):
- return PPPOE_SES_HLEN;
- default:
- return 0;
- }
-}
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
-static inline void nf_bridge_update_protocol(struct sk_buff *skb)
+static inline void br_drop_fake_rtable(struct sk_buff *skb)
{
- if (skb->nf_bridge->mask & BRNF_8021Q)
- skb->protocol = htons(ETH_P_8021Q);
- else if (skb->nf_bridge->mask & BRNF_PPPoE)
- skb->protocol = htons(ETH_P_PPP_SES);
-}
+ struct dst_entry *dst = skb_dst(skb);
-/* Fill in the header for fragmented IP packets handled by
- * the IPv4 connection tracking code.
- *
- * Only used in br_forward.c
- */
-static inline int nf_bridge_copy_header(struct sk_buff *skb)
-{
- int err;
- unsigned int header_size;
-
- nf_bridge_update_protocol(skb);
- header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
- err = skb_cow_head(skb, header_size);
- if (err)
- return err;
-
- skb_copy_to_linear_data_offset(skb, -header_size,
- skb->nf_bridge->data, header_size);
- __skb_push(skb, nf_bridge_encap_header_len(skb));
- return 0;
+ if (dst && (dst->flags & DST_FAKE_RTABLE))
+ skb_dst_drop(skb);
}
-static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
+static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
{
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
- return nf_bridge_copy_header(skb);
- return 0;
-}
+ struct nf_bridge_info *nf_bridge;
-static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
-{
- if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
- return PPPOE_SES_HLEN;
- return 0;
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
}
-int br_handle_frame_finish(struct sk_buff *skb);
-/* Only used in br_device.c */
-static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
-
- skb_pull(skb, ETH_HLEN);
- nf_bridge->mask ^= BRNF_BRIDGED_DNAT;
- skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
- skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
- skb->dev = nf_bridge->physindev;
- return br_handle_frame_finish(skb);
+ struct nf_bridge_info *nf_bridge;
+
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
}
-/* This is called by the IP fragmenting code and it ensures there is
- * enough room for the encapsulating header (if there is one). */
-static inline unsigned int nf_bridge_pad(const struct sk_buff *skb)
+static inline struct net_device *
+nf_bridge_get_physindev(const struct sk_buff *skb)
{
- if (skb->nf_bridge)
- return nf_bridge_encap_header_len(skb);
- return 0;
+ return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
}
-struct bridge_skb_cb {
- union {
- __be32 ipv4;
- } daddr;
-};
-
-static inline void br_drop_fake_rtable(struct sk_buff *skb)
+static inline struct net_device *
+nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
-
- if (dst && (dst->flags & DST_FAKE_RTABLE))
- skb_dst_drop(skb);
+ return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
}
-
#else
-#define nf_bridge_maybe_copy_header(skb) (0)
-#define nf_bridge_pad(skb) (0)
#define br_drop_fake_rtable(skb) do { } while (0)
#endif /* CONFIG_BRIDGE_NETFILTER */
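
With the accessors above, code outside the bridge no longer dereferences skb->nf_bridge directly. A hedged sketch of a consumer (hypothetical logging helper; assumes CONFIG_BRIDGE_NETFILTER, since only that branch defines the getters):

#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/printk.h>
#include <linux/skbuff.h>

static void example_log_bridge_ports(const struct sk_buff *skb)
{
	struct net_device *physin = nf_bridge_get_physindev(skb);

	pr_debug("physin=%s physinif=%d physoutif=%d\n",
		 physin ? physin->name : "<none>",
		 nf_bridge_get_physinif(skb),
		 nf_bridge_get_physoutif(skb));
}
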
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 34e7a2b7f867..8ca6d6464ea3 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -6,15 +6,16 @@
*
* ebtables.c,v 2.0, April, 2002
*
- * This code is stongly inspired on the iptables code which is
+ * This code is strongly inspired by the iptables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
*/
#ifndef __LINUX_BRIDGE_EFF_H
#define __LINUX_BRIDGE_EFF_H
+#include <linux/if.h>
+#include <linux/if_ether.h>
#include <uapi/linux/netfilter_bridge/ebtables.h>
-
/* return values for match() functions */
#define EBT_MATCH 0
#define EBT_NOMATCH 1
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
new file mode 100644
index 000000000000..d3a7f8597e82
--- /dev/null
+++ b/include/linux/netfilter_defs.h
@@ -0,0 +1,9 @@
+#ifndef __LINUX_NETFILTER_CORE_H_
+#define __LINUX_NETFILTER_CORE_H_
+
+#include <uapi/linux/netfilter.h>
+
+/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
+#define NF_MAX_HOOKS 8
+
+#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
new file mode 100644
index 000000000000..cb0727fe2b3d
--- /dev/null
+++ b/include/linux/netfilter_ingress.h
@@ -0,0 +1,41 @@
+#ifndef _NETFILTER_INGRESS_H_
+#define _NETFILTER_INGRESS_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
+ NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
+ NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
+ skb->dev, NULL, NULL);
+ return nf_hook_slow(skb, &state);
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev)
+{
+ INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+}
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev) {}
+#endif /* CONFIG_NETFILTER_INGRESS */
+#endif /* _NETFILTER_INGRESS_H_ */
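
A hedged sketch of how a receive path is expected to consult the new per-device ingress hook list: the cheap _active() check first, the slow path only when hooks are registered (illustrative wrapper; the real call site lives in the core RX code, and as documented earlier a return value of 1 means the packet may pass):

#include <linux/netfilter_ingress.h>
#include <linux/skbuff.h>

static int example_ingress(struct sk_buff *skb)
{
	if (!nf_hook_ingress_active(skb))
		return 1;		/* no hooks registered: pass */

	return nf_hook_ingress(skb);	/* 1 = pass, otherwise consumed */
}
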
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 901e84db847d..4073510da485 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -65,8 +65,7 @@ struct ipt_error {
extern void *ipt_alloc_initial_table(const struct xt_table *);
extern unsigned int ipt_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
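
The table runners now take the packaged hook state instead of separate in/out devices. A hedged sketch of a hook function adapted to the new convention, assuming the nf_hookfn prototype that receives the state, its in/out members, and the per-netns filter table (illustrative only):

#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/net_namespace.h>

static unsigned int example_ipv4_hook(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct net *net = dev_net(state->in ? state->in : state->out);

	return ipt_do_table(skb, ops->hooknum, state,
			    net->ipv4.iptable_filter);
}
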
#ifdef CONFIG_COMPAT
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 64dad1cc1a4b..8b7d28f3aada 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -25,6 +25,9 @@ void ipv6_netfilter_fini(void);
struct nf_ipv6_ops {
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
+ void (*route_input)(struct sk_buff *skb);
+ int (*fragment)(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
};
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 610208b18c05..b40d2b635778 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -31,8 +31,7 @@ extern struct xt_table *ip6t_register_table(struct net *net,
extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
/* Check for an extension */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 02fc86d2348e..9120edb650a0 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -28,6 +28,8 @@ struct netlink_skb_parms {
__u32 dst_group;
__u32 flags;
struct sock *sk;
+ bool nsid_is_set;
+ int nsid;
};
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
@@ -134,7 +136,7 @@ struct netlink_callback {
struct netlink_notify {
struct net *net;
- int portid;
+ u32 portid;
int protocol;
};
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index ed43cb74b11d..32201c269890 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -16,6 +16,13 @@
#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
+enum nfs4_acl_whotype {
+ NFS4_ACL_WHO_NAMED = 0,
+ NFS4_ACL_WHO_OWNER,
+ NFS4_ACL_WHO_GROUP,
+ NFS4_ACL_WHO_EVERYONE,
+};
+
struct nfs4_ace {
uint32_t type;
uint32_t flag;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index b01ccf371fdc..b95f914ce083 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -447,13 +447,12 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
struct iov_iter *iter,
loff_t pos);
extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
- struct iov_iter *iter,
- loff_t pos);
+ struct iov_iter *iter);
/*
* linux/fs/nfs/dir.c
@@ -512,6 +511,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
* Try to write back everything synchronously (but check the
* return value!)
*/
+extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
deleted file mode 100644
index 333844e38f66..000000000000
--- a/include/linux/nfs_idmap.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * include/linux/nfs_idmap.h
- *
- * UID and GID to name mapping for clients.
- *
- * Copyright (c) 2002 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Marius Aamodt Eriksen <marius@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef NFS_IDMAP_H
-#define NFS_IDMAP_H
-
-#include <linux/uidgid.h>
-#include <uapi/linux/nfs_idmap.h>
-
-
-/* Forward declaration to make this header independent of others */
-struct nfs_client;
-struct nfs_server;
-struct nfs_fattr;
-struct nfs4_string;
-
-#if IS_ENABLED(CONFIG_NFS_V4)
-int nfs_idmap_init(void);
-void nfs_idmap_quit(void);
-#else
-static inline int nfs_idmap_init(void)
-{
- return 0;
-}
-
-static inline void nfs_idmap_quit(void)
-{}
-#endif
-
-int nfs_idmap_new(struct nfs_client *);
-void nfs_idmap_delete(struct nfs_client *);
-
-void nfs_fattr_init_names(struct nfs_fattr *fattr,
- struct nfs4_string *owner_name,
- struct nfs4_string *group_name);
-void nfs_fattr_free_names(struct nfs_fattr *);
-void nfs_fattr_map_and_free_names(struct nfs_server *, struct nfs_fattr *);
-
-int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, kuid_t *);
-int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, kgid_t *);
-int nfs_map_uid_to_name(const struct nfs_server *, kuid_t, char *, size_t);
-int nfs_map_gid_to_group(const struct nfs_server *, kgid_t, char *, size_t);
-
-int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res);
-
-extern unsigned int nfs_idmap_cache_timeout;
-#endif /* NFS_IDMAP_H */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 4cb3eaa89cf7..93ab6071bbe9 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -255,11 +255,13 @@ struct nfs4_layoutget {
struct nfs4_getdeviceinfo_args {
struct nfs4_sequence_args seq_args;
struct pnfs_device *pdev;
+ __u32 notify_types;
};
struct nfs4_getdeviceinfo_res {
struct nfs4_sequence_res seq_res;
struct pnfs_device *pdev;
+ __u32 notification;
};
struct nfs4_layoutcommit_args {
@@ -1271,11 +1273,15 @@ struct nfs42_falloc_args {
nfs4_stateid falloc_stateid;
u64 falloc_offset;
u64 falloc_length;
+ const u32 *falloc_bitmask;
};
struct nfs42_falloc_res {
struct nfs4_sequence_res seq_res;
unsigned int status;
+
+ struct nfs_fattr *falloc_fattr;
+ const struct nfs_server *falloc_server;
};
struct nfs42_seek_args {
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index ff3fea3194c6..9abb763e4b86 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -460,7 +460,7 @@ struct nilfs_btree_node {
/* level */
#define NILFS_BTREE_LEVEL_DATA 0
#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
-#define NILFS_BTREE_LEVEL_MAX 14
+#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
/**
* struct nilfs_palloc_group_desc - block group descriptor
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 9b2022ab4d85..f94da0e65dea 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -25,16 +25,11 @@ static inline void touch_nmi_watchdog(void)
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
-extern void watchdog_enable_hardlockup_detector(bool val);
-extern bool watchdog_hardlockup_detector_is_enabled(void);
+extern void hardlockup_detector_disable(void);
#else
-static inline void watchdog_enable_hardlockup_detector(bool val)
+static inline void hardlockup_detector_disable(void)
{
}
-static inline bool watchdog_hardlockup_detector_is_enabled(void)
-{
- return true;
-}
#endif
/*
@@ -68,12 +63,23 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
+extern int nmi_watchdog_enabled;
+extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
+extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
struct ctl_table;
-extern int proc_dowatchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
+extern int proc_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_nmi_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_soft_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_watchdog_thresh(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_watchdog_cpumask(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 0adad4a5419b..c0d94ed8ce9a 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -74,7 +74,7 @@ struct nvme_dev {
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
- struct pci_dev *pci_dev;
+ struct device *dev;
struct dma_pool *prp_page_pool;
struct dma_pool *prp_small_pool;
int instance;
@@ -92,6 +92,7 @@ struct nvme_dev {
work_func_t reset_workfn;
struct work_struct reset_work;
struct work_struct probe_work;
+ struct work_struct scan_work;
char name[12];
char serial[20];
char model[40];
@@ -117,8 +118,9 @@ struct nvme_ns {
unsigned ns_id;
int lba_shift;
- int ms;
- int pi_type;
+ u16 ms;
+ bool ext;
+ u8 pi_type;
u64 mode_select_num_blocks;
u32 mode_select_block_len;
};
@@ -145,25 +147,15 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-/**
- * nvme_free_iod - frees an nvme_iod
- * @dev: The device that the I/O was submitted to
- * @iod: The memory to free
- */
-void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
-
-int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
-struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
- unsigned long addr, unsigned length);
-void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
- struct nvme_iod *iod);
-int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
- struct nvme_command *, u32 *);
-int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
-int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
- u32 *result);
-int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
- dma_addr_t dma_addr);
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ void *buf, unsigned bufflen);
+int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ void *buffer, void __user *ubuffer, unsigned bufflen,
+ u32 *result, unsigned timeout);
+int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
+int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
+ struct nvme_id_ns **id);
+int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
diff --git a/include/linux/nx842.h b/include/linux/nx842.h
deleted file mode 100644
index a4d324c6406a..000000000000
--- a/include/linux/nx842.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __NX842_H__
-#define __NX842_H__
-
-int nx842_get_workmem_size(void);
-int nx842_get_workmem_size_aligned(void);
-int nx842_compress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-
-#endif
diff --git a/include/linux/of.h b/include/linux/of.h
index dfde07e77a63..b871ff9d81d7 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -121,6 +121,8 @@ extern struct device_node *of_stdout;
extern raw_spinlock_t devtree_lock;
#ifdef CONFIG_OF
+void of_core_init(void);
+
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_OF;
@@ -305,6 +307,7 @@ extern int of_property_read_string_helper(struct device_node *np,
extern int of_device_is_compatible(const struct device_node *device,
const char *);
extern bool of_device_is_available(const struct device_node *device);
+extern bool of_device_is_big_endian(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
@@ -332,6 +335,7 @@ extern int of_count_phandle_with_args(const struct device_node *np,
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
+extern int of_alias_get_highest_id(const char *stem);
extern int of_machine_is_compatible(const char *compat);
@@ -374,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
#else /* CONFIG_OF */
+static inline void of_core_init(void)
+{
+}
+
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
return false;
@@ -466,6 +474,11 @@ static inline bool of_device_is_available(const struct device_node *device)
return false;
}
+static inline bool of_device_is_big_endian(const struct device_node *device)
+{
+ return false;
+}
+
static inline struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp)
@@ -594,6 +607,11 @@ static inline int of_alias_get_id(struct device_node *np, const char *stem)
return -ENOSYS;
}
+static inline int of_alias_get_highest_id(const char *stem)
+{
+ return -ENOSYS;
+}
+
static inline int of_machine_is_compatible(const char *compat)
{
return 0;
@@ -616,6 +634,38 @@ static inline const char *of_prop_next_string(struct property *prop,
return NULL;
}
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+ return 0;
+}
+
+static inline int of_node_test_and_set_flag(struct device_node *n,
+ unsigned long flag)
+{
+ return 0;
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+}
+
+static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
+{
+}
+
+static inline int of_property_check_flag(struct property *p, unsigned long flag)
+{
+ return 0;
+}
+
+static inline void of_property_set_flag(struct property *p, unsigned long flag)
+{
+}
+
+static inline void of_property_clear_flag(struct property *p, unsigned long flag)
+{
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0ff360d5b3b3..fd627a58068f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -33,6 +33,8 @@ extern void *of_fdt_get_property(const void *blob,
extern int of_fdt_is_compatible(const void *blob,
unsigned long node,
const char *compat);
+extern bool of_fdt_is_big_endian(const void *blob,
+ unsigned long node);
extern int of_fdt_match(const void *blob, unsigned long node,
const char *const *compat);
extern void of_fdt_unflatten_tree(unsigned long *blob,
@@ -62,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_fdt_scan_reserved_mem(void);
+extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
@@ -89,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {}
+static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index befef42e015b..7bc92e050608 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -14,6 +14,8 @@
#ifndef __LINUX_OF_GRAPH_H
#define __LINUX_OF_GRAPH_H
+#include <linux/types.h>
+
/**
* struct of_endpoint - the OF graph endpoint data structure
* @port: identifier (value of reg property) of a port this endpoint belongs to
@@ -26,9 +28,21 @@ struct of_endpoint {
const struct device_node *local_node;
};
+/**
+ * for_each_endpoint_of_node - iterate over every endpoint in a device node
+ * @parent: parent device node containing ports and endpoints
+ * @child: loop variable pointing to the current endpoint node
+ *
+ * When breaking out of the loop, of_node_put(child) has to be called manually.
+ */
+#define for_each_endpoint_of_node(parent, child) \
+ for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \
+ child = of_graph_get_next_endpoint(parent, child))
+
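
A hedged sketch of the reference rule spelled out in the kernel-doc above: when leaving the loop early, the current endpoint still holds a reference that somebody must drop with of_node_put() (hypothetical helper and port number):

#include <linux/of.h>
#include <linux/of_graph.h>

/* Returns the first endpoint on "wanted_port" with a reference held,
 * or NULL; the caller is responsible for of_node_put() on the result.
 */
static struct device_node *example_find_endpoint(struct device_node *parent,
						 u32 wanted_port)
{
	struct device_node *ep;
	struct of_endpoint info;

	for_each_endpoint_of_node(parent, ep) {
		if (of_graph_parse_endpoint(ep, &info))
			continue;
		if (info.port == wanted_port)
			return ep;	/* reference passed to the caller */
	}
	return NULL;
}
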
#ifdef CONFIG_OF
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
+struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
struct device_node *of_graph_get_remote_port_parent(
@@ -42,6 +56,12 @@ static inline int of_graph_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
+static inline struct device_node *of_graph_get_port_by_id(
+ struct device_node *node, u32 id)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_next_endpoint(
const struct device_node *parent,
struct device_node *previous)
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index bfec136a6d1e..d884929a7747 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -37,8 +37,6 @@ extern int of_irq_parse_one(struct device_node *device, int index,
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern int of_irq_to_resource_table(struct device_node *dev,
- struct resource *res, int nr_irqs);
extern void of_irq_init(const struct of_device_id *matches);
@@ -46,6 +44,8 @@ extern void of_irq_init(const struct of_device_id *matches);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
+extern int of_irq_to_resource_table(struct device_node *dev,
+ struct resource *res, int nr_irqs);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -59,6 +59,11 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name)
{
return 0;
}
+static inline int of_irq_to_resource_table(struct device_node *dev,
+ struct resource *res, int nr_irqs)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_OF)
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index d449018d0726..8f2237eb3485 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -24,6 +24,7 @@ struct phy_device *of_phy_attach(struct net_device *dev,
phy_interface_t iface);
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
#else /* CONFIG_OF */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
@@ -60,6 +61,12 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
}
+
+static inline int of_mdio_parse_addr(struct device *dev,
+ const struct device_node *np)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_OF */
#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 34597c8c1a4c..9cd72aab76fe 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -9,8 +9,11 @@
#ifdef CONFIG_OF_NET
#include <linux/of.h>
+
+struct net_device;
extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
+extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np)
{
@@ -21,6 +24,11 @@ static inline const void *of_get_mac_address(struct device_node *np)
{
return NULL;
}
+
+static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
+{
+ return NULL;
+}
#endif
#endif /* __LINUX_OF_NET_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index c2080eebbb47..7dee00143afd 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -163,7 +163,8 @@ extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
extern int gpmc_calc_divider(unsigned int sync_clk);
-extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t);
+extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
+ const struct gpmc_settings *s);
extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p);
extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
extern void gpmc_cs_free(int cs);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index d5771bed59c9..7deecb7bca5e 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -32,6 +32,8 @@ enum oom_scan_t {
/* Thread is the potential origin of an oom condition; kill first on oom */
#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
+extern struct mutex oom_lock;
+
static inline void set_current_oom_origin(void)
{
current->signal->oom_flags |= OOM_FLAG_ORIGIN;
@@ -47,9 +49,7 @@ static inline bool oom_task_origin(const struct task_struct *p)
return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}
-extern void mark_tsk_oom_victim(struct task_struct *tsk);
-
-extern void unmark_oom_victim(void);
+extern void mark_oom_victim(struct task_struct *tsk);
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
@@ -62,11 +62,9 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
struct mem_cgroup *memcg, nodemask_t *nodemask,
const char *message);
-extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
-
extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order, const nodemask_t *nodemask);
+ int order, const nodemask_t *nodemask,
+ struct mem_cgroup *memcg);
extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
unsigned long totalpages, const nodemask_t *nodemask,
@@ -74,6 +72,9 @@ extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *mask, bool force_kill);
+
+extern void exit_oom_victim(void);
+
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 3a6490e81b28..703ea5c30a33 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -32,4 +32,9 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);
+static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
+{
+ return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
+}
+
#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5ed7bdaf22d5..f34e040b34e9 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -289,6 +289,47 @@ PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
+/*
+ * On an anonymous page mapped into a user virtual memory area,
+ * page->mapping points to its anon_vma, not to a struct address_space;
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ *
+ * Please note that, confusingly, "page_mapping" refers to the inode
+ * address_space which maps the page from disk; whereas "page_mapped"
+ * refers to user virtual address space into which the page is mapped.
+ */
+#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+
+static inline int PageAnon(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+#ifdef CONFIG_KSM
+/*
+ * A KSM page is one of those write-protected "shared pages" or "merged pages"
+ * which KSM maps into multiple mms, wherever identical anonymous page content
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
+ */
+static inline int PageKsm(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+#else
+TESTPAGEFLAG_FALSE(Ksm)
+#endif
+
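
A hedged sketch that decodes the mapping bits exactly as the comment above describes (illustrative helper only; a never-mapped or freed page simply falls through to the last case):

#include <linux/mm_types.h>
#include <linux/page-flags.h>

static const char *example_page_kind(struct page *page)
{
	if (PageKsm(page))
		return "ksm";	/* PAGE_MAPPING_ANON | PAGE_MAPPING_KSM */
	if (PageAnon(page))
		return "anon";	/* PAGE_MAPPING_ANON only */
	return "file or unmapped";
}
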
u64 stable_page_flags(struct page *page);
static inline int PageUptodate(struct page *page)
@@ -328,8 +369,6 @@ static inline void SetPageUptodate(struct page *page)
CLEARPAGEFLAG(Uptodate, uptodate)
-extern void cancel_dirty_page(struct page *page, unsigned int account_size);
-
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);
@@ -428,6 +467,21 @@ static inline void ClearPageCompound(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
+#ifdef CONFIG_HUGETLB_PAGE
+int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page);
+bool page_huge_active(struct page *page);
+#else
+TESTPAGEFLAG_FALSE(Huge)
+TESTPAGEFLAG_FALSE(HeadHuge)
+
+static inline bool page_huge_active(struct page *page)
+{
+ return 0;
+}
+#endif
+
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -482,6 +536,53 @@ static inline int PageTransTail(struct page *page)
#endif
/*
+ * PageBuddy() indicates that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2, but should not be too close to
+ * -2, so that an underflow of page_mapcount() won't be mistaken
+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
+ */
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
+static inline int PageBuddy(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBuddy(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBuddy(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBuddy(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
+#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
+
+static inline int PageBalloon(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBalloon(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
+/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
*/
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b3736f7065c..fb0814ca65c7 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -651,7 +651,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
+extern void __delete_from_page_cache(struct page *page, void *shadow,
+ struct mem_cgroup *memcg);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e63112fb55be..8a0321a8fb59 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -355,6 +355,7 @@ struct pci_dev {
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
+ unsigned int has_secondary_link:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -577,9 +578,15 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 val);
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+typedef u64 pci_bus_addr_t;
+#else
+typedef u32 pci_bus_addr_t;
+#endif
+
struct pci_bus_region {
- dma_addr_t start;
- dma_addr_t end;
+ pci_bus_addr_t start;
+ pci_bus_addr_t end;
};
struct pci_dynids {
@@ -773,8 +780,6 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
-struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
@@ -974,7 +979,6 @@ void pci_intx(struct pci_dev *dev, int enable);
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
-void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
@@ -1006,6 +1010,7 @@ int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
+void pci_ignore_hotplug(struct pci_dev *dev);
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
@@ -1043,11 +1048,6 @@ bool pci_dev_run_wake(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
-static inline void pci_ignore_hotplug(struct pci_dev *dev)
-{
- dev->ignore_hotplug = 1;
-}
-
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool enable)
{
@@ -1128,7 +1128,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
-static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
+static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
struct pci_bus_region region;
@@ -1178,6 +1178,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type);
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
@@ -1196,15 +1197,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-enum pci_dma_burst_strategy {
- PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
- strategy_parameter is N/A */
- PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
- byte boundaries */
- PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
- strategy_parameter byte boundaries */
-};
-
struct msix_entry {
u32 vector; /* kernel uses to write allocated vector */
u16 entry; /* driver uses to specify entry, OS writes */
@@ -1429,8 +1421,6 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
-#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
-
static inline void pci_block_cfg_access(struct pci_dev *dev) { }
static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
{ return 0; }
@@ -1673,13 +1663,25 @@ int pci_ext_cfg_avail(void);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
+int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
+int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
+
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
+resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
#else
+static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
+{
+ return -ENOSYS;
+}
+static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
+{
+ return -ENOSYS;
+}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
@@ -1690,6 +1692,8 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
+static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
+{ return 0; }
#endif
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
@@ -1890,4 +1894,15 @@ static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
+
+/**
+ * pci_ari_enabled - query ARI forwarding status
+ * @bus: the PCI bus
+ *
+ * Returns true if ARI forwarding is enabled.
+ */
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+ return bus->self && bus->self->ari_enabled;
+}
#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 38cff8f6716d..fcff8f865341 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -579,6 +579,7 @@
#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800
#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b
#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c
+#define PCI_DEVICE_ID_AMD_KERNCZ_SMBUS 0x790b
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
@@ -2329,6 +2330,8 @@
#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+
#define PCI_VENDOR_ID_BELKIN 0x1799
#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
@@ -2541,10 +2544,6 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
-#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
-#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
-#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
-#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 50e50095c8d1..84a109449610 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+ return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
return 0;
}
+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+ return percpu_counter_compare(fbc, rhs);
+}
+
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
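A minimal sketch of why the batch-aware compare is exported: a caller that adds with its own batch can now compare against a limit using the same batch, so both operations share the same per-cpu error bound. The counter, limit and EXAMPLE_BATCH value are hypothetical.

#include <linux/percpu_counter.h>

#define EXAMPLE_BATCH	128	/* hypothetical per-counter batch */

/* Add and compare with one batch so the comparison sees the same
 * worst-case per-cpu drift as the updates. */
static bool example_over_limit(struct percpu_counter *nr_items, s64 limit)
{
	__percpu_counter_add(nr_items, 1, EXAMPLE_BATCH);
	return __percpu_counter_compare(nr_items, limit, EXAMPLE_BATCH) > 0;
}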
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2b621982938d..1b82d44b0a02 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -53,6 +53,7 @@ struct perf_guest_info_callbacks {
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/workqueue.h>
+#include <linux/cgroup.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -91,8 +92,6 @@ struct hw_perf_event_extra {
int idx; /* index in shared_regs->regs[] */
};
-struct event_constraint;
-
/**
* struct hw_perf_event - performance event hardware details:
*/
@@ -111,17 +110,24 @@ struct hw_perf_event {
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
-
- struct event_constraint *constraint;
};
struct { /* software */
struct hrtimer hrtimer;
};
struct { /* tracepoint */
- struct task_struct *tp_target;
/* for tp_event->class */
struct list_head tp_list;
};
+ struct { /* intel_cqm */
+ int cqm_state;
+ u32 cqm_rmid;
+ struct list_head cqm_events_entry;
+ struct list_head cqm_groups_entry;
+ struct list_head cqm_group_entry;
+ };
+ struct { /* itrace */
+ int itrace_started;
+ };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
/*
@@ -129,12 +135,12 @@ struct hw_perf_event {
* problem hw_breakpoint has with context
* creation and event initialization.
*/
- struct task_struct *bp_target;
struct arch_hw_breakpoint info;
struct list_head bp_list;
};
#endif
};
+ struct task_struct *target;
int state;
local64_t prev_count;
u64 sample_period;
@@ -166,6 +172,11 @@ struct perf_event;
* pmu::capabilities flags
*/
#define PERF_PMU_CAP_NO_INTERRUPT 0x01
+#define PERF_PMU_CAP_NO_NMI 0x02
+#define PERF_PMU_CAP_AUX_NO_SG 0x04
+#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
+#define PERF_PMU_CAP_EXCLUSIVE 0x10
+#define PERF_PMU_CAP_ITRACE 0x20
/**
* struct pmu - generic performance monitoring unit
@@ -186,6 +197,7 @@ struct pmu {
int * __percpu pmu_disable_count;
struct perf_cpu_context * __percpu pmu_cpu_context;
+ atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -262,9 +274,32 @@ struct pmu {
int (*event_idx) (struct perf_event *event); /*optional */
/*
- * flush branch stack on context-switches (needed in cpu-wide mode)
+ * context-switches callback
+ */
+ void (*sched_task) (struct perf_event_context *ctx,
+ bool sched_in);
+ /*
+ * PMU specific data size
+ */
+ size_t task_ctx_size;
+
+
+ /*
+ * Return the count value for a counter.
+ */
+ u64 (*count) (struct perf_event *event); /*optional*/
+
+ /*
+ * Set up pmu-private data structures for an AUX area
+ */
+ void *(*setup_aux) (int cpu, void **pages,
+ int nr_pages, bool overwrite);
+ /* optional */
+
+ /*
+ * Free pmu-private AUX data structures
*/
- void (*flush_branch_stack) (void);
+ void (*free_aux) (void *aux); /* optional */
};
/**
@@ -300,6 +335,7 @@ struct swevent_hlist {
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
+#define PERF_ATTACH_TASK_DATA 0x08
struct perf_cgroup;
struct ring_buffer;
@@ -438,6 +474,7 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
+ u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
@@ -504,7 +541,7 @@ struct perf_event_context {
u64 generation;
int pin_count;
int nr_cgroups; /* cgroup evts */
- int nr_branch_stack; /* branch_stack evt */
+ void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
struct delayed_work orphans_remove;
@@ -525,8 +562,12 @@ struct perf_cpu_context {
struct perf_event_context *task_ctx;
int active_oncpu;
int exclusive;
+
+ raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
+ unsigned int hrtimer_active;
+
struct pmu *unique_pmu;
struct perf_cgroup *cgrp;
};
@@ -536,12 +577,52 @@ struct perf_output_handle {
struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
- void *addr;
+ union {
+ void *addr;
+ unsigned long head;
+ };
int page;
};
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
+};
+
+struct perf_cgroup {
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info __percpu *info;
+};
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+ return container_of(task_css(task, perf_event_cgrp_id),
+ struct perf_cgroup, css);
+}
+#endif /* CONFIG_CGROUP_PERF */
+
#ifdef CONFIG_PERF_EVENTS
+extern void *perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event);
+extern void perf_aux_output_end(struct perf_output_handle *handle,
+ unsigned long size, bool truncated);
+extern int perf_aux_output_skip(struct perf_output_handle *handle,
+ unsigned long size);
+extern void *perf_get_aux(struct perf_output_handle *handle);
+
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -558,6 +639,8 @@ extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
+extern void perf_sched_cb_dec(struct pmu *pmu);
+extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
@@ -651,6 +734,22 @@ extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
+extern void perf_event_output(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
+extern void
+perf_event_header__init_id(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event);
+extern void
+perf_event__output_id_sample(struct perf_event *event,
+ struct perf_output_handle *handle,
+ struct perf_sample_data *sample);
+
+extern void
+perf_log_lost_samples(struct perf_event *event, u64 lost);
+
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
@@ -715,11 +814,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
extern struct static_key_deferred perf_sched_events;
+static __always_inline bool
+perf_sw_migrate_enabled(void)
+{
+ if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
+ return true;
+ return false;
+}
+
+static inline void perf_event_task_migrate(struct task_struct *task)
+{
+ if (perf_sw_migrate_enabled())
+ task->sched_migrated = 1;
+}
+
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
+
+ if (perf_sw_migrate_enabled() && task->sched_migrated) {
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+ task->sched_migrated = 0;
+ }
}
static inline void perf_event_task_sched_out(struct task_struct *prev,
@@ -731,6 +852,11 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
__perf_event_task_sched_out(prev, next);
}
+static inline u64 __perf_event_count(struct perf_event *event)
+{
+ return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
+
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -800,6 +926,16 @@ static inline bool has_branch_stack(struct perf_event *event)
return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}
+static inline bool needs_branch_stack(struct perf_event *event)
+{
+ return event->attr.branch_sample_type != 0;
+}
+
+static inline bool has_aux(struct perf_event *event)
+{
+ return event->pmu->setup_aux;
+}
+
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
@@ -815,6 +951,19 @@ extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
+static inline void *
+perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event) { return NULL; }
+static inline void
+perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ bool truncated) { }
+static inline int
+perf_aux_output_skip(struct perf_output_handle *handle,
+ unsigned long size) { return -EINVAL; }
+static inline void *
+perf_get_aux(struct perf_output_handle *handle) { return NULL; }
+static inline void
+perf_event_task_migrate(struct task_struct *task) { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task) { }
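A minimal sketch of the AUX output cycle implied by the prototypes declared above; the function name and length argument are hypothetical, and real PMU drivers keep the output handle in per-cpu state rather than on the stack.

#include <linux/perf_event.h>

/* Hypothetical PMU path: hand "len" bytes that the hardware wrote into the
 * AUX buffer back to perf, flagging whether the data was truncated. */
static void example_pmu_update_aux(struct perf_event *event, unsigned long len,
				   bool truncated)
{
	struct perf_output_handle handle;
	void *buf;

	buf = perf_aux_output_begin(&handle, event);
	if (!buf)
		return;		/* no AUX buffer mapped, or no room left */

	/* ... the hardware would be pointed at perf_get_aux(&handle) here ... */

	perf_aux_output_end(&handle, len, truncated);
}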
diff --git a/include/linux/personality.h b/include/linux/personality.h
index 646c0a7d50fa..aeb7892b2468 100644
--- a/include/linux/personality.h
+++ b/include/linux/personality.h
@@ -3,52 +3,14 @@
#include <uapi/linux/personality.h>
-
-/*
- * Handling of different ABIs (personalities).
- */
-
-struct exec_domain;
-struct pt_regs;
-
-extern int register_exec_domain(struct exec_domain *);
-extern int unregister_exec_domain(struct exec_domain *);
-extern int __set_personality(unsigned int);
-
-
-/*
- * Description of an execution domain.
- *
- * The first two members are refernced from assembly source
- * and should stay where they are unless explicitly needed.
- */
-typedef void (*handler_t)(int, struct pt_regs *);
-
-struct exec_domain {
- const char *name; /* name of the execdomain */
- handler_t handler; /* handler for syscalls */
- unsigned char pers_low; /* lowest personality */
- unsigned char pers_high; /* highest personality */
- unsigned long *signal_map; /* signal mapping */
- unsigned long *signal_invmap; /* reverse signal mapping */
- struct map_segment *err_map; /* error mapping */
- struct map_segment *socktype_map; /* socket type mapping */
- struct map_segment *sockopt_map; /* socket option mapping */
- struct map_segment *af_map; /* address family mapping */
- struct module *module; /* module context of the ed. */
- struct exec_domain *next; /* linked list (internal) */
-};
-
/*
* Return the base personality without flags.
*/
#define personality(pers) (pers & PER_MASK)
-
/*
* Change personality of the currently running process.
*/
-#define set_personality(pers) \
- ((current->personality == (pers)) ? 0 : __set_personality(pers))
+#define set_personality(pers) (current->personality = (pers))
#endif /* _LINUX_PERSONALITY_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 685809835b5c..a26c3f84b8dd 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -181,6 +181,9 @@ struct mii_bus {
/* PHY addresses to be ignored when probing */
u32 phy_mask;
+ /* PHY addresses to ignore the TA/read failure */
+ u32 phy_ignore_ta_mask;
+
/*
* Pointer to an array of interrupts, each PHY's
* interrupt at the index matching its address
@@ -675,6 +678,17 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
+ * is RGMII (all variants)
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
+{
+ return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+ phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
+/**
* phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
* @phydev: The phy_device struct
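A minimal sketch of the new helper in a PHY driver's init path; the driver function and the delay programming are hypothetical.

#include <linux/phy.h>

/* Hypothetical config step: apply RGMII clock-delay setup only when the
 * interface is one of the RGMII variants the new helper covers. */
static int example_phy_config_init(struct phy_device *phydev)
{
	if (phy_interface_is_rgmii(phydev)) {
		/* ... program RX/TX delays via phy_write() ... */
	}
	return 0;
}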
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 7e75bfe37cc7..fe5732d53eda 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
+extern int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status)
@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
+static inline int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 18eccefea06e..d7e5d608faa7 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -142,7 +142,7 @@ static inline struct pinctrl * __must_check pinctrl_get_select(
s = pinctrl_lookup_state(p, name);
if (IS_ERR(s)) {
pinctrl_put(p);
- return ERR_PTR(PTR_ERR(s));
+ return ERR_CAST(s);
}
ret = pinctrl_select_state(p, s);
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 66e4697516de..9ba59fcba549 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -127,7 +127,7 @@ struct pinctrl_ops {
*/
struct pinctrl_desc {
const char *name;
- struct pinctrl_pin_desc const *pins;
+ const struct pinctrl_pin_desc *pins;
unsigned int npins;
const struct pinctrl_ops *pctlops;
const struct pinmux_ops *pmxops;
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 511bda9ed4bf..ace60d775b20 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -56,6 +56,9 @@ struct pinctrl_dev;
* depending on whether the GPIO is configured as input or output,
* a direction selector function may be implemented as a backing
* to the GPIO controllers that need pin muxing.
+ * @strict: do not allow simultaneous use of the same pin for GPIO and another
+ * function. Check both gpio_owner and mux_owner strictly before approving
+ * the pin request.
*/
struct pinmux_ops {
int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
@@ -66,7 +69,7 @@ struct pinmux_ops {
int (*get_function_groups) (struct pinctrl_dev *pctldev,
unsigned selector,
const char * const **groups,
- unsigned * const num_groups);
+ unsigned *num_groups);
int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
unsigned group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
@@ -79,6 +82,7 @@ struct pinmux_ops {
struct pinctrl_gpio_range *range,
unsigned offset,
bool input);
+ bool strict;
};
#endif /* CONFIG_PINMUX */
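A minimal sketch of a pin controller opting into the new strict mode; the ops table is reduced to a stub set_mux for brevity, and all names are hypothetical.

#include <linux/pinctrl/pinmux.h>

/* Hypothetical mux handler; a real driver would program the pad registers. */
static int example_set_mux(struct pinctrl_dev *pctldev, unsigned func_selector,
			   unsigned group_selector)
{
	return 0;
}

/* With .strict set, the core refuses a GPIO request on a pin that already
 * has a mux owner, and refuses a mux request on a pin owned as GPIO. */
static const struct pinmux_ops example_pmxops = {
	.set_mux	= example_set_mux,
	.strict		= true,
};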
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
new file mode 100644
index 000000000000..8a1f6a4920b2
--- /dev/null
+++ b/include/linux/platform_data/dma-hsu.h
@@ -0,0 +1,25 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PLATFORM_DATA_DMA_HSU_H
+#define _PLATFORM_DATA_DMA_HSU_H
+
+#include <linux/device.h>
+
+struct hsu_dma_slave {
+ struct device *dma_dev;
+ int chan_id;
+};
+
+struct hsu_dma_platform_data {
+ unsigned short nr_channels;
+};
+
+#endif /* _PLATFORM_DATA_DMA_HSU_H */
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index eabac4e2fc99..2d08816720f6 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -48,6 +48,9 @@ struct sdma_script_start_addrs {
s32 ssish_2_mcu_addr;
s32 hdmi_dma_addr;
/* End of v2 array */
+ s32 zcanfd_2_mcu_addr;
+ s32 zqspi_2_mcu_addr;
+ /* End of v3 array */
};
/**
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 5d50b25a73d7..cb2618147c34 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -208,9 +208,17 @@ struct omap_gpio_platform_data {
int (*get_context_loss_count)(struct device *dev);
};
+#if IS_BUILTIN(CONFIG_GPIO_OMAP)
extern void omap2_gpio_prepare_for_idle(int off_mode);
extern void omap2_gpio_resume_after_idle(void);
-extern void omap_set_gpio_debounce(int gpio, int enable);
-extern void omap_set_gpio_debounce_time(int gpio, int enable);
+#else
+static inline void omap2_gpio_prepare_for_idle(int off_mode)
+{
+}
+
+static inline void omap2_gpio_resume_after_idle(void)
+{
+}
+#endif
#endif
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
index 2312d197dfb7..89fd34727a24 100644
--- a/include/linux/platform_data/i2c-davinci.h
+++ b/include/linux/platform_data/i2c-davinci.h
@@ -18,6 +18,7 @@ struct davinci_i2c_platform_data {
unsigned int bus_delay; /* post-transaction delay (usec) */
unsigned int sda_pin; /* GPIO pin ID to use for SDA */
unsigned int scl_pin; /* GPIO pin ID to use for SCL */
+ bool has_pfunc; /* chip has an ICPFUNC register */
};
/* for board setup code */
diff --git a/include/linux/platform_data/irq-renesas-irqc.h b/include/linux/platform_data/irq-renesas-irqc.h
deleted file mode 100644
index 3ae17b3e00ed..000000000000
--- a/include/linux/platform_data/irq-renesas-irqc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Renesas IRQC Driver
- *
- * Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __IRQ_RENESAS_IRQC_H__
-#define __IRQ_RENESAS_IRQC_H__
-
-struct renesas_irqc_config {
- unsigned int irq_base;
-};
-
-#endif /* __IRQ_RENESAS_IRQC_H__ */
diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h
index 9248e3a7e333..5e3ff653900c 100644
--- a/include/linux/platform_data/keyboard-spear.h
+++ b/include/linux/platform_data/keyboard-spear.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 ST Microelectronics
- * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ * Rajeev Kumar <rajeevkumar.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/include/linux/platform_data/mmc-msm_sdcc.h b/include/linux/platform_data/mmc-msm_sdcc.h
deleted file mode 100644
index 55aa873c9396..000000000000
--- a/include/linux/platform_data/mmc-msm_sdcc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __MMC_MSM_SDCC_H
-#define __MMC_MSM_SDCC_H
-
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-
-struct msm_mmc_gpio {
- unsigned no;
- const char *name;
-};
-
-struct msm_mmc_gpio_data {
- struct msm_mmc_gpio *gpio;
- u8 size;
-};
-
-struct msm_mmc_platform_data {
- unsigned int ocr_mask; /* available voltages */
- u32 (*translate_vdd)(struct device *, unsigned int);
- unsigned int (*status)(struct device *);
- int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
- struct msm_mmc_gpio_data *gpio_data;
- void (*init_card)(struct mmc_card *card);
-};
-
-#endif
diff --git a/include/linux/platform_data/msm_serial_hs.h b/include/linux/platform_data/msm_serial_hs.h
deleted file mode 100644
index 98a2046f8b31..000000000000
--- a/include/linux/platform_data/msm_serial_hs.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- * Author: Nick Pelly <npelly@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_SERIAL_HS_H
-#define __ASM_ARCH_MSM_SERIAL_HS_H
-
-#include <linux/serial_core.h>
-
-/* API to request the uart clock off or on for low power management
- * Clients should call request_clock_off() when no uart data is expected,
- * and must call request_clock_on() before any further uart data can be
- * received. */
-extern void msm_hs_request_clock_off(struct uart_port *uport);
-extern void msm_hs_request_clock_on(struct uart_port *uport);
-
-/**
- * struct msm_serial_hs_platform_data
- * @rx_wakeup_irq: Rx activity irq
- * @rx_to_inject: extra character to be inserted to Rx tty on wakeup
- * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character
- * @exit_lpm_cb: function called before every Tx transaction
- *
- * This is an optional structure required for UART Rx GPIO IRQ based
- * wakeup from low power state. UART wakeup can be triggered by RX activity
- * (using a wakeup GPIO on the UART RX pin). This should only be used if
- * there is not a wakeup GPIO on the UART CTS, and the first RX byte is
- * known (eg., with the Bluetooth Texas Instruments HCILL protocol),
- * since the first RX byte will always be lost. RTS will be asserted even
- * while the UART is clocked off in this mode of operation.
- */
-struct msm_serial_hs_platform_data {
- int rx_wakeup_irq;
- unsigned char inject_rx_on_wakeup;
- char rx_to_inject;
- void (*exit_lpm_cb)(struct uart_port *);
-};
-
-#endif
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
new file mode 100644
index 000000000000..ac91707dabcb
--- /dev/null
+++ b/include/linux/platform_data/nfcmrvl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#ifndef _NFCMRVL_PTF_H_
+#define _NFCMRVL_PTF_H_
+
+struct nfcmrvl_platform_data {
+ /*
+ * Generic
+ */
+
+ /* GPIO that is wired to RESET_N signal */
+ unsigned int reset_n_io;
+ /* Tell if the transport is muxed in an HCI one */
+ unsigned int hci_muxed;
+
+ /*
+ * UART specific
+ */
+
+ /* Tell if UART needs flow control at init */
+ unsigned int flow_control;
+ /* Tell if firmware supports break control for power management */
+ unsigned int break_control;
+};
+
+#endif /* _NFCMRVL_PTF_H_ */
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index 0a6de4ca4930..aed170588b74 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -27,6 +27,7 @@ enum ntc_thermistor_type {
TYPE_NCPXXWB473,
TYPE_NCPXXWL333,
TYPE_B57330V2103,
+ TYPE_NCPXXWF104,
};
struct ntc_thermistor_platform_data {
diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h
new file mode 100644
index 000000000000..d6ed28679bb2
--- /dev/null
+++ b/include/linux/platform_data/nxp-nci.h
@@ -0,0 +1,27 @@
+/*
+ * Generic platform data for the NXP NCI NFC chips.
+ *
+ * Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NXP_NCI_H_
+#define _NXP_NCI_H_
+
+struct nxp_nci_nfc_platform_data {
+ unsigned int gpio_en;
+ unsigned int gpio_fw;
+ unsigned int irq;
+};
+
+#endif /* _NXP_NCI_H_ */
diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h
index 3cc2e3c40914..a938eba2f18e 100644
--- a/include/linux/platform_data/serial-imx.h
+++ b/include/linux/platform_data/serial-imx.h
@@ -20,14 +20,9 @@
#define ASMARM_ARCH_UART_H
#define IMXUART_HAVE_RTSCTS (1<<0)
-#define IMXUART_IRDA (1<<1)
struct imxuart_platform_data {
unsigned int flags;
- void (*irda_enable)(int enable);
- unsigned int irda_inv_rx:1;
- unsigned int irda_inv_tx:1;
- unsigned short transceiver_delay;
};
#endif
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index a947ab8b441a..533d9807e543 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -5,8 +5,6 @@
#ifndef __LINUX_PLATFORM_DATA_SI5351_H__
#define __LINUX_PLATFORM_DATA_SI5351_H__
-struct clk;
-
/**
* enum si5351_pll_src - Si5351 pll clock source
* @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
@@ -107,8 +105,6 @@ struct si5351_clkout_config {
* @clkout: array of clkout configuration
*/
struct si5351_platform_data {
- struct clk *clk_xtal;
- struct clk *clk_clkin;
enum si5351_pll_src pll_src[2];
struct si5351_clkout_config clkout[8];
};
diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h
new file mode 100644
index 000000000000..1231e9bb00f1
--- /dev/null
+++ b/include/linux/platform_data/sky81452-backlight.h
@@ -0,0 +1,46 @@
+/*
+ * sky81452.h SKY81452 backlight driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SKY81452_BACKLIGHT_H
+#define _SKY81452_BACKLIGHT_H
+
+/**
+ * struct sky81452_platform_data
+ * @name: backlight driver name.
+ * If it is not defined, the default name is "lcd-backlight".
+ * @gpio_enable: GPIO number which controls the EN pin
+ * @enable: Enable mask for current sink channels 1, 2, 3, 4, 5 and 6.
+ * @ignore_pwm: true if DPWMI should be ignored.
+ * @dpwm_mode: true for DPWM dimming mode, otherwise analog dimming mode.
+ * @phase_shift: true for phase shift mode.
+ * @short_detection_threshold: one of 4, 5, 6 or 7 V.
+ * @boost_current_limit: either 2300 or 2750 mA.
+ */
+struct sky81452_bl_platform_data {
+ const char *name;
+ int gpio_enable;
+ unsigned int enable;
+ bool ignore_pwm;
+ bool dpwm_mode;
+ bool phase_shift;
+ unsigned int short_detection_threshold;
+ unsigned int boost_current_limit;
+};
+
+#endif
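A minimal sketch of board platform data matching the fields documented above; every value is hypothetical.

#include <linux/platform_data/sky81452-backlight.h>

/* Hypothetical board wiring: EN on GPIO 17, sinks 1-4 used, DPWM dimming. */
static struct sky81452_bl_platform_data example_bl_pdata = {
	.name				= "lcd-backlight",
	.gpio_enable			= 17,
	.enable				= 0x0f,		/* channels 1-4 */
	.ignore_pwm			= false,
	.dpwm_mode			= true,
	.phase_shift			= true,
	.short_detection_threshold	= 7,		/* volts */
	.boost_current_limit		= 2750,		/* mA */
};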
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st-nci.h
index b023373d9874..d9d400a297bd 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st-nci.h
@@ -1,7 +1,7 @@
/*
- * Driver include for the ST21NFCB NFC chip.
+ * Driver include for ST NCI NFC chip family.
*
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -16,14 +16,14 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _ST21NFCB_NCI_H_
-#define _ST21NFCB_NCI_H_
+#ifndef _ST_NCI_H_
+#define _ST_NCI_H_
-#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
+#define ST_NCI_DRIVER_NAME "st_nci"
-struct st21nfcb_nfc_platform_data {
+struct st_nci_nfc_platform_data {
unsigned int gpio_reset;
unsigned int irq_polarity;
};
-#endif /* _ST21NFCB_NCI_H_ */
+#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/tpm_stm_st33.h b/include/linux/platform_data/st33zp24.h
index ff75310c0f47..817dfdb37885 100644
--- a/include/linux/platform_data/tpm_stm_st33.h
+++ b/include/linux/platform_data/st33zp24.h
@@ -1,6 +1,6 @@
/*
- * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
- * Copyright (C) 2009, 2010 STMicroelectronics
+ * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
+ * Copyright (C) 2009 - 2015 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,20 +14,9 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * STMicroelectronics version 1.2.0, Copyright (C) 2010
- * STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
- * This is free software, and you are welcome to redistribute it
- * under certain conditions.
- *
- * @Author: Christophe RICARD tpmsupport@st.com
- *
- * @File: stm_st33_tpm.h
- *
- * @Date: 09/15/2010
*/
-#ifndef __STM_ST33_TPM_H__
-#define __STM_ST33_TPM_H__
+#ifndef __ST33ZP24_H__
+#define __ST33ZP24_H__
#define TPM_ST33_I2C "st33zp24-i2c"
#define TPM_ST33_SPI "st33zp24-spi"
@@ -36,4 +25,4 @@ struct st33zp24_platform_data {
int io_lpcpd;
};
-#endif /* __STM_ST33_TPM_H__ */
+#endif /* __ST33ZP24_H__ */
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
new file mode 100644
index 000000000000..d9d400a297bd
--- /dev/null
+++ b/include/linux/platform_data/st_nci.h
@@ -0,0 +1,29 @@
+/*
+ * Driver include for ST NCI NFC chip family.
+ *
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ST_NCI_H_
+#define _ST_NCI_H_
+
+#define ST_NCI_DRIVER_NAME "st_nci"
+
+struct st_nci_nfc_platform_data {
+ unsigned int gpio_reset;
+ unsigned int irq_polarity;
+};
+
+#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/video-msm_fb.h b/include/linux/platform_data/video-msm_fb.h
deleted file mode 100644
index 31449be3eadb..000000000000
--- a/include/linux/platform_data/video-msm_fb.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Internal shared definitions for various MSM framebuffer parts.
- *
- * Copyright (C) 2007 Google Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MSM_FB_H_
-#define _MSM_FB_H_
-
-#include <linux/device.h>
-
-struct mddi_info;
-
-struct msm_fb_data {
- int xres; /* x resolution in pixels */
- int yres; /* y resolution in pixels */
- int width; /* disply width in mm */
- int height; /* display height in mm */
- unsigned output_format;
-};
-
-struct msmfb_callback {
- void (*func)(struct msmfb_callback *);
-};
-
-enum {
- MSM_MDDI_PMDH_INTERFACE,
- MSM_MDDI_EMDH_INTERFACE,
- MSM_EBI2_INTERFACE,
-};
-
-#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
-
-struct msm_panel_data {
- /* turns off the fb memory */
- int (*suspend)(struct msm_panel_data *);
- /* turns on the fb memory */
- int (*resume)(struct msm_panel_data *);
- /* turns off the panel */
- int (*blank)(struct msm_panel_data *);
- /* turns on the panel */
- int (*unblank)(struct msm_panel_data *);
- void (*wait_vsync)(struct msm_panel_data *);
- void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
- void (*clear_vsync)(struct msm_panel_data *);
- /* from the enum above */
- unsigned interface_type;
- /* data to be passed to the fb driver */
- struct msm_fb_data *fb_data;
-
- /* capabilities supported by the panel */
- uint32_t caps;
-};
-
-struct msm_mddi_client_data {
- void (*suspend)(struct msm_mddi_client_data *);
- void (*resume)(struct msm_mddi_client_data *);
- void (*activate_link)(struct msm_mddi_client_data *);
- void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
- uint32_t reg);
- uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
- void (*auto_hibernate)(struct msm_mddi_client_data *, int);
- /* custom data that needs to be passed from the board file to a
- * particular client */
- void *private_client_data;
- struct resource *fb_resource;
- /* from the list above */
- unsigned interface_type;
-};
-
-struct msm_mddi_platform_data {
- unsigned int clk_rate;
- void (*power_client)(struct msm_mddi_client_data *, int on);
-
- /* fixup the mfr name, product id */
- void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
-
- struct resource *fb_resource; /*optional*/
- /* number of clients in the list that follows */
- int num_clients;
- /* array of client information of clients */
- struct {
- unsigned product_id; /* mfr id in top 16 bits, product id
- * in lower 16 bits
- */
- char *name; /* the device name will be the platform
- * device name registered for the client,
- * it should match the name of the associated
- * driver
- */
- unsigned id; /* id for mddi client device node, will also
- * be used as device id of panel devices, if
- * the client device will have multiple panels
- * space must be left here for them
- */
- void *client_data; /* required private client data */
- unsigned int clk_rate; /* optional: if the client requires a
- * different mddi clk rate
- */
- } client_platform_data[];
-};
-
-struct mdp_blit_req;
-struct fb_info;
-struct mdp_device {
- struct device dev;
- void (*dma)(struct mdp_device *mpd, uint32_t addr,
- uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
- uint32_t y, struct msmfb_callback *callback, int interface);
- void (*dma_wait)(struct mdp_device *mdp);
- int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
- struct mdp_blit_req *req);
- void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
-};
-
-struct class_interface;
-int register_mdp_client(struct class_interface *class_intf);
-
-/**** private client data structs go below this line ***/
-
-struct msm_mddi_bridge_platform_data {
- /* from board file */
- int (*init)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- int (*uninit)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- /* passed to panel for use by the fb driver */
- int (*blank)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- int (*unblank)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- struct msm_fb_data fb_data;
-};
-
-
-
-#endif
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index ae4882ca4a64..58f1e75ba105 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -59,7 +59,7 @@ extern int platform_add_devices(struct platform_device **, int);
struct platform_device_info {
struct device *parent;
- struct acpi_dev_node acpi_node;
+ struct fwnode_handle *fwnode;
const char *name;
int id;
diff --git a/include/linux/resume-trace.h b/include/linux/pm-trace.h
index f31db2368782..ecbde7a5548e 100644
--- a/include/linux/resume-trace.h
+++ b/include/linux/pm-trace.h
@@ -1,8 +1,8 @@
-#ifndef RESUME_TRACE_H
-#define RESUME_TRACE_H
+#ifndef PM_TRACE_H
+#define PM_TRACE_H
#ifdef CONFIG_PM_TRACE
-#include <asm/resume-trace.h>
+#include <asm/pm-trace.h>
#include <linux/types.h>
extern int pm_trace_enabled;
@@ -14,7 +14,7 @@ static inline int pm_trace_is_enabled(void)
struct device;
extern void set_trace_device(struct device *);
-extern void generate_resume_trace(const void *tracedata, unsigned int user);
+extern void generate_pm_trace(const void *tracedata, unsigned int user);
extern int show_trace_dev_match(char *buf, size_t size);
#define TRACE_DEVICE(dev) do { \
@@ -28,6 +28,7 @@ static inline int pm_trace_is_enabled(void) { return 0; }
#define TRACE_DEVICE(dev) do { } while (0)
#define TRACE_RESUME(dev) do { } while (0)
+#define TRACE_SUSPEND(dev) do { } while (0)
#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e2f1be6dd9dd..35d599e7250d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -342,6 +342,18 @@ struct dev_pm_ops {
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
+#ifdef CONFIG_PM_SLEEP
+#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_noirq = suspend_fn, \
+ .resume_noirq = resume_fn, \
+ .freeze_noirq = suspend_fn, \
+ .thaw_noirq = resume_fn, \
+ .poweroff_noirq = suspend_fn, \
+ .restore_noirq = resume_fn,
+#else
+#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
+#endif
+
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
.runtime_suspend = suspend_fn, \
@@ -529,6 +541,7 @@ enum rpm_request {
};
struct wakeup_source;
+struct wake_irq;
struct pm_domain_data;
struct pm_subsys_data {
@@ -568,6 +581,7 @@ struct dev_pm_info {
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
+ struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
@@ -603,10 +617,18 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions along with
* subsystem-level and driver-level callbacks.
+ *
+ * @detach: Called when removing a device from the domain.
+ * @activate: Called before executing probe routines for bus types and drivers.
+ * @sync: Called after successful driver probe.
+ * @dismiss: Called after unsuccessful driver probe and after driver removal.
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
void (*detach)(struct device *dev, bool power_off);
+ int (*activate)(struct device *dev);
+ void (*sync)(struct device *dev);
+ void (*dismiss)(struct device *dev);
};
/*
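A minimal sketch of the new SET_NOIRQ_SYSTEM_SLEEP_PM_OPS macro in a driver's dev_pm_ops; the callbacks are empty placeholders.

#include <linux/pm.h>

static int example_suspend_noirq(struct device *dev) { return 0; }
static int example_resume_noirq(struct device *dev) { return 0; }

/* The macro fills suspend/freeze/poweroff _noirq with the first callback and
 * resume/thaw/restore _noirq with the second, only under CONFIG_PM_SLEEP. */
static const struct dev_pm_ops example_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(example_suspend_noirq,
				      example_resume_noirq)
};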
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 0b0039634410..25266c600021 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -20,6 +20,16 @@ struct pm_clk_notifier_block {
struct clk;
+#ifdef CONFIG_PM
+extern int pm_clk_runtime_suspend(struct device *dev);
+extern int pm_clk_runtime_resume(struct device *dev);
+#define USE_PM_CLK_RUNTIME_OPS \
+ .runtime_suspend = pm_clk_runtime_suspend, \
+ .runtime_resume = pm_clk_runtime_resume,
+#else
+#define USE_PM_CLK_RUNTIME_OPS
+#endif
+
#ifdef CONFIG_PM_CLK
static inline bool pm_clk_no_clocks(struct device *dev)
{
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 080e778118ba..681ccb053f72 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -127,7 +127,7 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev);
extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td);
@@ -163,9 +163,9 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
new file mode 100644
index 000000000000..cd5b62db9084
--- /dev/null
+++ b/include/linux/pm_wakeirq.h
@@ -0,0 +1,51 @@
+/*
+ * pm_wakeirq.h - Device wakeirq helper functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_PM_WAKEIRQ_H
+#define _LINUX_PM_WAKEIRQ_H
+
+#ifdef CONFIG_PM
+
+extern int dev_pm_set_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
+ int irq);
+extern void dev_pm_clear_wake_irq(struct device *dev);
+extern void dev_pm_enable_wake_irq(struct device *dev);
+extern void dev_pm_disable_wake_irq(struct device *dev);
+
+#else /* !CONFIG_PM */
+
+static inline int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
+static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
+static inline void dev_pm_clear_wake_irq(struct device *dev)
+{
+}
+
+static inline void dev_pm_enable_wake_irq(struct device *dev)
+{
+}
+
+static inline void dev_pm_disable_wake_irq(struct device *dev)
+{
+}
+
+#endif /* CONFIG_PM */
+#endif /* _LINUX_PM_WAKEIRQ_H */
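A minimal sketch of a probe path wiring up the new wake IRQ helpers; the function name and IRQ source are hypothetical.

#include <linux/device.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>

/* Hypothetical probe tail: mark the device wakeup capable and attach a
 * dedicated wake IRQ so the PM core can arm it while the device sleeps. */
static int example_setup_wakeirq(struct device *dev, int wakeirq)
{
	int ret;

	ret = device_init_wakeup(dev, true);
	if (ret)
		return ret;

	return dev_pm_set_dedicated_wake_irq(dev, wakeirq);
}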
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index a0f70808d7f4..a3447932df1f 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -28,9 +28,17 @@
#include <linux/types.h>
+struct wake_irq;
+
/**
* struct wakeup_source - Representation of wakeup sources
*
+ * @name: Name of the wakeup source
+ * @entry: Wakeup source list entry
+ * @lock: Wakeup source lock
+ * @wakeirq: Optional device specific wakeirq
+ * @timer: Wakeup timer list
+ * @timer_expires: Wakeup timer expiration
* @total_time: Total time this wakeup source has been active.
* @max_time: Maximum time this wakeup source has been continuously active.
* @last_time: Monotonic clock when the wakeup source was last touched.
@@ -47,6 +55,7 @@ struct wakeup_source {
const char *name;
struct list_head entry;
spinlock_t lock;
+ struct wake_irq *wakeirq;
struct timer_list timer;
unsigned long timer_expires;
ktime_t total_time;
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 6512e9cbc6d5..5df733b8f704 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -510,4 +510,16 @@ static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
#endif /* CONFIG_PNP */
+/**
+ * module_pnp_driver() - Helper macro for registering a PnP driver
+ * @__pnp_driver: pnp_driver struct
+ *
+ * Helper macro for PnP drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_pnp_driver(__pnp_driver) \
+ module_driver(__pnp_driver, pnp_register_driver, \
+ pnp_unregister_driver)
+
#endif /* _LINUX_PNP_H */
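A minimal sketch of a PnP driver collapsed onto the new macro; the ID, driver name and probe body are hypothetical.

#include <linux/module.h>
#include <linux/pnp.h>

static const struct pnp_device_id example_pnp_ids[] = {
	{ .id = "PNP0C02", .driver_data = 0 },	/* hypothetical ID */
	{ }
};
MODULE_DEVICE_TABLE(pnp, example_pnp_ids);

static int example_pnp_probe(struct pnp_dev *dev,
			     const struct pnp_device_id *id)
{
	return 0;
}

static struct pnp_driver example_pnp_driver = {
	.name		= "example-pnp",
	.id_table	= example_pnp_ids,
	.probe		= example_pnp_probe,
};

/* Replaces the module_init()/module_exit() pair that would otherwise only
 * call pnp_register_driver()/pnp_unregister_driver(). */
module_pnp_driver(example_pnp_driver);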
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index cf112b4075c8..522757ac9cd4 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -215,6 +215,10 @@ struct max17042_platform_data {
* the datasheet although it can be changed by board designers.
*/
unsigned int r_sns;
+ int vmin; /* in millivolts */
+ int vmax; /* in millivolts */
+ int temp_min; /* in tenths of degree Celsius */
+ int temp_max; /* in tenths of degree Celsius */
};
#endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 75a1dd8dc56e..ef9f1592185d 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -206,6 +206,11 @@ struct power_supply_desc {
int (*set_property)(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+ /*
+ * property_is_writeable() is called while the power supply is being
+ * registered. If that happens during device probe, it must not touch
+ * the device's internal data, because the probe has not finished yet.
+ */
int (*property_is_writeable)(struct power_supply *psy,
enum power_supply_property psp);
void (*external_power_changed)(struct power_supply *psy);
@@ -237,6 +242,7 @@ struct power_supply {
/* private */
struct device dev;
struct work_struct changed_work;
+ struct delayed_work deferred_register_work;
spinlock_t changed_lock;
bool changed;
atomic_t use_cnt;
@@ -286,10 +292,15 @@ extern void power_supply_put(struct power_supply *psy);
#ifdef CONFIG_OF
extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
const char *property);
+extern struct power_supply *devm_power_supply_get_by_phandle(
+ struct device *dev, const char *property);
#else /* !CONFIG_OF */
static inline struct power_supply *
power_supply_get_by_phandle(struct device_node *np, const char *property)
{ return NULL; }
+static inline struct power_supply *
+devm_power_supply_get_by_phandle(struct device *dev, const char *property)
+{ return NULL; }
#endif /* CONFIG_OF */
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
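A minimal sketch using the new devm_ lookup declared above; the property name is driver-specific and hypothetical here.

#include <linux/err.h>
#include <linux/power_supply.h>

/* Hypothetical charger probe step: resolve the battery it monitors from a
 * DT phandle property; the managed variant needs no explicit put. */
static int example_get_battery(struct device *dev,
			       struct power_supply **battery)
{
	*battery = devm_power_supply_get_by_phandle(dev, "monitored-battery");
	if (IS_ERR(*battery))
		return PTR_ERR(*battery);
	if (!*battery)
		return -ENODEV;		/* stubbed out without CONFIG_OF */
	return 0;
}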
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index de83b4eb1642..0f1534acaf60 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,13 +10,117 @@
#include <linux/list.h>
/*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count could in theory be the same as the number of
+ * interrupts in the system, but we run all interrupt handlers with
+ * interrupts disabled, so we cannot have nesting interrupts. Though
+ * there are a few palaeontologic drivers which reenable interrupts in
+ * the handler, so we need more than one bit here.
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x000f0000
+ * NMI_MASK: 0x00100000
+ * PREEMPT_ACTIVE: 0x00200000
+ * PREEMPT_NEED_RESCHED: 0x80000000
*/
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
+#define NMI_BITS 1
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
+
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
+#define PREEMPT_ACTIVE_BITS 1
+#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+
+/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_DISABLE_OFFSET 1
+#else
+# define PREEMPT_DISABLE_OFFSET 0
+#endif
+
+/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+ *
+ * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ * spin_unlock();
+ * local_bh_enable();
+ *
+ * Work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic() (preempt_count() != 0)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() \
+ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
@@ -33,6 +137,18 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
+#define preempt_active_enter() \
+do { \
+ preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+ barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+ barrier(); \
+ preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
@@ -49,6 +165,8 @@ do { \
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
@@ -57,52 +175,46 @@ do { \
__preempt_schedule(); \
} while (0)
+#define preempt_enable_notrace() \
+do { \
+ barrier(); \
+ if (unlikely(__preempt_count_dec_and_test())) \
+ __preempt_schedule_notrace(); \
+} while (0)
+
#define preempt_check_resched() \
do { \
if (should_resched()) \
__preempt_schedule(); \
} while (0)
-#else
+#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)
-#define preempt_check_resched() do { } while (0)
-#endif
-
-#define preempt_disable_notrace() \
-do { \
- __preempt_count_inc(); \
- barrier(); \
-} while (0)
-#define preempt_enable_no_resched_notrace() \
+#define preempt_enable_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
-#ifdef CONFIG_PREEMPT
-
-#ifndef CONFIG_CONTEXT_TRACKING
-#define __preempt_schedule_context() __preempt_schedule()
-#endif
+#define preempt_check_resched() do { } while (0)
+#endif /* CONFIG_PREEMPT */
-#define preempt_enable_notrace() \
+#define preempt_disable_notrace() \
do { \
+ __preempt_count_inc(); \
barrier(); \
- if (unlikely(__preempt_count_dec_and_test())) \
- __preempt_schedule_context(); \
} while (0)
-#else
-#define preempt_enable_notrace() \
+
+#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
-#endif
#else /* !CONFIG_PREEMPT_COUNT */
@@ -121,6 +233,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
+#define preemptible() 0
#endif /* CONFIG_PREEMPT_COUNT */
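A small worked example of the bit layout documented above, decoding one hypothetical preempt_count() value; illustration only.

#include <linux/bug.h>
#include <linux/preempt.h>

/* With count = 0x00010102:
 *   preemption depth = (count & PREEMPT_MASK) >> PREEMPT_SHIFT = 2
 *   softirq count    = (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT = 1
 *   hardirq count    = (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT = 1
 * so in_irq(), in_softirq() and in_interrupt() would all be non-zero. */
static void example_decode_preempt_count(void)
{
	unsigned long count = 0x00010102UL;

	WARN_ON((count & PREEMPT_MASK) != 2);
	WARN_ON(((count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT) != 1);
	WARN_ON(((count & HARDIRQ_MASK) >> HARDIRQ_SHIFT) != 1);
}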
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
deleted file mode 100644
index dbeec4d4a3be..000000000000
--- a/include/linux/preempt_mask.h
+++ /dev/null
@@ -1,117 +0,0 @@
-#ifndef LINUX_PREEMPT_MASK_H
-#define LINUX_PREEMPT_MASK_H
-
-#include <linux/preempt.h>
-
-/*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- *
- * The hardirq count could in theory be the same as the number of
- * interrupts in the system, but we run all interrupt handlers with
- * interrupts disabled, so we cannot have nesting interrupts. Though
- * there are a few palaeontologic drivers which reenable interrupts in
- * the handler, so we need more than one bit here.
- *
- * PREEMPT_MASK: 0x000000ff
- * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x000f0000
- * NMI_MASK: 0x00100000
- * PREEMPT_ACTIVE: 0x00200000
- */
-#define PREEMPT_BITS 8
-#define SOFTIRQ_BITS 8
-#define HARDIRQ_BITS 4
-#define NMI_BITS 1
-
-#define PREEMPT_SHIFT 0
-#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
-
-#define __IRQ_MASK(x) ((1UL << (x))-1)
-
-#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
-
-#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-#define NMI_OFFSET (1UL << NMI_SHIFT)
-
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-
-#define PREEMPT_ACTIVE_BITS 1
-#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
-#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-
-#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
- */
-#define in_irq() (hardirq_count())
-#define in_softirq() (softirq_count())
-#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
-
-#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_CHECK_OFFSET 1
-#else
-# define PREEMPT_CHECK_OFFSET 0
-#endif
-
-/*
- * The preempt_count offset needed for things like:
- *
- * spin_lock_bh()
- *
- * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
- * softirqs, such that unlock sequences of:
- *
- * spin_unlock();
- * local_bh_enable();
- *
- * Work as expected.
- */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
-
-/*
- * Are we running in atomic context? WARNING: this macro cannot
- * always detect atomic context; in particular, it cannot know about
- * held spinlocks in non-preemptible kernels. Thus it should not be
- * used in the general case to determine whether sleeping is possible.
- * Do not use in_atomic() in driver code.
- */
-#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-
-/*
- * Check whether we were atomic before we did preempt_disable():
- * (used by the scheduler, *after* releasing the kernel lock)
- */
-#define in_atomic_preempt_off() \
- ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
-
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible() 0
-#endif
-
-#endif /* LINUX_PREEMPT_MASK_H */
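
The removed header documented how the preemption, softirq, hardirq and NMI counts are packed into a single preempt_count word. The following is a minimal userspace illustration of that layout, built only from the shifts and masks quoted in the removed comment; it is not kernel code, and the example value is hypothetical.

/* Illustration only: decode a preempt_count value using the layout above. */
#include <stdio.h>

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	8		/* PREEMPT_SHIFT + 8 preempt bits */
#define HARDIRQ_SHIFT	16		/* SOFTIRQ_SHIFT + 8 softirq bits */
#define NMI_SHIFT	20		/* HARDIRQ_SHIFT + 4 hardirq bits */

#define PREEMPT_MASK	0x000000ffUL
#define SOFTIRQ_MASK	0x0000ff00UL
#define HARDIRQ_MASK	0x000f0000UL
#define NMI_MASK	0x00100000UL

int main(void)
{
	/* One level of preempt_disable() inside a hardirq handler. */
	unsigned long count = (1UL << HARDIRQ_SHIFT) | (1UL << PREEMPT_SHIFT);

	printf("preempt depth: %lu\n", (count & PREEMPT_MASK) >> PREEMPT_SHIFT);
	printf("softirq count: %lu\n", (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
	printf("hardirq count: %lu\n", (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
	printf("in_interrupt:  %d\n",
	       (count & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK)) != 0);
	return 0;
}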
diff --git a/include/linux/printk.h b/include/linux/printk.h
index baa3f97d8ce8..9b30871c9149 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -255,6 +255,11 @@ extern asmlinkage void dump_stack(void) __cold;
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/*
+ * Like KERN_CONT, pr_cont() should only be used when continuing a line
+ * that does not yet end with a newline ('\n'). Otherwise it defaults
+ * back to KERN_DEFAULT.
+ */
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
diff --git a/include/linux/property.h b/include/linux/property.h
index a6a3d98bd7e9..76ebde9c11d4 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -13,6 +13,7 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/fwnode.h>
#include <linux/types.h>
struct device;
@@ -40,16 +41,6 @@ int device_property_read_string_array(struct device *dev, const char *propname,
int device_property_read_string(struct device *dev, const char *propname,
const char **val);
-enum fwnode_type {
- FWNODE_INVALID = 0,
- FWNODE_OF,
- FWNODE_ACPI,
-};
-
-struct fwnode_handle {
- enum fwnode_type type;
-};
-
bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
const char *propname, u8 *val,
@@ -140,4 +131,39 @@ static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
return fwnode_property_read_u64_array(fwnode, propname, val, 1);
}
+/**
+ * struct property_entry - "Built-in" device property representation.
+ * @name: Name of the property.
+ * @type: Type of the property.
+ * @nval: Number of items of type @type making up the value.
+ * @value: Value of the property (an array of @nval items of type @type).
+ */
+struct property_entry {
+ const char *name;
+ enum dev_prop_type type;
+ size_t nval;
+ union {
+ void *raw_data;
+ u8 *u8_data;
+ u16 *u16_data;
+ u32 *u32_data;
+ u64 *u64_data;
+ const char **str;
+ } value;
+};
+
+/**
+ * struct property_set - Collection of "built-in" device properties.
+ * @fwnode: Handle to be pointed to by the fwnode field of struct device.
+ * @properties: Array of properties terminated with a null entry.
+ */
+struct property_set {
+ struct fwnode_handle fwnode;
+ struct property_entry *properties;
+};
+
+void device_add_property_set(struct device *dev, struct property_set *pset);
+
+bool device_dma_is_coherent(struct device *dev);
+
#endif /* _LINUX_PROPERTY_H_ */
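
The new struct property_entry / struct property_set pair lets platform code attach "built-in" device properties through the fwnode handle embedded in the set. Below is a minimal sketch of how such a set might be built; it assumes the pre-existing enum dev_prop_type constants (DEV_PROP_U32, DEV_PROP_STRING) and a caller that already holds a real struct device, both of which are outside this patch.

/* Sketch: attach built-in properties to a device (DEV_PROP_* assumed). */
static u32 sample_rate = 48000;
static const char *label = "demo-codec";

static struct property_entry demo_props[] = {
	{
		.name  = "sample-rate",
		.type  = DEV_PROP_U32,
		.nval  = 1,
		.value = { .u32_data = &sample_rate },
	},
	{
		.name  = "label",
		.type  = DEV_PROP_STRING,
		.nval  = 1,
		.value = { .str = &label },
	},
	{ },				/* null entry terminates the array */
};

static struct property_set demo_pset = {
	.properties = demo_props,
};

/* In platform setup code, with a real struct device *dev: */
/* device_add_property_set(dev, &demo_pset); */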
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 8884f6e507f7..8e7a25b068b0 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -40,6 +40,7 @@ enum pstore_type_id {
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
PSTORE_TYPE_PMSG = 7,
+ PSTORE_TYPE_PPC_OPAL = 8,
PSTORE_TYPE_UNKNOWN = 255
};
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 0d8ff3fb84ba..b8b73066d137 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -64,11 +64,11 @@ struct ptp_clock_request {
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
*
- * @gettime: Reads the current time from the hardware clock.
- * parameter ts: Holds the result.
+ * @gettime64: Reads the current time from the hardware clock.
+ * parameter ts: Holds the result.
*
- * @settime: Set the current time on the hardware clock.
- * parameter ts: Time value to set.
+ * @settime64: Set the current time on the hardware clock.
+ * parameter ts: Time value to set.
*
* @enable: Request driver to enable or disable an ancillary feature.
* parameter request: Desired resource to enable or disable.
@@ -104,8 +104,8 @@ struct ptp_clock_info {
struct ptp_pin_desc *pin_config;
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
- int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
- int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
+ int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
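
The gettime/settime callbacks become gettime64/settime64 and now take struct timespec64, making the PTP clock interface safe beyond year 2038. A hedged sketch of a driver-side gettime64 implementation under the new signature follows; read_hw_counter_ns() is a hypothetical hardware accessor, not part of this interface.

/* Sketch of a driver callback for the 64-bit interface. */
static int demo_ptp_gettime64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts)
{
	u64 ns = read_hw_counter_ns();	/* hypothetical hardware accessor */

	*ts = ns_to_timespec64(ns);
	return 0;
}

static struct ptp_clock_info demo_ptp_info = {
	.owner     = THIS_MODULE,
	.name      = "demo-ptp",
	.gettime64 = demo_ptp_gettime64,
	/* .settime64, .adjfreq, .adjtime, ... */
};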
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index e90628cac8fa..36262d08a9da 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -182,6 +182,8 @@ struct pwm_chip {
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
+int pwmchip_add_with_polarity(struct pwm_chip *chip,
+ enum pwm_polarity polarity);
int pwmchip_add(struct pwm_chip *chip);
int pwmchip_remove(struct pwm_chip *chip);
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
@@ -217,6 +219,11 @@ static inline int pwmchip_add(struct pwm_chip *chip)
return -EINVAL;
}
+static inline int pwmchip_add_inversed(struct pwm_chip *chip)
+{
+ return -EINVAL;
+}
+
static inline int pwmchip_remove(struct pwm_chip *chip)
{
return -EINVAL;
@@ -290,10 +297,15 @@ struct pwm_lookup {
#if IS_ENABLED(CONFIG_PWM)
void pwm_add_table(struct pwm_lookup *table, size_t num);
+void pwm_remove_table(struct pwm_lookup *table, size_t num);
#else
static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
{
}
+
+static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
+{
+}
#endif
#ifdef CONFIG_PWM_SYSFS
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index dab545bb66b3..0485bab061fd 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -194,8 +194,9 @@ enum pxa_ssp_type {
PXA168_SSP,
PXA910_SSP,
CE4100_SSP,
- LPSS_SSP,
QUARK_X1000_SSP,
+ LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_BYT_SSP,
};
struct ssp_device {
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
new file mode 100644
index 000000000000..d7a974d5f57c
--- /dev/null
+++ b/include/linux/qcom_scm.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_SCM_H
+#define __QCOM_SCM_H
+
+extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
+extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+
+#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
+#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
+
+extern void qcom_scm_cpu_power_down(u32 flags);
+
+#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+
+extern u32 qcom_scm_get_version(void);
+
+#endif
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d534e8ed308a..b2505acfd3c0 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -50,6 +50,7 @@
#undef USRQUOTA
#undef GRPQUOTA
+#undef PRJQUOTA
enum quota_type {
USRQUOTA = 0, /* element used for user quotas */
GRPQUOTA = 1, /* element used for group quotas */
@@ -319,6 +320,7 @@ struct dquot_operations {
/* get reserved quota for delayed alloc, value returned is managed by
* quota code only */
qsize_t *(*get_reserved_space) (struct inode *);
+ int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
};
struct path;
@@ -344,7 +346,10 @@ struct qc_dqblk {
int d_rt_spc_warns; /* # warnings issued wrt RT space */
};
-/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
+/*
+ * Field specifiers for ->set_dqblk() in struct qc_dqblk and also for
+ * ->set_info() in struct qc_info
+ */
#define QC_INO_SOFT (1<<0)
#define QC_INO_HARD (1<<1)
#define QC_SPC_SOFT (1<<2)
@@ -365,6 +370,51 @@ struct qc_dqblk {
#define QC_INO_COUNT (1<<13)
#define QC_RT_SPACE (1<<14)
#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+#define QC_FLAGS (1<<15)
+
+#define QCI_SYSFILE (1 << 0) /* Quota file is hidden from userspace */
+#define QCI_ROOT_SQUASH (1 << 1) /* Root squash turned on */
+#define QCI_ACCT_ENABLED (1 << 2) /* Quota accounting enabled */
+#define QCI_LIMITS_ENFORCED (1 << 3) /* Quota limits enforced */
+
+/* Structures for communicating via ->get_state */
+struct qc_type_state {
+ unsigned int flags; /* Flags QCI_* */
+ unsigned int spc_timelimit; /* Time after which space softlimit is
+ * enforced */
+ unsigned int ino_timelimit; /* Ditto for inode softlimit */
+ unsigned int rt_spc_timelimit; /* Ditto for real-time space */
+ unsigned int spc_warnlimit; /* Limit for number of space warnings */
+ unsigned int ino_warnlimit; /* Ditto for inodes */
+ unsigned int rt_spc_warnlimit; /* Ditto for real-time space */
+ unsigned long long ino; /* Inode number of quota file */
+ blkcnt_t blocks; /* Number of 512-byte blocks in the file */
+ blkcnt_t nextents; /* Number of extents in the file */
+};
+
+struct qc_state {
+ unsigned int s_incoredqs; /* Number of dquots in core */
+ /*
+ * Per quota type information. The array should really have
+ * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
+ * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
+ * supports project quotas, this can be changed to MAXQUOTAS
+ */
+ struct qc_type_state s_state[XQM_MAXQUOTAS];
+};
+
+/* Structure for communicating via ->set_info */
+struct qc_info {
+ int i_fieldmask; /* mask of fields to change in ->set_info() */
+ unsigned int i_flags; /* Flags QCI_* */
+ unsigned int i_spc_timelimit; /* Time after which space softlimit is
+ * enforced */
+ unsigned int i_ino_timelimit; /* Ditto for inode softlimit */
+ unsigned int i_rt_spc_timelimit;/* Ditto for real-time space */
+ unsigned int i_spc_warnlimit; /* Limit for number of space warnings */
+ unsigned int i_ino_warnlimit; /* Limit for number of inode warnings */
+ unsigned int i_rt_spc_warnlimit; /* Ditto for real-time space */
+};
/* Operations handling requests from userspace */
struct quotactl_ops {
@@ -373,12 +423,10 @@ struct quotactl_ops {
int (*quota_enable)(struct super_block *, unsigned int);
int (*quota_disable)(struct super_block *, unsigned int);
int (*quota_sync)(struct super_block *, int);
- int (*get_info)(struct super_block *, int, struct if_dqinfo *);
- int (*set_info)(struct super_block *, int, struct if_dqinfo *);
+ int (*set_info)(struct super_block *, int, struct qc_info *);
int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
- int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
- int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
+ int (*get_state)(struct super_block *, struct qc_state *);
int (*rm_xquota)(struct super_block *, unsigned int);
};
@@ -389,7 +437,19 @@ struct quota_format_type {
struct quota_format_type *qf_next;
};
-/* Quota state flags - they actually come in two flavors - for users and groups */
+/**
+ * Quota state flags - they actually come in two flavors - for users and groups.
+ *
+ * Actual typed flags layout:
+ * USRQUOTA GRPQUOTA
+ * DQUOT_USAGE_ENABLED 0x0001 0x0002
+ * DQUOT_LIMITS_ENABLED 0x0004 0x0008
+ * DQUOT_SUSPENDED 0x0010 0x0020
+ *
+ * Following bits are used for non-typed flags:
+ * DQUOT_QUOTA_SYS_FILE 0x0040
+ * DQUOT_NEGATIVE_USAGE 0x0080
+ */
enum {
_DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */
_DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */
@@ -398,9 +458,9 @@ enum {
* memory to turn them on */
_DQUOT_STATE_FLAGS
};
-#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED)
-#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED)
-#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED)
+#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED * MAXQUOTAS)
+#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED * MAXQUOTAS)
+#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED * MAXQUOTAS)
#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \
DQUOT_SUSPENDED)
/* Other quota flags */
@@ -414,15 +474,21 @@ enum {
*/
#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1))
/* Allow negative quota usage */
-
static inline unsigned int dquot_state_flag(unsigned int flags, int type)
{
- return flags << _DQUOT_STATE_FLAGS * type;
+ return flags << type;
}
static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
{
- return (flags >> _DQUOT_STATE_FLAGS * type) & DQUOT_STATE_FLAGS;
+ return (flags >> type) & DQUOT_STATE_FLAGS;
+}
+
+/* Bitmap of quota types where flag is set in flags */
+static __always_inline unsigned dquot_state_types(unsigned flags, unsigned flag)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(flag);
+ return (flags / flag) & ((1 << MAXQUOTAS) - 1);
}
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
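
With this rework the per-type state bits are laid out contiguously per flag rather than per type, so dquot_state_flag() only shifts by the quota type and dquot_state_types() recovers a bitmap of types. A small standalone worked example of the encoding, using the values from the typed-flag table above and assuming two quota types (users and groups):

/* Illustration only: mirrors the new dquot_state_* arithmetic. */
#include <stdio.h>

enum { EX_USRQUOTA = 0, EX_GRPQUOTA = 1, EX_MAXQUOTAS = 2 };
enum { EX_USAGE = 0x0001, EX_LIMITS = 0x0004, EX_SUSPENDED = 0x0010 };

static unsigned ex_state_flag(unsigned flags, int type)
{
	return flags << type;			/* mirrors dquot_state_flag() */
}

static unsigned ex_state_types(unsigned flags, unsigned flag)
{
	/* mirrors dquot_state_types(): bitmap of types with @flag set */
	return (flags / flag) & ((1 << EX_MAXQUOTAS) - 1);
}

int main(void)
{
	unsigned flags = ex_state_flag(EX_USAGE, EX_USRQUOTA) |
			 ex_state_flag(EX_USAGE, EX_GRPQUOTA) |
			 ex_state_flag(EX_SUSPENDED, EX_GRPQUOTA);

	printf("flags           = 0x%04x\n", flags);			  /* 0x0023 */
	printf("usage types     = 0x%x\n", ex_state_types(flags, EX_USAGE));	  /* 0x3 */
	printf("suspended types = 0x%x\n", ex_state_types(flags, EX_SUSPENDED)); /* 0x2 */
	return 0;
}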
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index df73258cca47..77ca6601ff25 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -95,8 +95,8 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
int dquot_quota_off(struct super_block *sb, int type);
int dquot_writeback_dquots(struct super_block *sb, int type);
int dquot_quota_sync(struct super_block *sb, int type);
-int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
-int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
+int dquot_get_state(struct super_block *sb, struct qc_state *state);
+int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
int dquot_get_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
int dquot_set_dqblk(struct super_block *sb, struct kqid id,
@@ -134,10 +134,7 @@ static inline bool sb_has_quota_suspended(struct super_block *sb, int type)
static inline unsigned sb_any_quota_suspended(struct super_block *sb)
{
- unsigned type, tmsk = 0;
- for (type = 0; type < MAXQUOTAS; type++)
- tmsk |= sb_has_quota_suspended(sb, type) << type;
- return tmsk;
+ return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_SUSPENDED);
}
/* Does kernel know about any quota information for given sb + type? */
@@ -149,10 +146,7 @@ static inline bool sb_has_quota_loaded(struct super_block *sb, int type)
static inline unsigned sb_any_quota_loaded(struct super_block *sb)
{
- unsigned type, tmsk = 0;
- for (type = 0; type < MAXQUOTAS; type++)
- tmsk |= sb_has_quota_loaded(sb, type) << type;
- return tmsk;
+ return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_USAGE_ENABLED);
}
static inline bool sb_has_quota_active(struct super_block *sb, int type)
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 73069cb6c54a..a7a06d1dcf9c 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -72,6 +72,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
/* Routine choices */
struct raid6_calls {
void (*gen_syndrome)(int, size_t, void **);
+ void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
int prefer; /* Has special performance attribute */
diff --git a/include/linux/random.h b/include/linux/random.h
index b05856e16b75..e651874df2c9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -6,14 +6,23 @@
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/list.h>
#include <uapi/linux/random.h>
+struct random_ready_callback {
+ struct list_head list;
+ void (*func)(struct random_ready_callback *rdy);
+ struct module *owner;
+};
+
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
+extern int add_random_ready_callback(struct random_ready_callback *rdy);
+extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
extern int random_int_secret_init(void);
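
add_random_ready_callback() lets a module defer work (for example, reseeding a DRBG) until the nonblocking pool is initialized. A hedged sketch of the intended registration pattern follows; the error-handling convention (a non-zero return when the pool is already ready or registration fails) and demo_reseed_from_pool() are assumptions, not part of this header.

/* Sketch: run seeding work once the RNG is ready (return codes assumed). */
static void demo_seed_now(struct random_ready_callback *rdy)
{
	/* Called by the random core once enough entropy is available. */
	demo_reseed_from_pool();	/* hypothetical driver helper */
}

static struct random_ready_callback demo_rdy = {
	.owner = THIS_MODULE,
	.func  = demo_seed_now,
};

static int demo_init(void)
{
	int err = add_random_ready_callback(&demo_rdy);

	if (err)	/* assumed: pool already ready, seed immediately */
		demo_reseed_from_pool();
	return 0;
}

static void demo_exit(void)
{
	del_random_ready_callback(&demo_rdy);
}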
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index a18b16f1dc0e..17c6b1f84a77 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -29,8 +29,8 @@
*/
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
- ACCESS_ONCE(list->next) = list;
- ACCESS_ONCE(list->prev) = list;
+ WRITE_ONCE(list->next, list);
+ WRITE_ONCE(list->prev, list);
}
/*
@@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
#define list_first_or_null_rcu(ptr, type, member) \
({ \
struct list_head *__ptr = (ptr); \
- struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+ struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
@@ -549,8 +549,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
*/
#define hlist_for_each_entry_from_rcu(pos, member) \
for (; pos; \
- pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
- typeof(*(pos)), member))
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 78097491cd99..33a056bb886f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,10 +44,32 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/ktime.h>
+
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
+#ifdef CONFIG_TINY_RCU
+/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */
+{
+ return false;
+}
+
+static inline void rcu_expedite_gp(void)
+{
+}
+
+static inline void rcu_unexpedite_gp(void)
+{
+}
+#else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_expedited(void); /* Internal RCU use. */
+void rcu_expedite_gp(void);
+void rcu_unexpedite_gp(void);
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
enum rcutorture_type {
RCU_FLAVOR,
RCU_BH_FLAVOR,
@@ -195,6 +217,15 @@ void call_rcu_sched(struct rcu_head *head,
void synchronize_sched(void);
+/*
+ * Structure allowing asynchronous waiting on RCU.
+ */
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
+void wakeme_after_rcu(struct rcu_head *head);
+
/**
* call_rcu_tasks() - Queue an RCU for invocation task-based grace period
* @head: structure to be used for queueing the RCU updates.
@@ -258,14 +289,13 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
+void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
+int rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -332,8 +362,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
- if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+ if (READ_ONCE((t)->rcu_tasks_holdout)) \
+ WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
@@ -577,7 +607,7 @@ static inline void rcu_preempt_sleep_check(void)
#define __rcu_access_pointer(p, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
@@ -596,21 +626,6 @@ static inline void rcu_preempt_sleep_check(void)
((typeof(*p) __force __kernel *)(p)); \
})
-#define __rcu_access_index(p, space) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_dereference_sparse(p, space); \
- (_________p1); \
-})
-#define __rcu_dereference_index_check(p, c) \
-({ \
- /* Dependency order vs. p above. */ \
- typeof(p) _________p1 = lockless_dereference(p); \
- rcu_lockdep_assert(c, \
- "suspicious rcu_dereference_index_check() usage"); \
- (_________p1); \
-})
-
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
@@ -627,7 +642,7 @@ static inline void rcu_preempt_sleep_check(void)
*/
#define lockless_dereference(p) \
({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
+ typeof(p) _________p1 = READ_ONCE(p); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
@@ -670,7 +685,7 @@ static inline void rcu_preempt_sleep_check(void)
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
@@ -720,7 +735,7 @@ static inline void rcu_preempt_sleep_check(void)
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
+ __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
@@ -730,7 +745,7 @@ static inline void rcu_preempt_sleep_check(void)
* This is the RCU-bh counterpart to rcu_dereference_check().
*/
#define rcu_dereference_bh_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
+ __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -740,7 +755,7 @@ static inline void rcu_preempt_sleep_check(void)
* This is the RCU-sched counterpart to rcu_dereference_check().
*/
#define rcu_dereference_sched_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+ __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu)
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
@@ -755,47 +770,12 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
/**
- * rcu_access_index() - fetch RCU index with no dereferencing
- * @p: The index to read
- *
- * Return the value of the specified RCU-protected index, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
- * when the value of this index is accessed, but the index is not
- * dereferenced, for example, when testing an RCU-protected index against
- * -1. Although rcu_access_index() may also be used in cases where
- * update-side locks prevent the value of the index from changing, you
- * should instead use rcu_dereference_index_protected() for this use case.
- */
-#define rcu_access_index(p) __rcu_access_index((p), __rcu)
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices. Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer. Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections. If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
- __rcu_dereference_index_check((p), (c))
-
-/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does -not-
* prevent the compiler from repeating this reference or combining it
@@ -933,9 +913,9 @@ static inline void rcu_read_unlock(void)
{
rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
- rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
+ rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
/**
@@ -1121,13 +1101,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
-#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
+#ifdef CONFIG_TINY_RCU
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
- *delta_jiffies = ULONG_MAX;
+ *nextevt = KTIME_MAX;
return 0;
}
-#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+#endif /* #ifdef CONFIG_TINY_RCU */
#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 937edaeb150d..3df6c1ec4e25 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -159,6 +159,22 @@ static inline void rcu_cpu_stall_reset(void)
{
}
+static inline void rcu_idle_enter(void)
+{
+}
+
+static inline void rcu_idle_exit(void)
+{
+}
+
+static inline void rcu_irq_enter(void)
+{
+}
+
+static inline void rcu_irq_exit(void)
+{
+}
+
static inline void exit_rcu(void)
{
}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d2e583a6aaca..456879143f89 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -31,9 +31,7 @@
#define __LINUX_RCUTREE_H
void rcu_note_context_switch(void);
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *delta_jiffies);
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
+int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
/*
@@ -93,6 +91,11 @@ void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
+
void exit_rcu(void);
void rcu_scheduler_starting(void);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 67fc8fcdc4b0..a7ff409f386d 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -70,7 +70,8 @@ void ctrl_alt_del(void);
#define POWEROFF_CMD_PATH_LEN 256
extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
-extern int orderly_poweroff(bool force);
+extern void orderly_poweroff(bool force);
+extern void orderly_reboot(void);
/*
* Emergency restart, callable from an interrupt handler.
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 116655d92269..59c55ea0f0b5 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -433,6 +433,8 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
int regmap_get_val_bytes(struct regmap *map);
+int regmap_get_max_register(struct regmap *map);
+int regmap_get_reg_stride(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
@@ -676,6 +678,18 @@ static inline int regmap_get_val_bytes(struct regmap *map)
return -EINVAL;
}
+static inline int regmap_get_max_register(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_reg_stride(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regcache_sync(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fffa688ac3a7..4db9fbe4889d 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -91,6 +91,7 @@ struct regulator_linear_range {
* @set_current_limit: Configure a limit for a current-limited regulator.
* The driver should select the current closest to max_uA.
* @get_current_limit: Get the configured limit for a current-limited regulator.
+ * @set_input_current_limit: Configure an input limit.
*
* @set_mode: Set the configured operating mode for the regulator.
* @get_mode: Get the configured operating mode for the regulator.
@@ -111,6 +112,7 @@ struct regulator_linear_range {
* to stabilise after being set to a new value, in microseconds.
* The function provides the from and to voltage selector, the
* function should return the worst case.
+ * @set_soft_start: Enable soft start for the regulator.
*
* @set_suspend_voltage: Set the voltage for the regulator when the system
* is suspended.
@@ -121,6 +123,9 @@ struct regulator_linear_range {
* @set_suspend_mode: Set the operating mode for the regulator when the
* system is suspended.
*
+ * @set_pull_down: Configure the regulator to pull down when the regulator
+ * is disabled.
+ *
* This struct describes regulator operations which can be implemented by
* regulator chip drivers.
*/
@@ -142,6 +147,8 @@ struct regulator_ops {
int min_uA, int max_uA);
int (*get_current_limit) (struct regulator_dev *);
+ int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
+
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
int (*disable) (struct regulator_dev *);
@@ -158,6 +165,8 @@ struct regulator_ops {
unsigned int old_selector,
unsigned int new_selector);
+ int (*set_soft_start) (struct regulator_dev *);
+
/* report regulator status ... most other accessors report
* control inputs, this reports results of combining inputs
* from Linux (and other sources) with the actual load.
@@ -187,6 +196,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+
+ int (*set_pull_down) (struct regulator_dev *);
};
/*
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b07562e082c4..b11be1260129 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -75,6 +75,8 @@ struct regulator_state {
*
* @min_uA: Smallest current consumers may set.
* @max_uA: Largest current consumers may set.
+ * @ilim_uA: Maximum input current.
+ * @system_load: Load that isn't captured by any consumer requests.
*
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
@@ -86,6 +88,8 @@ struct regulator_state {
* applied.
* @apply_uV: Apply the voltage constraint when initialising.
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
+ * @soft_start: Enable soft start so that voltage ramps slowly.
+ * @pull_down: Enable pull down when regulator is disabled.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
@@ -111,6 +115,9 @@ struct regulation_constraints {
/* current output range (inclusive) - for current control */
int min_uA;
int max_uA;
+ int ilim_uA;
+
+ int system_load;
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
@@ -138,6 +145,8 @@ struct regulation_constraints {
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
unsigned apply_uV:1; /* apply uV constraint if min == max */
unsigned ramp_disable:1; /* disable ramp delay */
+ unsigned soft_start:1; /* ramp voltage slowly */
+ unsigned pull_down:1; /* pull down resistor when regulator off */
};
/**
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index f8acc052e353..f6a8a16a0d4d 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -58,6 +58,9 @@
* control signal from EN input pin. If it is false then
* voltage output will be enabled/disabled through EN bit of
* device register.
+ * @enable_gpio: Enable GPIO. If the EN pin is controlled through a GPIO from
+ *		the host, the GPIO number can be provided here. If it is not
+ *		GPIO-controlled, it should be -1.
* @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic.
* @dvs_def_state: Default state of dvs. 1 if it is high else 0.
*/
@@ -65,6 +68,7 @@ struct max8973_regulator_platform_data {
struct regulator_init_data *reg_init_data;
unsigned long control_flags;
bool enable_ext_control;
+ int enable_gpio;
int dvs_gpio;
unsigned dvs_def_state:1;
};
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9e7e745dac55..78b8a9b9d40a 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -404,6 +404,7 @@ enum rproc_crash_type {
* @table_ptr: pointer to the resource table in effect
* @cached_table: copy of the resource table
* @table_csum: checksum of the resource table
+ * @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
struct klist_node node;
@@ -435,6 +436,7 @@ struct rproc {
struct resource_table *table_ptr;
struct resource_table *cached_table;
u32 table_csum;
+ bool has_iommu;
};
/* we currently support only two vrings per rvdev */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index d438eeb08bff..843ceca9a21e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,14 +1,13 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
- * Based on the following paper by Josh Triplett, Paul E. McKenney
- * and Jonathan Walpole:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
* Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,10 +17,14 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
+#include <linux/atomic.h>
#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
/*
* The end of the chain is marked with a special nulls marks which has
@@ -42,6 +45,9 @@
#define RHT_HASH_BITS 27
#define RHT_BASE_SHIFT RHT_HASH_BITS
+/* Base bits plus 1 bit for nulls marker */
+#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
+
struct rhash_head {
struct rhash_head __rcu *next;
};
@@ -49,20 +55,43 @@ struct rhash_head {
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
+ * @rehash: Current bucket being rehashed
+ * @hash_rnd: Random seed to fold into hash
* @locks_mask: Mask to apply before accessing locks[]
* @locks: Array of spinlocks protecting individual buckets
+ * @walkers: List of active walkers
+ * @rcu: RCU structure for freeing the table
+ * @future_tbl: Table under construction during rehashing
* @buckets: size * hash buckets
*/
struct bucket_table {
- size_t size;
+ unsigned int size;
+ unsigned int rehash;
+ u32 hash_rnd;
unsigned int locks_mask;
spinlock_t *locks;
+ struct list_head walkers;
+ struct rcu_head rcu;
+
+ struct bucket_table __rcu *future_tbl;
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
struct rhashtable;
@@ -72,60 +101,64 @@ struct rhashtable;
* @key_len: Length of key
* @key_offset: Offset of key in struct to be hashed
* @head_offset: Offset of rhash_head in struct to be hashed
- * @hash_rnd: Seed to use while hashing
- * @max_shift: Maximum number of shifts while expanding
- * @min_shift: Minimum number of shifts while shrinking
+ * @insecure_max_entries: Maximum number of entries (may be exceeded)
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
* @nulls_base: Base value to generate nulls marker
+ * @insecure_elasticity: Set to true to disable chain length checks
+ * @automatic_shrinking: Enable automatic shrinking of tables
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
- * @hashfn: Function to hash key
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
*/
struct rhashtable_params {
size_t nelem_hint;
size_t key_len;
size_t key_offset;
size_t head_offset;
- u32 hash_rnd;
- size_t max_shift;
- size_t min_shift;
+ unsigned int insecure_max_entries;
+ unsigned int max_size;
+ unsigned int min_size;
u32 nulls_base;
+ bool insecure_elasticity;
+ bool automatic_shrinking;
size_t locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
};
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
- * @future_tbl: Table under construction during expansion/shrinking
* @nelems: Number of elements in table
- * @shift: Current size (1 << shift)
+ * @key_len: Key length for hashfn
+ * @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
- * @walkers: List of active walkers
- * @being_destroyed: True if table is set up for destruction
+ * @lock: Spin lock to protect walker list
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- struct bucket_table __rcu *future_tbl;
atomic_t nelems;
- atomic_t shift;
+ unsigned int key_len;
+ unsigned int elasticity;
struct rhashtable_params p;
struct work_struct run_work;
struct mutex mutex;
- struct list_head walkers;
- bool being_destroyed;
+ spinlock_t lock;
};
/**
* struct rhashtable_walker - Hash table walker
* @list: List entry on list of walkers
- * @resize: Resize event occured
+ * @tbl: The table that we were walking over
*/
struct rhashtable_walker {
struct list_head list;
- bool resize;
+ struct bucket_table *tbl;
};
/**
@@ -162,6 +195,131 @@ static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
return ((unsigned long) ptr) >> 1;
}
+static inline void *rht_obj(const struct rhashtable *ht,
+ const struct rhash_head *he)
+{
+ return (char *)he - ht->p.head_offset;
+}
+
+static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+}
+
+static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+{
+ unsigned int hash;
+
+ /* params must be equal to ht->p if it isn't constant. */
+ if (!__builtin_constant_p(params.key_len))
+ hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+ else if (params.key_len) {
+ unsigned int key_len = params.key_len;
+
+ if (params.hashfn)
+ hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ else if (key_len & (sizeof(u32) - 1))
+ hash = jhash(key, key_len, tbl->hash_rnd);
+ else
+ hash = jhash2(key, key_len / sizeof(u32),
+ tbl->hash_rnd);
+ } else {
+ unsigned int key_len = ht->p.key_len;
+
+ if (params.hashfn)
+ hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ else
+ hash = jhash(key, key_len, tbl->hash_rnd);
+ }
+
+ return rht_bucket_index(tbl, hash);
+}
+
+static inline unsigned int rht_head_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const struct rhash_head *he, const struct rhashtable_params params)
+{
+ const char *ptr = rht_obj(ht, he);
+
+ return likely(params.obj_hashfn) ?
+ rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
+ ht->p.key_len,
+ tbl->hash_rnd)) :
+ rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_75(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Expand table when exceeding 75% load */
+ return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_shrink_below_30(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Shrink table beneath 30% load */
+ return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+ tbl->size > ht->p.min_size;
+}
+
+/**
+ * rht_grow_above_100 - returns true if nelems > table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_100(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ return atomic_read(&ht->nelems) > tbl->size &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_grow_above_max - returns true if table is above maximum
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_max(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ return ht->p.insecure_max_entries &&
+ atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+}
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets that may contain entries
+ * which link to the same bucket of the old table during resizing.
+ * This simplifies the locking, as locking the bucket in both tables
+ * during a resize always guarantees protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return &tbl->locks[hash & tbl->locks_mask];
+}
+
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
@@ -178,23 +336,13 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
}
#endif /* CONFIG_PROVE_LOCKING */
-int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
-
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
-
-int rhashtable_expand(struct rhashtable *ht);
-int rhashtable_shrink(struct rhashtable *ht);
-
-void *rhashtable_lookup(struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
- bool (*compare)(void *, void *), void *arg);
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
- struct rhash_head *obj,
- bool (*compare)(void *, void *),
- void *arg);
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -202,6 +350,9 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg);
void rhashtable_destroy(struct rhashtable *ht);
#define rht_dereference(p, ht) \
@@ -352,4 +503,320 @@ void rhashtable_destroy(struct rhashtable *ht);
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
+static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ struct rhashtable *ht = arg->ht;
+ const char *ptr = obj;
+
+ return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, inlined version
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ const struct bucket_table *tbl;
+ struct rhash_head *he;
+ unsigned int hash;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+ hash = rht_key_hashfn(ht, tbl, key, params);
+ rht_for_each_rcu(he, tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ rcu_read_unlock();
+ return rht_obj(ht, he);
+ }
+
+ /* Ensure we see any new tables. */
+ smp_rmb();
+
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(tbl))
+ goto restart;
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead */
+static inline int __rhashtable_insert_fast(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ struct bucket_table *tbl, *new_tbl;
+ struct rhash_head *head;
+ spinlock_t *lock;
+ unsigned int elasticity;
+ unsigned int hash;
+ int err;
+
+restart:
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* All insertions must grab the oldest table containing
+ * the hashed bucket that is yet to be rehashed.
+ */
+ for (;;) {
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
+
+ if (tbl->rehash <= hash)
+ break;
+
+ spin_unlock_bh(lock);
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ }
+
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(new_tbl)) {
+ err = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ if (err == -EAGAIN)
+ goto slow_path;
+ goto out;
+ }
+
+ err = -E2BIG;
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto out;
+
+ if (unlikely(rht_grow_above_100(ht, tbl))) {
+slow_path:
+ spin_unlock_bh(lock);
+ err = rhashtable_insert_rehash(ht);
+ rcu_read_unlock();
+ if (err)
+ return err;
+
+ goto restart;
+ }
+
+ err = -EEXIST;
+ elasticity = ht->elasticity;
+ rht_for_each(head, tbl, hash) {
+ if (key &&
+ unlikely(!(params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head)))))
+ goto out;
+ if (!--elasticity)
+ goto slow_path;
+ }
+
+ err = 0;
+
+ head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+ RCU_INIT_POINTER(obj->next, head);
+
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+
+ atomic_inc(&ht->nelems);
+ if (rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
+
+out:
+ spin_unlock_bh(lock);
+ rcu_read_unlock();
+
+ return err;
+}
+
+/**
+ * rhashtable_insert_fast - insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against concurrent mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% load watermark (see rht_grow_above_75()).
+ */
+static inline int rhashtable_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_insert_fast(ht, NULL, obj, params);
+}
+
+/**
+ * rhashtable_lookup_insert_fast - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% load watermark (see rht_grow_above_75()).
+ */
+static inline int rhashtable_lookup_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(ht, obj);
+
+ BUG_ON(ht->p.obj_hashfn);
+
+ return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
+ params);
+}
+
+/**
+ * rhashtable_lookup_insert_key - search and insert object to hash table
+ * with explicit key
+ * @ht: hash table
+ * @key: key
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% load watermark (see rht_grow_above_75()).
+ *
+ * Returns zero on success.
+ */
+static inline int rhashtable_lookup_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ return __rhashtable_insert_fast(ht, key, obj, params);
+}
+
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
+ struct rhashtable *ht, struct bucket_table *tbl,
+ struct rhash_head *obj, const struct rhashtable_params params)
+{
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ spinlock_t * lock;
+ unsigned int hash;
+ int err = -ENOENT;
+
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+
+ spin_lock_bh(lock);
+
+ pprev = &tbl->buckets[hash];
+ rht_for_each(he, tbl, hash) {
+ if (he != obj) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rcu_assign_pointer(*pprev, obj->next);
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(lock);
+
+ return err;
+}
+
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table if automatic shrinking is enabled
+ * and the load drops below 30% (see rht_shrink_below_30()).
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct bucket_table *tbl;
+ int err;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* Because we have already taken (and released) the bucket
+ * lock in old_tbl, if we find that future_tbl is not yet
+ * visible then that guarantees the entry to still be in
+ * the old tbl if it exists.
+ */
+ while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+ ;
+
+ if (err)
+ goto out;
+
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+
+out:
+ rcu_read_unlock();
+
+ return err;
+}
+
#endif /* _LINUX_RHASHTABLE_H */
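
The rework drives the table from a compile-time const struct rhashtable_params, which lets the new *_fast() inlines specialize the hash and compare paths per call site. A minimal usage sketch under those assumptions follows; the object type, key field and surrounding error handling are illustrative only.

/* Sketch: keyed objects in an rhashtable using the inlined fast paths. */
struct demo_obj {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params demo_params = {
	.key_len	     = sizeof(u32),
	.key_offset	     = offsetof(struct demo_obj, key),
	.head_offset	     = offsetof(struct demo_obj, node),
	.automatic_shrinking = true,
};

static struct rhashtable demo_ht;

static int demo_setup(void)
{
	return rhashtable_init(&demo_ht, &demo_params);
}

static int demo_add(struct demo_obj *obj)
{
	return rhashtable_insert_fast(&demo_ht, &obj->node, demo_params);
}

static struct demo_obj *demo_find(u32 key)
{
	return rhashtable_lookup_fast(&demo_ht, &key, demo_params);
}

static int demo_del(struct demo_obj *obj)
{
	return rhashtable_remove_fast(&demo_ht, &obj->node, demo_params);
}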
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 6bda06f21930..cde976e86b48 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -298,7 +298,7 @@ struct rio_id_table {
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
* @devices: List of devices in this network
- * @switches: List of switches in this netowrk
+ * @switches: List of switches in this network
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c4c559a45dc8..c89c53a113a8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -105,14 +105,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
__put_anon_vma(anon_vma);
}
-static inline struct anon_vma *page_anon_vma(struct page *page)
-{
- if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
- PAGE_MAPPING_ANON)
- return NULL;
- return page_rmapping(page);
-}
-
static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 5db76a32fcab..39adaa9529eb 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -77,7 +77,12 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
return rtnl_dereference(dev->ingress_queue);
}
-extern struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
+struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
+
+#ifdef CONFIG_NET_INGRESS
+void net_inc_ingress_queue(void);
+void net_dec_ingress_queue(void);
+#endif
extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
@@ -109,5 +114,9 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode,
- u32 flags, u32 mask);
+ u32 flags, u32 mask, int nlflags,
+ u32 filter_mask,
+ int (*vlan_fill)(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 filter_mask));
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index ed8f9e70df9b..50a8486c524b 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -2,13 +2,39 @@
#define _LINUX_SCATTERLIST_H
#include <linux/string.h>
+#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
-
-#include <asm/types.h>
-#include <asm/scatterlist.h>
#include <asm/io.h>
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+ unsigned long sg_magic;
+#endif
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+ dma_addr_t dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ unsigned int dma_length;
+#endif
+};
+
+/*
+ * These macros should be used after a dma_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries dma_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+#define sg_dma_len(sg) ((sg)->dma_length)
+#else
+#define sg_dma_len(sg) ((sg)->length)
+#endif
+
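A sketch of how the sg_dma_address()/sg_dma_len() accessors above are meant to be
used after dma_map_sg(). The device, scatterlist and the DMA-programming step are
placeholders, not part of this patch.

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/scatterlist.h>

	static int map_and_program(struct device *dev, struct scatterlist *sgl,
				   int count)
	{
		struct scatterlist *sg;
		int i, nents;

		nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
		if (!nents)
			return -ENOMEM;

		/* Only walk the entries dma_map_sg() actually mapped. */
		for_each_sg(sgl, sg, nents, i) {
			dma_addr_t addr = sg_dma_address(sg);
			unsigned int len = sg_dma_len(sg);

			/* program addr/len into the device's DMA engine here */
			(void)addr;
			(void)len;
		}

		dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
		return 0;
	}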
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
@@ -18,10 +44,9 @@ struct sg_table {
/*
* Notes on SG table design.
*
- * Architectures must provide an unsigned long page_link field in the
- * scatterlist struct. We use that to place the page pointer AND encode
- * information about the sg table as well. The two lower bits are reserved
- * for this information.
+ * We use the unsigned long page_link field in the scatterlist struct to place
+ * the page pointer AND encode information about the sg table as well. The two
+ * lower bits are reserved for this information.
*
* If bit 0 is set, then the page_link contains a pointer to the next sg
* table list. Otherwise the next entry is at sg + 1.
@@ -221,6 +246,7 @@ static inline void *sg_virt(struct scatterlist *sg)
}
int sg_nents(struct scatterlist *sg);
+int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f74d4cc3a3e5..6633e83e608a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -25,7 +25,7 @@ struct sched_param {
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -125,7 +125,6 @@ struct sched_attr {
u64 sched_period;
};
-struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
@@ -133,6 +132,7 @@ struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
+struct nameidata;
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
@@ -174,15 +174,12 @@ extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
-extern void update_cpu_load_nohz(void);
-/* Notifier for when a task gets migrated to a new CPU */
-struct task_migration_notifier {
- struct task_struct *task;
- int from_cpu;
- int to_cpu;
-};
-extern void register_task_migration_notifier(struct notifier_block *n);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void update_cpu_load_nohz(void);
+#else
+static inline void update_cpu_load_nohz(void) { }
+#endif
extern unsigned long get_parent_ip(unsigned long addr);
@@ -222,9 +219,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
-#define TASK_STATE_MAX 1024
+#define TASK_NOLOAD 1024
+#define TASK_STATE_MAX 2048
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -234,6 +232,8 @@ extern char ___assert_task_state[1 - 2*!!(
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
+
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
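TASK_IDLE above lets a kernel thread sleep uninterruptibly without contributing to
the load average. A minimal sketch of the intended pattern, assuming a plain kthread
with no extra wakeup condition:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int idle_wait_thread(void *data)
	{
		while (!kthread_should_stop()) {
			/* Uninterruptible sleep that is not counted in loadavg. */
			set_current_state(TASK_IDLE);
			if (kthread_should_stop()) {
				__set_current_state(TASK_RUNNING);
				break;
			}
			schedule();
		}
		return 0;
	}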
@@ -249,7 +249,8 @@ extern char ___assert_task_state[1 - 2*!!(
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0)
+ (task->flags & PF_FROZEN) == 0 && \
+ (task->state & TASK_NOLOAD) == 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -261,7 +262,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
- set_mb((tsk)->state, (state_value)); \
+ smp_store_mb((tsk)->state, (state_value)); \
} while (0)
/*
@@ -283,7 +284,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
- set_mb(current->state, (state_value)); \
+ smp_store_mb(current->state, (state_value)); \
} while (0)
#else
@@ -291,7 +292,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
- set_mb((tsk)->state, (state_value))
+ smp_store_mb((tsk)->state, (state_value))
/*
* set_current_state() includes a barrier so that the write of current->state
@@ -307,7 +308,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
- set_mb(current->state, (state_value))
+ smp_store_mb(current->state, (state_value))
#endif
@@ -344,14 +345,10 @@ extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(int pinned);
+extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
-static inline int get_nohz_timer_target(int pinned)
-{
- return smp_processor_id();
-}
#endif
/*
@@ -576,6 +573,23 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
+/*
+ * This is the atomic variant of task_cputime, which can be used for
+ * storing and updating task_cputime statistics without locking.
+ */
+struct task_cputime_atomic {
+ atomic64_t utime;
+ atomic64_t stime;
+ atomic64_t sum_exec_runtime;
+};
+
+#define INIT_CPUTIME_ATOMIC \
+ (struct task_cputime_atomic) { \
+ .utime = ATOMIC64_INIT(0), \
+ .stime = ATOMIC64_INIT(0), \
+ .sum_exec_runtime = ATOMIC64_INIT(0), \
+ }
+
#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
#else
@@ -593,18 +607,16 @@ struct task_cputime {
/**
* struct thread_group_cputimer - thread group interval timer counts
- * @cputime: thread group interval timers.
+ * @cputime_atomic: atomic thread group interval timers.
* @running: non-zero when there are timers running and
* @cputime receives updates.
- * @lock: lock for fields in this struct.
*
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU timer calculations.
*/
struct thread_group_cputimer {
- struct task_cputime cputime;
+ struct task_cputime_atomic cputime_atomic;
int running;
- raw_spinlock_t lock;
};
#include <linux/rwsem.h>
@@ -909,6 +921,50 @@ enum cpu_idle_type {
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
/*
+ * Wake-queues are lists of tasks with a pending wakeup, whose
+ * callers have already marked the task as woken internally,
+ * and can thus carry on. A common use case is being able to
+ * do the wakeups once the corresponding user lock has been
+ * released.
+ *
+ * We hold reference to each task in the list across the wakeup,
+ * thus guaranteeing that the memory is still valid by the time
+ * the actual wakeups are performed in wake_up_q().
+ *
+ * One per task suffices, because there's never a need for a task to be
+ * in two wake queues simultaneously; it is forbidden to abandon a task
+ * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
+ * already in a wake queue, the wakeup will happen soon and the second
+ * waker can just skip it.
+ *
+ * The WAKE_Q macro declares and initializes the list head.
+ * wake_up_q() does NOT reinitialize the list; it's expected to be
+ * called near the end of a function, where the fact that the queue is
+ * not used again will be easy to see by inspection.
+ *
+ * Note that this can cause spurious wakeups. schedule() callers
+ * must ensure the call is done inside a loop, confirming that the
+ * wakeup condition has in fact occurred.
+ */
+struct wake_q_node {
+ struct wake_q_node *next;
+};
+
+struct wake_q_head {
+ struct wake_q_node *first;
+ struct wake_q_node **lastp;
+};
+
+#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
+
+#define WAKE_Q(name) \
+ struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+
+extern void wake_q_add(struct wake_q_head *head,
+ struct task_struct *task);
+extern void wake_up_q(struct wake_q_head *head);
+
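A sketch of the deferred-wakeup pattern described in the comment above. The lock and
the waiter lookup are placeholders; only WAKE_Q(), wake_q_add() and wake_up_q() come
from this patch.

	static void release_and_wake(spinlock_t *lock, struct task_struct *waiter)
	{
		WAKE_Q(wake_q);

		spin_lock(lock);
		/* mark the waiter as woken while still holding the lock ... */
		wake_q_add(&wake_q, waiter);
		spin_unlock(lock);

		/* ... but issue the actual wakeup only after the lock is dropped */
		wake_up_q(&wake_q);
	}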
+/*
* sched-domains (multiprocessor balancing) declarations:
*/
#ifdef CONFIG_SMP
@@ -1343,8 +1399,6 @@ struct task_struct {
int rcu_read_lock_nesting;
union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_PREEMPT_RCU
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
@@ -1365,9 +1419,6 @@ struct task_struct {
#endif
struct mm_struct *mm, *active_mm;
-#ifdef CONFIG_COMPAT_BRK
- unsigned brk_randomized:1;
-#endif
/* per-thread vma caching */
u32 vmacache_seqnum;
struct vm_area_struct *vmacache[VMACACHE_SIZE];
@@ -1378,7 +1429,7 @@ struct task_struct {
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
- unsigned int jobctl; /* JOBCTL_*, siglock protected */
+ unsigned long jobctl; /* JOBCTL_*, siglock protected */
/* Used for emulating ABI behavior of previous Linux versions */
unsigned int personality;
@@ -1390,10 +1441,14 @@ struct task_struct {
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+ unsigned sched_migrated:1;
#ifdef CONFIG_MEMCG_KMEM
unsigned memcg_kmem_skip_account:1;
#endif
+#ifdef CONFIG_COMPAT_BRK
+ unsigned brk_randomized:1;
+#endif
unsigned long atomic_flags; /* Flags needing atomic access. */
@@ -1470,7 +1525,7 @@ struct task_struct {
it with task_lock())
- initialized normally by setup_new_exec */
/* file system info */
- int link_count, total_link_count;
+ struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
struct sysv_sem sysvsem;
@@ -1520,6 +1575,8 @@ struct task_struct {
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
+ struct wake_q_node wake_q;
+
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
struct rb_root pi_waiters;
@@ -1733,6 +1790,7 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
+ int pagefault_disabled;
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2086,22 +2144,22 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
-#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
-#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
-#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
-#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
-#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
-#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
-#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
+#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
extern bool task_set_jobctl_pending(struct task_struct *task,
- unsigned int mask);
+ unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
- unsigned int mask);
+ unsigned long mask);
static inline void rcu_copy_process(struct task_struct *p)
{
@@ -2311,11 +2369,6 @@ extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
-/*
- * The default (Linux) execution domain.
- */
-extern struct exec_domain default_exec_domain;
-
union thread_union {
struct thread_info thread_info;
unsigned long stack[THREAD_SIZE/sizeof(long)];
@@ -2546,6 +2599,9 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
}
#endif
+#define tasklist_empty() \
+ list_empty(&init_task.tasks)
+
#define next_task(p) \
list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
@@ -2976,11 +3032,6 @@ static __always_inline bool need_resched(void)
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
-static inline void thread_group_cputime_init(struct signal_struct *sig)
-{
- raw_spin_lock_init(&sig->cputimer.lock);
-}
-
/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
@@ -3094,13 +3145,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
static inline unsigned long task_rlimit(const struct task_struct *tsk,
unsigned int limit)
{
- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+ return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}
static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
unsigned int limit)
{
- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+ return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}
static inline unsigned long rlimit(unsigned int limit)
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 6341f5be6e24..a30b172df6e1 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
+extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
@@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
return p->normal_prio;
}
-static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
+static inline int rt_mutex_get_effective_prio(struct task_struct *task,
+ int newprio)
{
- return 0;
+ return newprio;
}
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 596a0e007c62..c9e4731cf10b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -57,24 +57,12 @@ extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return 1;
-}
-#endif
/*
* control realtime throttling:
diff --git a/include/linux/security.h b/include/linux/security.h
index a1b7dbd127ff..52febde52479 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -43,7 +43,6 @@ struct file;
struct vfsmount;
struct path;
struct qstr;
-struct nameidata;
struct iattr;
struct fown_struct;
struct file_operations;
@@ -477,7 +476,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inode_follow_link:
* Check permission to follow a symbolic link when looking up a pathname.
* @dentry contains the dentry structure for the link.
- * @nd contains the nameidata structure for the parent directory.
+ * @inode contains the inode, which itself is not stable in RCU-walk.
+ * @rcu indicates whether we are in RCU-walk mode.
* Return 0 if permission is granted.
* @inode_permission:
* Check permission before accessing an inode. This hook is called by the
@@ -1553,10 +1553,11 @@ struct security_operations {
int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int (*inode_readlink) (struct dentry *dentry);
- int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
+ int (*inode_follow_link) (struct dentry *dentry, struct inode *inode,
+ bool rcu);
int (*inode_permission) (struct inode *inode, int mask);
int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
- int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
+ int (*inode_getattr) (const struct path *path);
int (*inode_setxattr) (struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
@@ -1716,7 +1717,6 @@ struct security_operations {
int (*tun_dev_attach_queue) (void *security);
int (*tun_dev_attach) (struct sock *sk, void *security);
int (*tun_dev_open) (void *security);
- void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1840,10 +1840,11 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
int security_inode_readlink(struct dentry *dentry);
-int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
+ bool rcu);
int security_inode_permission(struct inode *inode, int mask);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
-int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
+int security_inode_getattr(const struct path *path);
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -2243,7 +2244,8 @@ static inline int security_inode_readlink(struct dentry *dentry)
}
static inline int security_inode_follow_link(struct dentry *dentry,
- struct nameidata *nd)
+ struct inode *inode,
+ bool rcu)
{
return 0;
}
@@ -2259,8 +2261,7 @@ static inline int security_inode_setattr(struct dentry *dentry,
return 0;
}
-static inline int security_inode_getattr(struct vfsmount *mnt,
- struct dentry *dentry)
+static inline int security_inode_getattr(const struct path *path)
{
return 0;
}
@@ -2735,8 +2736,6 @@ int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
-
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
@@ -2928,11 +2927,6 @@ static inline int security_tun_dev_open(void *security)
{
return 0;
}
-
-static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-}
-
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5f68d0a391ce..486e685a226a 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -233,6 +233,47 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
s->sequence++;
}
+/**
+ * raw_write_seqcount_barrier - do a seq write barrier
+ * @s: pointer to seqcount_t
+ *
+ * This can be used to provide an ordering guarantee instead of the
+ * usual consistency guarantee. It is one wmb cheaper, because we can
+ * collapse the two back-to-back wmb()s.
+ *
+ * seqcount_t seq;
+ * bool X = true, Y = false;
+ *
+ * void read(void)
+ * {
+ * bool x, y;
+ *
+ * do {
+ * int s = read_seqcount_begin(&seq);
+ *
+ * x = X; y = Y;
+ *
+ * } while (read_seqcount_retry(&seq, s));
+ *
+ * BUG_ON(!x && !y);
+ * }
+ *
+ * void write(void)
+ * {
+ * Y = true;
+ *
+ * raw_write_seqcount_barrier(seq);
+ *
+ * X = false;
+ * }
+ */
+static inline void raw_write_seqcount_barrier(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+ s->sequence++;
+}
+
/*
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
@@ -266,13 +307,13 @@ static inline void write_seqcount_end(seqcount_t *s)
}
/**
- * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * write_seqcount_invalidate - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
*
- * After write_seqcount_barrier, no read-side seq operations will complete
+ * After write_seqcount_invalidate, no read-side seq operations will complete
* successfully and see data older than this.
*/
-static inline void write_seqcount_barrier(seqcount_t *s)
+static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
s->sequence+=2;
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index a8efa235b7c1..78097e7a330a 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -60,6 +60,20 @@ enum {
};
struct uart_8250_dma;
+struct uart_8250_port;
+
+/**
+ * 8250 core driver operations
+ *
+ * @setup_irq() Set up irq handling. The universal 8250 driver links this
+ * port to the irq chain. Other drivers may @request_irq().
+ * @release_irq() Undo irq handling. The universal 8250 driver unlinks
+ * the port from the irq chain.
+ */
+struct uart_8250_ops {
+ int (*setup_irq)(struct uart_8250_port *);
+ void (*release_irq)(struct uart_8250_port *);
+};
/*
* This should be used by drivers which want to register
@@ -88,6 +102,8 @@ struct uart_8250_port {
unsigned char canary; /* non-zero during system sleep
* if no_console_suspend
*/
+ unsigned char probe;
+#define UART_PROBE_RSA (1 << 0)
/*
* Some bits in registers are cleared on a read, so they must
@@ -100,6 +116,7 @@ struct uart_8250_port {
unsigned char msr_saved_flags;
struct uart_8250_dma *dma;
+ const struct uart_8250_ops *ops;
/* 8250 specific callbacks */
int (*dl_read)(struct uart_8250_port *);
@@ -118,11 +135,8 @@ void serial8250_resume_port(int line);
extern int early_serial_setup(struct uart_port *port);
-extern int serial8250_find_port(struct uart_port *p);
-extern int serial8250_find_port_for_earlycon(void);
extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
extern void serial8250_early_out(struct uart_port *port, int offset, int value);
-extern int setup_early_serial8250_console(char *cmdline);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern int serial8250_do_startup(struct uart_port *port);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index d10965f0d8a4..025dad9dcde4 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -235,7 +235,9 @@ struct uart_port {
const struct uart_ops *ops;
unsigned int custom_divisor;
unsigned int line; /* port index */
+ unsigned int minor;
resource_size_t mapbase; /* for ioremap */
+ resource_size_t mapsize;
struct device *dev; /* parent device */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
@@ -336,24 +338,29 @@ struct earlycon_device {
char options[16]; /* e.g., 115200n8 */
unsigned int baud;
};
-int setup_earlycon(char *buf, const char *match,
- int (*setup)(struct earlycon_device *, const char *));
+struct earlycon_id {
+ char name[16];
+ int (*setup)(struct earlycon_device *, const char *options);
+} __aligned(32);
+
+extern int setup_earlycon(char *buf);
extern int of_setup_earlycon(unsigned long addr,
int (*setup)(struct earlycon_device *, const char *));
-#define EARLYCON_DECLARE(name, func) \
-static int __init name ## _setup_earlycon(char *buf) \
-{ \
- return setup_earlycon(buf, __stringify(name), func); \
-} \
-early_param("earlycon", name ## _setup_earlycon);
+#define EARLYCON_DECLARE(_name, func) \
+ static const struct earlycon_id __earlycon_##_name \
+ __used __section(__earlycon_table) \
+ = { .name = __stringify(_name), \
+ .setup = func }
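With the table-based registration above, an early console driver would hook in
roughly like this. The "myuart" name, the write helper and the register polling are
hypothetical; only EARLYCON_DECLARE() and struct earlycon_device are from this patch.

	#include <linux/console.h>
	#include <linux/init.h>
	#include <linux/serial_core.h>

	static void myuart_early_write(struct console *con, const char *s,
				       unsigned int count)
	{
		/* poll the (hypothetical) UART TX register for each character */
	}

	static int __init myuart_early_setup(struct earlycon_device *device,
					     const char *options)
	{
		if (!device->port.membase)
			return -ENODEV;

		device->con->write = myuart_early_write;
		return 0;
	}
	EARLYCON_DECLARE(myuart, myuart_early_setup);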
#define OF_EARLYCON_DECLARE(name, compat, fn) \
_OF_DECLARE(earlycon, name, compat, fn, void *)
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
+int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
+ char **options);
void uart_parse_options(char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
diff --git a/include/linux/serial_mfd.h b/include/linux/serial_mfd.h
deleted file mode 100644
index 2b071e0b034d..000000000000
--- a/include/linux/serial_mfd.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef _SERIAL_MFD_H_
-#define _SERIAL_MFD_H_
-
-/* HW register offset definition */
-#define UART_FOR 0x08
-#define UART_PS 0x0C
-#define UART_MUL 0x0D
-#define UART_DIV 0x0E
-
-#define HSU_GBL_IEN 0x0
-#define HSU_GBL_IST 0x4
-
-#define HSU_GBL_INT_BIT_PORT0 0x0
-#define HSU_GBL_INT_BIT_PORT1 0x1
-#define HSU_GBL_INT_BIT_PORT2 0x2
-#define HSU_GBL_INT_BIT_IRI 0x3
-#define HSU_GBL_INT_BIT_HDLC 0x4
-#define HSU_GBL_INT_BIT_DMA 0x5
-
-#define HSU_GBL_ISR 0x8
-#define HSU_GBL_DMASR 0x400
-#define HSU_GBL_DMAISR 0x404
-
-#define HSU_PORT_REG_OFFSET 0x80
-#define HSU_PORT0_REG_OFFSET 0x80
-#define HSU_PORT1_REG_OFFSET 0x100
-#define HSU_PORT2_REG_OFFSET 0x180
-#define HSU_PORT_REG_LENGTH 0x80
-
-#define HSU_DMA_CHANS_REG_OFFSET 0x500
-#define HSU_DMA_CHANS_REG_LENGTH 0x40
-
-#define HSU_CH_SR 0x0 /* channel status reg */
-#define HSU_CH_CR 0x4 /* control reg */
-#define HSU_CH_DCR 0x8 /* descriptor control reg */
-#define HSU_CH_BSR 0x10 /* max fifo buffer size reg */
-#define HSU_CH_MOTSR 0x14 /* minimum ocp transfer size */
-#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */
-#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */
-#define HSU_CH_D1SAR 0x28
-#define HSU_CH_D1TSR 0x2C
-#define HSU_CH_D2SAR 0x30
-#define HSU_CH_D2TSR 0x34
-#define HSU_CH_D3SAR 0x38
-#define HSU_CH_D3TSR 0x3C
-
-#endif
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index abdf1f229dc3..dd0ba502ccb3 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -69,6 +69,7 @@ struct shdma_chan {
int id; /* Raw id of this channel */
int irq; /* Channel IRQ */
int slave_id; /* Client ID for slave DMA */
+ int real_slave_id; /* argument passed to filter function */
int hw_req; /* DMA request line for slave DMA - same
* as MID/RID, used with DT */
enum shdma_pm_state pm_state;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f54d6659713a..d6cdd6e87d53 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,7 +34,9 @@
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
+#include <linux/splice.h>
+#include <linux/in6.h>
/* A. Checksumming of received packets by device.
*
@@ -166,10 +168,23 @@ struct nf_conntrack {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
atomic_t use;
+ enum {
+ BRNF_PROTO_UNCHANGED,
+ BRNF_PROTO_8021Q,
+ BRNF_PROTO_PPPOE
+ } orig_proto:8;
+ bool pkt_otherhost;
+ __u16 frag_max_size;
unsigned int mask;
struct net_device *physindev;
- struct net_device *physoutdev;
- unsigned long data[32 / sizeof(unsigned long)];
+ union {
+ struct net_device *physoutdev;
+ char neigh_header[8];
+ };
+ union {
+ __be32 ipv4_daddr;
+ struct in6_addr ipv6_daddr;
+ };
};
#endif
@@ -492,7 +507,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
- * @dropcount: total number of sk_receive_queue overflows
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
@@ -641,7 +655,6 @@ struct sk_buff {
#endif
union {
__u32 mark;
- __u32 dropcount;
__u32 reserved_tailroom;
};
@@ -769,6 +782,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
@@ -853,6 +867,9 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int len, int odd, struct sk_buff *skb),
void *from, int length);
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+ int offset, size_t size);
+
struct skb_seq_state {
__u32 lower_offset;
__u32 upper_offset;
@@ -870,8 +887,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
void skb_abort_seq_read(struct skb_seq_state *st);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
- unsigned int to, struct ts_config *config,
- struct ts_state *state);
+ unsigned int to, struct ts_config *config);
/*
* Packet hash types specify the type of hash in skb_set_hash.
@@ -914,7 +930,6 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
skb->hash = hash;
}
-void __skb_get_hash(struct sk_buff *skb);
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -923,6 +938,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
return skb->hash;
@@ -1930,8 +1947,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- else if (skb_flow_dissect(skb, &keys))
- skb_set_transport_header(skb, keys.thoff);
+ else if (skb_flow_dissect_flow_keys(skb, &keys))
+ skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
}
@@ -2122,10 +2139,6 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
-
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2180,6 +2193,11 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
+static inline void skb_free_frag(void *addr)
+{
+ __free_page_frag(addr);
+}
+
void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
@@ -2687,9 +2705,15 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ssize_t skb_socket_splice(struct sock *sk,
+ struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
- unsigned int flags);
+ unsigned int flags,
+ ssize_t (*splice_cb)(struct sock *,
+ struct pipe_inode_info *,
+ struct splice_pipe_desc *));
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -2724,8 +2748,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
-static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *data, int hlen, void *buffer)
+static inline void * __must_check
+__skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *data, int hlen, void *buffer)
{
if (hlen - offset >= len)
return data + offset;
@@ -2737,8 +2762,8 @@ static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
return buffer;
}
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *buffer)
+static inline void * __must_check
+skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
return __skb_header_pointer(skb, offset, len, skb->data,
skb_headlen(skb), buffer);
@@ -3013,6 +3038,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
*/
#define CHECKSUM_BREAK 76
+/* Unset checksum-complete
+ *
+ * Unsetting checksum-complete can be done when a packet is being
+ * modified (decompressed, for instance) and the checksum-complete
+ * value is therefore invalidated.
+ */
+static inline void skb_checksum_complete_unset(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
/* Validate (init) checksum based on checksum complete.
*
* Return values:
@@ -3033,7 +3070,7 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
}
} else if (skb->csum_bad) {
/* ip_summed == CHECKSUM_NONE in this case */
- return 1;
+ return (__force __sum16)1;
}
skb->csum = psum;
@@ -3281,9 +3318,6 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
- unsigned int num_tx_queues);
-
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
@@ -3338,15 +3372,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
- skb_transport_offset(skb);
- __u16 csum;
+ skb_transport_offset(skb);
+ __wsum partial;
- csum = csum_fold(csum_partial(skb_transport_header(skb),
- plen, skb->csum));
+ partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
skb->csum = res;
SKB_GSO_CB(skb)->csum_start -= plen;
- return csum;
+ return csum_fold(partial);
}
static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3401,10 +3434,9 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
-
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
- const struct flow_keys *keys, int hlen);
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+ unsigned int transport_len,
+ __sum16(*skb_chkf)(struct sk_buff *skb));
/**
* skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 76f1feeabd38..9de2fdc8b5e4 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -18,7 +18,7 @@
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
+ * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
@@ -153,8 +153,30 @@ size_t ksize(const void *);
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
+/*
+ * KMALLOC_LOOP_LOW is the start index of the for loop that creates the
+ * kmalloc_caches objects in create_kmalloc_caches(). The first two indexes
+ * stand for the 96 and 192 byte caches: as kmalloc_index() shows, index 1
+ * (96 bytes) is used when KMALLOC_MIN_SIZE <= 32 and index 2 (192 bytes)
+ * when KMALLOC_MIN_SIZE <= 64. If KMALLOC_MIN_SIZE is larger than 64, the
+ * 96 and 192 byte caches are not needed and the loop can start directly
+ * at KMALLOC_SHIFT_LOW.
+ */
+#if KMALLOC_MIN_SIZE <= 32
+#define KMALLOC_LOOP_LOW 1
+#elif KMALLOC_MIN_SIZE <= 64
+#define KMALLOC_LOOP_LOW 2
+#else
+#define KMALLOC_LOOP_LOW KMALLOC_SHIFT_LOW
+#endif
+
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+/*
+ * The default KMALLOC_MIN_SIZE is 2^3 for slub and slob and 2^5 for slab,
+ * so even with slab KMALLOC_MIN_SIZE <= 32 and the kmalloc-96 and
+ * kmalloc-192 caches must be initialized as well.
+ */
+#define KMALLOC_LOOP_LOW 1
#endif
/*
@@ -240,8 +262,8 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
* belongs to.
* 0 = zero alloc
* 1 = 65 .. 96 bytes
- * 2 = 120 .. 192 bytes
- * n = 2^(n-1) .. 2^n -1
+ * 2 = 129 .. 192 bytes
+ * n = 2^(n-1)+1 .. 2^n
*/
static __always_inline int kmalloc_index(size_t size)
{
diff --git a/include/linux/smp.h b/include/linux/smp.h
index be91db2a7017..c4414074bd88 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -18,7 +18,7 @@ struct call_single_data {
struct llist_node llist;
smp_call_func_t func;
void *info;
- u16 flags;
+ unsigned int flags;
};
/* total number of cpus in this system (may exceed NR_CPUS) */
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 13e929679550..da3c593f9845 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -27,6 +27,8 @@ struct smpboot_thread_data;
* @pre_unpark: Optional unpark function, called before the thread is
* unparked (cpu online). This is not guaranteed to be
* called on the target cpu of the thread. Careful!
+ * @cpumask: Internal state. To update which threads are unparked,
+ * call smpboot_update_cpumask_percpu_thread().
* @selfparking: Thread is not parked by the park function.
* @thread_comm: The base name of the thread
*/
@@ -41,12 +43,14 @@ struct smp_hotplug_thread {
void (*park)(unsigned int cpu);
void (*unpark)(unsigned int cpu);
void (*pre_unpark)(unsigned int cpu);
+ cpumask_var_t cpumask;
bool selfparking;
const char *thread_comm;
};
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_thread_schedule(void);
+int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *);
#endif
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 46cca4c06848..fddebc617469 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -1,7 +1,10 @@
#ifndef __SOCK_DIAG_H__
#define __SOCK_DIAG_H__
+#include <linux/netlink.h>
#include <linux/user_namespace.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
struct sk_buff;
@@ -11,6 +14,7 @@ struct sock;
struct sock_diag_handler {
__u8 family;
int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+ int (*get_info)(struct sk_buff *skb, struct sock *sk);
};
int sock_diag_register(const struct sock_diag_handler *h);
@@ -19,11 +23,49 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
-int sock_diag_check_cookie(void *sk, __u32 *cookie);
-void sock_diag_save_cookie(void *sk, __u32 *cookie);
+int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
+void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
struct sk_buff *skb, int attrtype);
+static inline
+enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
+{
+ switch (sk->sk_family) {
+ case AF_INET:
+ switch (sk->sk_protocol) {
+ case IPPROTO_TCP:
+ return SKNLGRP_INET_TCP_DESTROY;
+ case IPPROTO_UDP:
+ return SKNLGRP_INET_UDP_DESTROY;
+ default:
+ return SKNLGRP_NONE;
+ }
+ case AF_INET6:
+ switch (sk->sk_protocol) {
+ case IPPROTO_TCP:
+ return SKNLGRP_INET6_TCP_DESTROY;
+ case IPPROTO_UDP:
+ return SKNLGRP_INET6_UDP_DESTROY;
+ default:
+ return SKNLGRP_NONE;
+ }
+ default:
+ return SKNLGRP_NONE;
+ }
+}
+
+static inline
+bool sock_diag_has_destroy_listeners(const struct sock *sk)
+{
+ const struct net *n = sock_net(sk);
+ const enum sknetlink_groups group = sock_diag_destroy_group(sk);
+
+ return group != SKNLGRP_NONE && n->diag_nlsk &&
+ netlink_has_listeners(n->diag_nlsk, group);
+}
+void sock_diag_broadcast_destroy(struct sock *sk);
+
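The intended pairing of the two helpers above, as a sketch of what a socket teardown
path would do (the wrapper function itself is illustrative):

	static void maybe_broadcast_destroy(struct sock *sk)
	{
		/* Only build and send the netlink message if somebody listens. */
		if (sock_diag_has_destroy_listeners(sk))
			sock_diag_broadcast_destroy(sk);
	}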
#endif
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5c19cba34dce..5bf59c8493b7 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -51,6 +51,7 @@ struct msghdr {
void *msg_control; /* ancillary data */
__kernel_size_t msg_controllen; /* ancillary data buffer length */
unsigned int msg_flags; /* flags on received message */
+ struct kiocb *msg_iocb; /* ptr to iocb for async requests */
};
struct user_msghdr {
@@ -138,6 +139,11 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
+static inline size_t msg_data_left(struct msghdr *msg)
+{
+ return iov_iter_count(&msg->msg_iter);
+}
+
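A sketch of how msg_data_left() is typically used in a protocol sendmsg() loop. The
destination buffer and chunking policy are placeholders; msg_data_left() shrinks as
copy_from_iter() advances the iterator, so the loop terminates.

	#include <linux/socket.h>
	#include <linux/uio.h>

	static ssize_t fill_from_msg(void *buf, size_t buflen, struct msghdr *msg)
	{
		size_t copied = 0;

		while (msg_data_left(msg) && copied < buflen) {
			size_t chunk = min(msg_data_left(msg), buflen - copied);

			if (copy_from_iter(buf + copied, chunk, &msg->msg_iter) != chunk)
				return -EFAULT;
			copied += chunk;
		}
		return copied;
	}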
/* "Socket"-level control message types: */
#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
@@ -181,6 +187,7 @@ struct ucred {
#define AF_WANPIPE 25 /* Wanpipe API Sockets */
#define AF_LLC 26 /* Linux LLC */
#define AF_IB 27 /* Native InfiniBand address */
+#define AF_MPLS 28 /* MPLS */
#define AF_CAN 29 /* Controller Area Network */
#define AF_TIPC 30 /* TIPC sockets */
#define AF_BLUETOOTH 31 /* Bluetooth sockets */
@@ -226,6 +233,7 @@ struct ucred {
#define PF_WANPIPE AF_WANPIPE
#define PF_LLC AF_LLC
#define PF_IB AF_IB
+#define PF_MPLS AF_MPLS
#define PF_CAN AF_CAN
#define PF_TIPC AF_TIPC
#define PF_BLUETOOTH AF_BLUETOOTH
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index cd519a11c2c6..b63fe6f5fdc8 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -22,6 +22,7 @@ struct at86rf230_platform_data {
int rstn;
int slp_tr;
int dig2;
+ u8 xtal_trim;
};
#endif
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3e18379dfa6f..0063b24b4f36 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -120,7 +120,7 @@ do { \
/*
* Despite its name it doesn't necessarily has to be a full barrier.
* It should only guarantee that a STORE before the critical section
- * can not be reordered with a LOAD inside this section.
+ * can not be reordered with LOADs and STOREs inside this section.
* spin_lock() is the one-way barrier, this LOAD can not escape out
* of the region. So the default implementation simply ensures that
* a STORE can not move into the critical section, smp_wmb() should
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 9cfd9623fb03..bdeb4567b71e 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -182,7 +182,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
* lockdep_is_held() calls.
*/
#define srcu_dereference_check(p, sp, c) \
- __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)
+ __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 669045ab73f3..0a34489a46b6 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -7,8 +7,6 @@ struct task_struct;
struct pt_regs;
#ifdef CONFIG_STACKTRACE
-struct task_struct;
-
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index cd63851b57f2..c735f5c91eea 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -99,6 +99,7 @@ struct plat_stmmacenet_data {
int phy_addr;
int interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
+ struct device_node *phy_node;
struct stmmac_dma_cfg *dma_cfg;
int clk_csr;
int has_gmac;
@@ -114,6 +115,8 @@ struct plat_stmmacenet_data {
int maxmtu;
int multicast_filter_bins;
int unicast_filter_entries;
+ int tx_fifo_size;
+ int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
void *(*setup)(struct platform_device *pdev);
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 657571817260..71f711db4500 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -10,7 +10,7 @@ enum string_size_units {
STRING_UNITS_2, /* use binary powers of 2^10 */
};
-void string_get_size(u64 size, enum string_size_units units,
+void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
char *buf, int len);
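With the new blk_size argument, callers can pass sizes expressed in device blocks.
A small illustrative sketch (the block count, block size and log message are made up):

	#include <linux/kernel.h>
	#include <linux/string_helpers.h>

	static void report_capacity(u64 nr_blocks, u64 blk_size)
	{
		char buf[16];

		/* e.g. 4096 blocks of 512 bytes, formatted with SI (power-of-10) units */
		string_get_size(nr_blocks, blk_size, STRING_UNITS_10, buf, sizeof(buf));
		pr_info("capacity: %s\n", buf);
	}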
#define UNESCAPE_SPACE 0x01
@@ -47,22 +47,22 @@ static inline int string_unescape_any_inplace(char *buf)
#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
#define ESCAPE_HEX 0x20
-int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
+int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *esc);
static inline int string_escape_mem_any_np(const char *src, size_t isz,
- char **dst, size_t osz, const char *esc)
+ char *dst, size_t osz, const char *esc)
{
return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
}
-static inline int string_escape_str(const char *src, char **dst, size_t sz,
+static inline int string_escape_str(const char *src, char *dst, size_t sz,
unsigned int flags, const char *esc)
{
return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
}
-static inline int string_escape_str_any_np(const char *src, char **dst,
+static inline int string_escape_str_any_np(const char *src, char *dst,
size_t sz, const char *esc)
{
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index aadc6a04e1ac..807371357160 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -142,12 +142,18 @@ typedef __be32 rpc_fraghdr;
(RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4))
/*
- * RFC1833/RFC3530 rpcbind (v3+) well-known netid's.
+ * Well-known netids. See:
+ *
+ * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
*/
#define RPCBIND_NETID_UDP "udp"
#define RPCBIND_NETID_TCP "tcp"
+#define RPCBIND_NETID_RDMA "rdma"
+#define RPCBIND_NETID_SCTP "sctp"
#define RPCBIND_NETID_UDP6 "udp6"
#define RPCBIND_NETID_TCP6 "tcp6"
+#define RPCBIND_NETID_RDMA6 "rdma6"
+#define RPCBIND_NETID_SCTP6 "sctp6"
#define RPCBIND_NETID_LOCAL "local"
/*
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 64a0a0a97b23..c984c85981ea 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -41,11 +41,6 @@
#define _LINUX_SUNRPC_XPRTRDMA_H
/*
- * rpcbind (v3+) RDMA netid.
- */
-#define RPCBIND_NETID_RDMA "rdma"
-
-/*
* Constants. Max RPC/NFS header is big enough to account for
* additional marshaling buffers passed down by Linux client.
*
diff --git a/include/linux/sw842.h b/include/linux/sw842.h
new file mode 100644
index 000000000000..109ba041c2ae
--- /dev/null
+++ b/include/linux/sw842.h
@@ -0,0 +1,12 @@
+#ifndef __SW842_H__
+#define __SW842_H__
+
+#define SW842_MEM_COMPRESS (0xf000)
+
+int sw842_compress(const u8 *src, unsigned int srclen,
+ u8 *dst, unsigned int *destlen, void *wmem);
+
+int sw842_decompress(const u8 *src, unsigned int srclen,
+ u8 *dst, unsigned int *destlen);
+
+#endif
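A sketch of driving the new software 842 API declared above. Buffer sizes and the
round-trip structure are illustrative; clen/olen are in/out parameters giving the
available space on input and the bytes produced on output.

	#include <linux/slab.h>
	#include <linux/sw842.h>

	static int sw842_roundtrip(const u8 *src, unsigned int slen,
				   u8 *comp, unsigned int clen,
				   u8 *out, unsigned int olen)
	{
		void *wmem;
		int ret;

		wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
		if (!wmem)
			return -ENOMEM;

		ret = sw842_compress(src, slen, comp, &clen, wmem);
		if (!ret)
			ret = sw842_decompress(comp, clen, out, &olen);

		kfree(wmem);
		return ret;
	}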
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7067eca501e2..38874729dc5f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -307,7 +307,7 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_page(struct page *page);
+extern void deactivate_file_page(struct page *page);
extern void swap_setup(void);
extern void add_page_to_unevictable_list(struct page *page);
@@ -377,7 +377,6 @@ extern void end_swap_bio_write(struct bio *bio, int err);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
-extern void end_swap_bio_read(struct bio *bio, int err);
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b7361f831226..795d5fea5697 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -212,4 +212,7 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
#endif /* CONFIG_SYSCTL */
+int sysctl_max_threads(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#endif /* _LINUX_SYSCTL_H */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 1a7adb411647..48c3696e8645 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -58,6 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
struct tcp_fastopen_cookie {
s8 len;
u8 val[TCP_FASTOPEN_COOKIE_MAX];
+ bool exp; /* In RFC6994 experimental option format */
};
/* This defines a selective acknowledgement block. */
@@ -111,7 +112,7 @@ struct tcp_request_sock_ops;
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
- struct sock *listener; /* needed for TFO */
+ bool tfo_listener;
u32 rcv_isn;
u32 snt_isn;
u32 snt_synack; /* synack sent time */
@@ -144,10 +145,25 @@ struct tcp_sock {
* read the code and the spec side by side (and laugh ...)
* See RFC793 and RFC1122. The RFC writes these in capitals.
*/
+ u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+ * were received.
+ */
+ u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
+ */
u32 rcv_nxt; /* What we want to receive next */
u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
u32 snd_nxt; /* Next sequence we send */
+ u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
+ */
+ struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
u32 snd_una; /* First byte we want an ack for */
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
@@ -188,7 +204,9 @@ struct tcp_sock {
u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
+ syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+ save_syn:1, /* Save headers of SYN packet */
is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
@@ -236,7 +254,6 @@ struct tcp_sock {
u32 lost_out; /* Lost packets */
u32 sacked_out; /* SACK'd packets */
u32 fackets_out; /* FACK'd packets */
- u32 tso_deferred;
/* from STCP, retrans queue hinting */
struct sk_buff* lost_skb_hint;
@@ -317,6 +334,7 @@ struct tcp_sock {
* socket. Used to retransmit SYNACKs etc.
*/
struct request_sock *fastopen_rsk;
+ u32 *saved_syn;
};
enum tsq_flags {
@@ -384,4 +402,10 @@ static inline int fastopen_init_queue(struct sock *sk, int backlog)
return 0;
}
+static inline void tcp_saved_syn_free(struct tcp_sock *tp)
+{
+ kfree(tp->saved_syn);
+ tp->saved_syn = NULL;
+}
+
#endif /* _LINUX_TCP_H */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f8492da57ad3..3741ba1a652c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -13,8 +13,6 @@
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
-extern void tick_freeze(void);
-extern void tick_unfreeze(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
@@ -23,14 +21,20 @@ extern void tick_handover_do_timer(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
-static inline void tick_freeze(void) { }
-static inline void tick_unfreeze(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_handover_do_timer(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
+extern void tick_freeze(void);
+extern void tick_unfreeze(void);
+#else
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
+#endif
+
#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
@@ -134,6 +138,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
+static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
+{
+ if (tick_nohz_full_enabled())
+ cpumask_or(mask, mask, tick_nohz_full_mask);
+}
+
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
@@ -142,6 +152,7 @@ extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
+static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
diff --git a/include/linux/time64.h b/include/linux/time64.h
index a3831478d9cf..77b5df2acd2a 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -2,6 +2,7 @@
#define _LINUX_TIME64_H
#include <uapi/linux/time.h>
+#include <linux/math64.h>
typedef __s64 time64_t;
@@ -28,6 +29,7 @@ struct timespec64 {
#define FSEC_PER_SEC 1000000000000000LL
/* Located here for timespec[64]_valid_strict */
+#define TIME64_MAX ((s64)~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index fb86963859c7..25247220b4b7 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -49,6 +49,8 @@ struct tk_read_base {
* @offs_boot: Offset clock monotonic -> clock boottime
* @offs_tai: Offset clock monotonic -> clock tai
* @tai_offset: The current UTC to TAI offset in seconds
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
* @raw_time: Monotonic raw base time in timespec64 format
* @cycle_interval: Number of clock cycles in one NTP interval
* @xtime_interval: Number of clock shifted nano seconds in one NTP
@@ -60,6 +62,9 @@ struct tk_read_base {
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
* ntp shifted nano seconds.
+ * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING)
+ * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING)
+ * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
* we need to add to xtime (or xtime corrected for sub jiffie times)
@@ -85,6 +90,8 @@ struct timekeeper {
ktime_t offs_boot;
ktime_t offs_tai;
s32 tai_offset;
+ unsigned int clock_was_set_seq;
+ ktime_t next_leap_ktime;
struct timespec64 raw_time;
/* The following members are for timekeeping internal use */
@@ -104,6 +111,18 @@ struct timekeeper {
s64 ntp_error;
u32 ntp_error_shift;
u32 ntp_err_mult;
+#ifdef CONFIG_DEBUG_TIMEKEEPING
+ long last_warning;
+ /*
+ * These simple flag variables are managed
+ * without locks, which is racy, but they are
+ * ok since we don't really care about being
+ * super precise about how many events were
+ * seen, just that a problem was observed.
+ */
+ int underflow_seen;
+ int overflow_seen;
+#endif
};
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 99176af216af..3aa72e648650 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -163,6 +163,7 @@ extern ktime_t ktime_get(void);
extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
+extern u32 ktime_get_resolution_ns(void);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
@@ -266,7 +267,6 @@ extern int persistent_clock_is_local;
extern void read_persistent_clock(struct timespec *ts);
extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock(struct timespec *ts);
extern void read_boot_clock64(struct timespec64 *ts);
extern int update_persistent_clock(struct timespec now);
extern int update_persistent_clock64(struct timespec64 now);
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 8c5a197e1587..61aa61dc410c 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -14,27 +14,23 @@ struct timer_list {
* All fields that change during normal runtime grouped to the
* same cacheline
*/
- struct list_head entry;
- unsigned long expires;
- struct tvec_base *base;
-
- void (*function)(unsigned long);
- unsigned long data;
-
- int slack;
+ struct hlist_node entry;
+ unsigned long expires;
+ void (*function)(unsigned long);
+ unsigned long data;
+ u32 flags;
+ int slack;
#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
+ int start_pid;
+ void *start_site;
+ char start_comm[16];
#endif
#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
+ struct lockdep_map lockdep_map;
#endif
};
-extern struct tvec_base boot_tvec_bases;
-
#ifdef CONFIG_LOCKDEP
/*
* NB: because we have to copy the lockdep_map, setting the lockdep_map key
@@ -49,9 +45,6 @@ extern struct tvec_base boot_tvec_bases;
#endif
/*
- * Note that all tvec_bases are at least 4 byte aligned and lower two bits
- * of base in timer_list is guaranteed to be zero. Use them for flags.
- *
* A deferrable timer will work normally when the system is busy, but
* will not cause a CPU to come out of idle just to service it; instead,
* the timer will be serviced when the CPU eventually wakes up with a
@@ -65,17 +58,18 @@ extern struct tvec_base boot_tvec_bases;
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*/
-#define TIMER_DEFERRABLE 0x1LU
-#define TIMER_IRQSAFE 0x2LU
-
-#define TIMER_FLAG_MASK 0x3LU
+#define TIMER_CPUMASK 0x0007FFFF
+#define TIMER_MIGRATING 0x00080000
+#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
+#define TIMER_DEFERRABLE 0x00100000
+#define TIMER_IRQSAFE 0x00200000
#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
- .entry = { .prev = TIMER_ENTRY_STATIC }, \
+ .entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.expires = (_expires), \
.data = (_data), \
- .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
+ .flags = (_flags), \
.slack = -1, \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
@@ -168,7 +162,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
*/
static inline int timer_pending(const struct timer_list * timer)
{
- return timer->entry.next != NULL;
+ return timer->entry.pprev != NULL;
}
extern void add_timer_on(struct timer_list *timer, int cpu);
@@ -188,26 +182,16 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
/*
- * Return when the next timer-wheel timeout occurs (in absolute jiffies),
- * locks the timer base and does the comparison against the given
- * jiffie.
- */
-extern unsigned long get_next_timer_interrupt(unsigned long now);
-
-/*
* Timer-statistics info:
*/
#ifdef CONFIG_TIMER_STATS
extern int timer_stats_active;
-#define TIMER_STATS_FLAG_DEFERRABLE 0x1
-
extern void init_timer_stats(void);
extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm,
- unsigned int timer_flag);
+ void *timerf, char *comm, u32 flags);
extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
void *addr);
@@ -254,6 +238,15 @@ extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#include <linux/sysctl.h>
+
+extern unsigned int sysctl_timer_migration;
+int timer_migration_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
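
The timer_list rework replaces the old trick of hiding two flag bits in the low bits of the tvec_base pointer with an explicit u32 flags word: the owning CPU lives in the low bits (TIMER_CPUMASK) and TIMER_MIGRATING/TIMER_DEFERRABLE/TIMER_IRQSAFE sit above it, while timer_pending() now checks the hlist pprev pointer. The sketch below packs and unpacks a flags word with the mask values from the hunk; timer_flags_pack() is a hypothetical helper for illustration, not a kernel function.

/* Userspace sketch of the new timer flag layout: CPU number in the low
 * bits, flag bits above them, using the mask values from the hunk. */
#include <stdint.h>
#include <stdio.h>

#define TIMER_CPUMASK	 0x0007FFFFu
#define TIMER_MIGRATING	 0x00080000u
#define TIMER_DEFERRABLE 0x00100000u
#define TIMER_IRQSAFE	 0x00200000u

static uint32_t timer_flags_pack(unsigned int cpu, uint32_t flags)
{
	return (cpu & TIMER_CPUMASK) | (flags & ~TIMER_CPUMASK);
}

int main(void)
{
	uint32_t f = timer_flags_pack(5, TIMER_DEFERRABLE | TIMER_IRQSAFE);

	printf("cpu=%u deferrable=%d irqsafe=%d migrating=%d\n",
	       f & TIMER_CPUMASK,
	       !!(f & TIMER_DEFERRABLE),
	       !!(f & TIMER_IRQSAFE),
	       !!(f & TIMER_MIGRATING));
	return 0;
}
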
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index a520fd70a59f..7eec17ad7fa1 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -16,10 +16,10 @@ struct timerqueue_head {
};
-extern void timerqueue_add(struct timerqueue_head *head,
- struct timerqueue_node *node);
-extern void timerqueue_del(struct timerqueue_head *head,
- struct timerqueue_node *node);
+extern bool timerqueue_add(struct timerqueue_head *head,
+ struct timerqueue_node *node);
+extern bool timerqueue_del(struct timerqueue_head *head,
+ struct timerqueue_node *node);
extern struct timerqueue_node *timerqueue_iterate_next(
struct timerqueue_node *node);
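
timerqueue_add() and timerqueue_del() now return bool, which lets callers see whether the operation changed the queue's earliest entry and therefore whether a hardware timer needs reprogramming. Below is a userspace illustration of that idea using a small sorted array; it is not the kernel's rbtree-based implementation, and queue_add() is a made-up name.

/* Why a boolean return is handy for an ordered timer queue: the caller
 * only reprograms its hardware timer when the head changed. */
#include <stdbool.h>
#include <stdio.h>

#define NQ 8

struct queue { unsigned long expires[NQ]; int count; };

/* Insert keeping the array sorted; return true if the new entry became
 * the earliest one (i.e. the head changed). */
static bool queue_add(struct queue *q, unsigned long expires)
{
	int i = q->count++;

	while (i > 0 && q->expires[i - 1] > expires) {
		q->expires[i] = q->expires[i - 1];
		i--;
	}
	q->expires[i] = expires;
	return i == 0;
}

int main(void)
{
	struct queue q = { .count = 0 };

	printf("add 100 -> head changed: %d\n", queue_add(&q, 100));
	printf("add 200 -> head changed: %d\n", queue_add(&q, 200));
	printf("add  50 -> head changed: %d\n", queue_add(&q, 50));
	return 0;
}
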
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 909b6e43b694..73ddad1e0fa3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -191,8 +191,8 @@ static inline int cpu_to_mem(int cpu)
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
-#ifndef topology_thread_cpumask
-#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#ifndef topology_sibling_cpumask
+#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
@@ -201,7 +201,7 @@ static inline int cpu_to_mem(int cpu)
#ifdef CONFIG_SCHED_SMT
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
- return topology_thread_cpumask(cpu);
+ return topology_sibling_cpumask(cpu);
}
#endif
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
new file mode 100644
index 000000000000..5b727a17beee
--- /dev/null
+++ b/include/linux/tracefs.h
@@ -0,0 +1,45 @@
+/*
+ * tracefs.h - a pseudo file system for activating tracing
+ *
+ * Based on debugfs by: 2004 Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt <srostedt@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * tracefs is the file system that is used by the tracing infrastructure.
+ *
+ */
+
+#ifndef _TRACEFS_H_
+#define _TRACEFS_H_
+
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <linux/types.h>
+
+struct file_operations;
+
+#ifdef CONFIG_TRACING
+
+struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+struct dentry *tracefs_create_dir(const char *name, struct dentry *parent);
+
+void tracefs_remove(struct dentry *dentry);
+void tracefs_remove_recursive(struct dentry *dentry);
+
+struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
+ int (*mkdir)(const char *name),
+ int (*rmdir)(const char *name));
+
+bool tracefs_initialized(void);
+
+#endif /* CONFIG_TRACING */
+
+#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index c72851328ca9..a5f7f3ecafa3 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -36,6 +36,12 @@ struct tracepoint {
struct tracepoint_func __rcu *funcs;
};
+struct trace_enum_map {
+ const char *system;
+ const char *enum_string;
+ unsigned long enum_value;
+};
+
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
@@ -87,6 +93,8 @@ extern void syscall_unregfunc(void);
#define PARAMS(args...) args
+#define TRACE_DEFINE_ENUM(x)
+
#endif /* _LINUX_TRACEPOINT_H */
/*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 358a337af598..d76631f615c2 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -339,6 +339,7 @@ struct tty_file_private {
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DEBUG 4 /* Debugging */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
+#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
@@ -462,7 +463,6 @@ extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
extern void no_tty(void);
-extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_port *port);
extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
extern void tty_buffer_init(struct tty_port *port);
@@ -491,6 +491,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
+extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
diff --git a/include/linux/types.h b/include/linux/types.h
index 6747247e3f9f..8715287c3b1f 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -139,19 +139,21 @@ typedef unsigned long blkcnt_t;
*/
#define pgoff_t unsigned long
-/* A dma_addr_t can hold any valid DMA or bus address for the platform */
+/*
+ * A dma_addr_t can hold any valid DMA address, i.e., any address returned
+ * by the DMA API.
+ *
+ * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
+ * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
+ * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
+ * so they don't care about the size of the actual bus addresses.
+ */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
-#endif /* dma_addr_t */
-
-#ifdef __CHECKER__
-#else
-#endif
-#ifdef __CHECK_ENDIAN__
-#else
#endif
+
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 4b4439e75f45..df89c9bcba7d 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -68,11 +68,12 @@ struct u64_stats_sync {
};
+static inline void u64_stats_init(struct u64_stats_sync *syncp)
+{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-# define u64_stats_init(syncp) seqcount_init(syncp.seq)
-#else
-# define u64_stats_init(syncp) do { } while (0)
+ seqcount_init(&syncp->seq);
#endif
+}
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
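
Turning u64_stats_init() from a macro into an inline function gives callers real type checking and keeps the &syncp->seq addressing in one place instead of depending on how the macro argument is spelled. A compilable sketch with simplified stand-in structs is shown below; seqcount here is just a plain counter, not the kernel's seqcount_t.

/* Sketch of why the inline form is nicer than the old macro: the body
 * sees a properly typed pointer, so &syncp->seq is spelled once. */
#include <stdio.h>

struct seqcount { unsigned sequence; };
struct u64_stats_sync { struct seqcount seq; };

static inline void seqcount_init(struct seqcount *s)
{
	s->sequence = 0;
}

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
	/* On 32-bit SMP kernels this initializes the seqcount; elsewhere
	 * it compiles away.  The demo always does it. */
	seqcount_init(&syncp->seq);
}

int main(void)
{
	struct u64_stats_sync stats;

	u64_stats_init(&stats);
	printf("sequence = %u\n", stats.seq.sequence);
	return 0;
}
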
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ecd3319dac33..ae572c138607 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,21 +1,30 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
-#include <linux/preempt.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
+static __always_inline void pagefault_disabled_inc(void)
+{
+ current->pagefault_disabled++;
+}
+
+static __always_inline void pagefault_disabled_dec(void)
+{
+ current->pagefault_disabled--;
+ WARN_ON(current->pagefault_disabled < 0);
+}
+
/*
- * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
+ * These routines enable/disable the pagefault handler. If disabled, it will
+ * not take any locks and go straight to the fixup table.
*
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * User access methods will not sleep when called from a pagefault_disabled()
+ * environment.
*/
static inline void pagefault_disable(void)
{
- preempt_count_inc();
+ pagefault_disabled_inc();
/*
* make sure to have issued the store before a pagefault
* can hit.
@@ -25,18 +34,31 @@ static inline void pagefault_disable(void)
static inline void pagefault_enable(void)
{
-#ifndef CONFIG_PREEMPT
/*
* make sure to issue those last loads/stores before enabling
* the pagefault handler again.
*/
barrier();
- preempt_count_dec();
-#else
- preempt_enable();
-#endif
+ pagefault_disabled_dec();
}
+/*
+ * Is the pagefault handler disabled? If so, user access methods will not sleep.
+ */
+#define pagefault_disabled() (current->pagefault_disabled != 0)
+
+/*
+ * The pagefault handler is in general disabled by pagefault_disable() or
+ * when in irq context (via in_atomic()).
+ *
+ * This function should only be used by the fault handlers. Other users should
+ * stick to pagefault_disabled().
+ * Please NEVER use preempt_disable() to disable the fault handler. With
+ * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
+ * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
+ */
+#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
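
pagefault_disable()/pagefault_enable() now maintain a per-task pagefault_disabled counter instead of piggybacking on the preempt count, and pagefault_disabled()/faulthandler_disabled() query it. The userspace model below mirrors the counting behavior shown in the hunk; the task struct, the 'current' stand-in and the assert in place of WARN_ON are simplifications.

/* Userspace model of the per-task pagefault_disabled counter: nested
 * disable/enable pairs, plus a predicate for whether faults may sleep. */
#include <assert.h>
#include <stdio.h>

struct task { int pagefault_disabled; };
static struct task current_task;	/* stand-in for 'current' */

static void pagefault_disable(void)
{
	current_task.pagefault_disabled++;
}

static void pagefault_enable(void)
{
	current_task.pagefault_disabled--;
	assert(current_task.pagefault_disabled >= 0);	/* mirrors the WARN_ON */
}

static int pagefault_disabled(void)
{
	return current_task.pagefault_disabled != 0;
}

int main(void)
{
	pagefault_disable();
	pagefault_disable();		/* nesting is fine: it is a counter */
	printf("disabled: %d\n", pagefault_disabled());
	pagefault_enable();
	pagefault_enable();
	printf("disabled: %d\n", pagefault_disabled());
	return 0;
}
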
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 247cfdcc4b08..87c094961bd5 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -34,7 +34,7 @@ static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
-static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask)
+static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
return (num + net_hash_mix(net)) & mask;
}
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index 2d1f9b627f91..03835522dfcb 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -29,6 +29,7 @@ typedef struct {
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
+#ifdef CONFIG_MULTIUSER
static inline uid_t __kuid_val(kuid_t uid)
{
return uid.val;
@@ -38,6 +39,17 @@ static inline gid_t __kgid_val(kgid_t gid)
{
return gid.val;
}
+#else
+static inline uid_t __kuid_val(kuid_t uid)
+{
+ return 0;
+}
+
+static inline gid_t __kgid_val(kgid_t gid)
+{
+ return 0;
+}
+#endif
#define GLOBAL_ROOT_UID KUIDT_INIT(0)
#define GLOBAL_ROOT_GID KGIDT_INIT(0)
@@ -97,12 +109,12 @@ static inline bool gid_lte(kgid_t left, kgid_t right)
static inline bool uid_valid(kuid_t uid)
{
- return !uid_eq(uid, INVALID_UID);
+ return __kuid_val(uid) != (uid_t) -1;
}
static inline bool gid_valid(kgid_t gid)
{
- return !gid_eq(gid, INVALID_GID);
+ return __kgid_val(gid) != (gid_t) -1;
}
#ifdef CONFIG_USER_NS
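
With CONFIG_MULTIUSER=n, __kuid_val()/__kgid_val() always return 0, so the old !uid_eq(uid, INVALID_UID) form would have reported every id as invalid; comparing the raw value against (uid_t)-1 keeps uid_valid()/gid_valid() meaningful in both configurations. The small check below only demonstrates the (uid_t)-1 sentinel arithmetic; uid_t_demo and uid_valid_demo() are local stand-ins.

/* The invalid id is (uid_t)-1, i.e. the all-ones value of the unsigned type. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t uid_t_demo;	/* stand-in for the kernel's uid_t */

static int uid_valid_demo(uid_t_demo val)
{
	return val != (uid_t_demo)-1;
}

int main(void)
{
	printf("(uid_t)-1 = %u\n", (unsigned)(uid_t_demo)-1);	/* 4294967295 */
	printf("uid 0 valid:  %d\n", uid_valid_demo(0));
	printf("uid -1 valid: %d\n", uid_valid_demo((uid_t_demo)-1));
	return 0;
}
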
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 71880299ed48..8b01e1c3c614 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,6 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
@@ -111,6 +112,14 @@ static inline bool iter_is_iovec(struct iov_iter *i)
}
/*
+ * Get one of READ or WRITE out of iter->type without any other flags OR'd in
+ * with it.
+ *
+ * The ?: is just for type safety.
+ */
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+
+/*
* Cap the iov_iter by given limit; note that the second argument is
* *not* the new size - it's upper limit for such. Passing it a value
* greater than the amount of data in iov_iter is fine - it'll just do
@@ -139,4 +148,18 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+int import_iovec(int type, const struct iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i);
+
+#ifdef CONFIG_COMPAT
+struct compat_iovec;
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i);
+#endif
+
+int import_single_range(int type, void __user *buf, size_t len,
+ struct iovec *iov, struct iov_iter *i);
+
#endif
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index a7f2604c5f25..7f5f78bd15ad 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -77,6 +77,8 @@
/* Cannot handle ATA_12 or ATA_16 CDBs */ \
US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
+ US_FLAG(MAX_SECTORS_240, 0x08000000) \
+ /* Sets max_sectors to 240 */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
new file mode 100644
index 000000000000..f9b2ce58039b
--- /dev/null
+++ b/include/linux/util_macros.h
@@ -0,0 +1,40 @@
+#ifndef _LINUX_HELPER_MACROS_H_
+#define _LINUX_HELPER_MACROS_H_
+
+#define __find_closest(x, a, as, op) \
+({ \
+ typeof(as) __fc_i, __fc_as = (as) - 1; \
+ typeof(x) __fc_x = (x); \
+ typeof(*a) const *__fc_a = (a); \
+ for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \
+ if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \
+ __fc_a[__fc_i + 1], 2)) \
+ break; \
+ } \
+ (__fc_i); \
+})
+
+/**
+ * find_closest - locate the closest element in a sorted array
+ * @x: The reference value.
+ * @a: The array in which to look for the closest element. Must be sorted
+ * in ascending order.
+ * @as: Size of 'a'.
+ *
+ * Returns the index of the element closest to 'x'.
+ */
+#define find_closest(x, a, as) __find_closest(x, a, as, <=)
+
+/**
+ * find_closest_descending - locate the closest element in a sorted array
+ * @x: The reference value.
+ * @a: The array in which to look for the closest element. Must be sorted
+ * in descending order.
+ * @as: Size of 'a'.
+ *
+ * Similar to find_closest() but 'a' is expected to be sorted in descending
+ * order.
+ */
+#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
+
+#endif
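
find_closest() relies on DIV_ROUND_CLOSEST from <linux/kernel.h> plus GCC's typeof and statement-expression extensions, so it can be exercised directly in userspace with gcc or clang once that helper is supplied. A small test follows; the rates[] table and the simplified DIV_ROUND_CLOSEST (valid for positive operands) are local to the example.

/* Userspace test of the find_closest() macro from the new header. */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, divisor) (((x) + ((divisor) / 2)) / (divisor))

#define __find_closest(x, a, as, op)					\
({									\
	typeof(as) __fc_i, __fc_as = (as) - 1;				\
	typeof(x) __fc_x = (x);						\
	typeof(*a) const *__fc_a = (a);					\
	for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) {			\
		if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] +	\
						__fc_a[__fc_i + 1], 2))	\
			break;						\
	}								\
	(__fc_i);							\
})

#define find_closest(x, a, as) __find_closest(x, a, as, <=)

int main(void)
{
	static const int rates[] = { 100, 200, 400, 800 };	/* ascending */
	int idx = find_closest(290, rates, 4);

	/* 290 is 90 away from 200 and 110 away from 400, so idx is 1 */
	printf("closest to 290 is rates[%d] = %d\n", idx, rates[idx]);
	return 0;
}
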
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 049b2f497bc7..ddb440975382 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -14,6 +14,8 @@
#include <linux/iommu.h>
#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
#include <uapi/linux/vfio.h>
/**
@@ -110,4 +112,27 @@ static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
return -ENOTTY;
}
#endif /* CONFIG_EEH */
+
+/*
+ * IRQfd - generic
+ */
+struct virqfd {
+ void *opaque;
+ struct eventfd_ctx *eventfd;
+ int (*handler)(void *, void *);
+ void (*thread)(void *, void *);
+ void *data;
+ struct work_struct inject;
+ wait_queue_t wait;
+ poll_table pt;
+ struct work_struct shutdown;
+ struct virqfd **pvirqfd;
+};
+
+extern int vfio_virqfd_enable(void *opaque,
+ int (*handler)(void *, void *),
+ void (*thread)(void *, void *),
+ void *data, struct virqfd **pvirqfd, int fd);
+extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
+
#endif /* VFIO_H */
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index c37bd4d06739..8c3b412d84df 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,8 +65,13 @@ struct pci_dev;
* out of the arbitration process (and can be safe to take
* interrupts at any time.
*/
+#if defined(CONFIG_VGA_ARB)
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes) { };
+#endif
/**
* vga_get - acquire & locks VGA resources
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 28f0e65b9a11..8f4d4bfa6d46 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -108,8 +108,6 @@ struct virtio_device {
void *priv;
};
-bool virtio_device_is_legacy_only(struct virtio_device_id id);
-
static inline struct virtio_device *dev_to_virtio(struct device *_dev)
{
return container_of(_dev, struct virtio_device, dev);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index ca3ed78e5ec7..1e306f727edc 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -298,13 +298,6 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
} \
} while(0)
-static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
-{
- u8 ret;
- vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
-}
-
/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
unsigned int offset,
@@ -326,7 +319,6 @@ static inline void __virtio_cread_many(struct virtio_device *vdev,
} while (gen != old);
}
-
static inline void virtio_cread_bytes(struct virtio_device *vdev,
unsigned int offset,
void *buf, size_t len)
@@ -334,6 +326,13 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev,
__virtio_cread_many(vdev, offset, buf, len, 1);
}
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+ u8 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
static inline void virtio_cwrite8(struct virtio_device *vdev,
unsigned int offset, u8 val)
{
@@ -374,7 +373,6 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
unsigned int offset)
{
u64 ret;
- vdev->config->get(vdev, offset, &ret, sizeof(ret));
__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
return virtio64_to_cpu(vdev, (__force __virtio64)ret);
}
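
Dropping the direct config->get() call from virtio_cread64() leaves the read entirely to __virtio_cread_many(), whose generation loop retries until the device's config generation is stable, so the 64-bit value cannot mix halves from two different configurations. The userspace sketch below shows that retry-until-stable pattern in isolation; struct fake_dev and its plain generation counter are hypothetical, not the virtio config API.

/* Userspace sketch of the retry-until-generation-stable read pattern. */
#include <stdint.h>
#include <stdio.h>

struct fake_dev {
	uint32_t generation;
	uint64_t config_field;
};

static uint32_t get_generation(const struct fake_dev *d)
{
	return d->generation;
}

static uint64_t read_config64(const struct fake_dev *d)
{
	uint32_t gen, old;
	uint64_t val;

	do {
		old = get_generation(d);
		val = d->config_field;	/* possibly a multi-part read on real hardware */
		gen = get_generation(d);
	} while (gen != old);		/* retry if the config changed mid-read */

	return val;
}

int main(void)
{
	struct fake_dev dev = { .generation = 1, .config_field = 0x1122334455667788ULL };

	printf("config = 0x%llx\n", (unsigned long long)read_config64(&dev));
	return 0;
}
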
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 67e06fe18c03..8e50888a6d59 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -21,19 +21,20 @@
* actually quite cheap.
*/
-#ifdef CONFIG_SMP
static inline void virtio_mb(bool weak_barriers)
{
+#ifdef CONFIG_SMP
if (weak_barriers)
smp_mb();
else
+#endif
mb();
}
static inline void virtio_rmb(bool weak_barriers)
{
if (weak_barriers)
- smp_rmb();
+ dma_rmb();
else
rmb();
}
@@ -41,26 +42,10 @@ static inline void virtio_rmb(bool weak_barriers)
static inline void virtio_wmb(bool weak_barriers)
{
if (weak_barriers)
- smp_wmb();
+ dma_wmb();
else
wmb();
}
-#else
-static inline void virtio_mb(bool weak_barriers)
-{
- mb();
-}
-
-static inline void virtio_rmb(bool weak_barriers)
-{
- rmb();
-}
-
-static inline void virtio_wmb(bool weak_barriers)
-{
- wmb();
-}
-#endif
struct virtio_device;
struct virtqueue;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2db83349865b..d69ac4ecc88b 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -969,7 +969,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *);
* on that signal.
*/
static inline int
-wait_on_bit(void *word, int bit, unsigned mode)
+wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -994,7 +994,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
* on that signal.
*/
static inline int
-wait_on_bit_io(void *word, int bit, unsigned mode)
+wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -1020,7 +1020,8 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
* received a signal and the mode permitted wakeup on that signal.
*/
static inline int
-wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
+wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
+ unsigned long timeout)
{
might_sleep();
if (!test_bit(bit, word))
@@ -1047,7 +1048,8 @@ wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
* on that signal.
*/
static inline int
-wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
+ unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -1075,7 +1077,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock(void *word, int bit, unsigned mode)
+wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
@@ -1099,7 +1101,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
@@ -1125,7 +1127,8 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
+ unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 395b70e0eccf..a746bf5216f8 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -137,4 +137,12 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+void watchdog_nmi_disable_all(void);
+void watchdog_nmi_enable_all(void);
+#else
+static inline void watchdog_nmi_disable_all(void) {}
+static inline void watchdog_nmi_enable_all(void) {}
+#endif
+
#endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
index a9c723be1acf..95704cd4cfab 100644
--- a/include/linux/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -26,28 +26,6 @@
#include <linux/err.h>
-/* Reference clock values */
-enum {
- WL12XX_REFCLOCK_19 = 0, /* 19.2 MHz */
- WL12XX_REFCLOCK_26 = 1, /* 26 MHz */
- WL12XX_REFCLOCK_38 = 2, /* 38.4 MHz */
- WL12XX_REFCLOCK_52 = 3, /* 52 MHz */
- WL12XX_REFCLOCK_38_XTAL = 4, /* 38.4 MHz, XTAL */
- WL12XX_REFCLOCK_26_XTAL = 5, /* 26 MHz, XTAL */
-};
-
-/* TCXO clock values */
-enum {
- WL12XX_TCXOCLOCK_19_2 = 0, /* 19.2MHz */
- WL12XX_TCXOCLOCK_26 = 1, /* 26 MHz */
- WL12XX_TCXOCLOCK_38_4 = 2, /* 38.4MHz */
- WL12XX_TCXOCLOCK_52 = 3, /* 52 MHz */
- WL12XX_TCXOCLOCK_16_368 = 4, /* 16.368 MHz */
- WL12XX_TCXOCLOCK_32_736 = 5, /* 32.736 MHz */
- WL12XX_TCXOCLOCK_16_8 = 6, /* 16.8 MHz */
- WL12XX_TCXOCLOCK_33_6 = 7, /* 33.6 MHz */
-};
-
struct wl1251_platform_data {
int power_gpio;
/* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
@@ -55,23 +33,8 @@ struct wl1251_platform_data {
bool use_eeprom;
};
-struct wl12xx_platform_data {
- int irq;
- int board_ref_clock;
- int board_tcxo_clock;
- unsigned long platform_quirks;
- bool pwr_in_suspend;
-};
-
-/* Platform does not support level trigger interrupts */
-#define WL12XX_PLATFORM_QUIRK_EDGE_IRQ BIT(0)
-
#ifdef CONFIG_WILINK_PLATFORM_DATA
-int wl12xx_set_platform_data(const struct wl12xx_platform_data *data);
-
-struct wl12xx_platform_data *wl12xx_get_platform_data(void);
-
int wl1251_set_platform_data(const struct wl1251_platform_data *data);
struct wl1251_platform_data *wl1251_get_platform_data(void);
@@ -79,18 +42,6 @@ struct wl1251_platform_data *wl1251_get_platform_data(void);
#else
static inline
-int wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
-{
- return -ENOSYS;
-}
-
-static inline
-struct wl12xx_platform_data *wl12xx_get_platform_data(void)
-{
- return ERR_PTR(-ENODATA);
-}
-
-static inline
int wl1251_set_platform_data(const struct wl1251_platform_data *data)
{
return -ENOSYS;
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b2dd371ec0ca..b333c945e571 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,6 +7,8 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
+#include <linux/flex_proportions.h>
+#include <linux/backing-dev-defs.h>
DECLARE_PER_CPU(int, dirty_throttle_leaks);
@@ -84,18 +86,95 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct bdi_writeback *wb; /* wb this writeback is issued under */
+ struct inode *inode; /* inode being written out */
+
+ /* foreign inode detection, see wbc_detach_inode() */
+ int wb_id; /* current wb id */
+ int wb_lcand_id; /* last foreign candidate wb id */
+ int wb_tcand_id; /* this foreign candidate wb id */
+ size_t wb_bytes; /* bytes written by current wb */
+ size_t wb_lcand_bytes; /* bytes written by last candidate */
+ size_t wb_tcand_bytes; /* bytes written by this candidate */
+#endif
};
/*
+ * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
+ * and are measured against each other in. There always is one global
+ * domain, global_wb_domain, that every wb in the system is a member of.
+ * This allows measuring the relative bandwidth of each wb to distribute
+ * dirtyable memory accordingly.
+ */
+struct wb_domain {
+ spinlock_t lock;
+
+ /*
+ * Scale the writeback cache size proportional to the relative
+ * writeout speed.
+ *
+ * We do this by keeping a floating proportion between BDIs, based
+ * on page writeback completions [end_page_writeback()]. Those
+ * devices that write out pages fastest will get the larger share,
+ * while the slower will get a smaller share.
+ *
+ * We use page writeout completions because we are interested in
+ * getting rid of dirty pages. Having them written out is the
+ * primary goal.
+ *
+ * We introduce a concept of time, a period over which we measure
+ * these events, because demand can/will vary over time. The length
+ * of this period itself is measured in page writeback completions.
+ */
+ struct fprop_global completions;
+ struct timer_list period_timer; /* timer for aging of completions */
+ unsigned long period_time;
+
+ /*
+ * The dirtyable memory and dirty threshold could be suddenly
+ * knocked down by a large amount (eg. on the startup of KVM in a
+ * swapless system). This may throw the system into deep dirty
+ * exceeded state and throttle heavy/light dirtiers alike. To
+ * retain good responsiveness, maintain global_dirty_limit for
+ * tracking slowly down to the knocked down dirty threshold.
+ *
+ * Both fields are protected by ->lock.
+ */
+ unsigned long dirty_limit_tstamp;
+ unsigned long dirty_limit;
+};
+
+/**
+ * wb_domain_size_changed - memory available to a wb_domain has changed
+ * @dom: wb_domain of interest
+ *
+ * This function should be called when the amount of memory available to
+ * @dom has changed. It resets @dom's dirty limit parameters to prevent
+ * the past values which don't match the current configuration from skewing
+ * dirty throttling. Without this, when memory size of a wb_domain is
+ * greatly reduced, the dirty throttling logic may allow too many pages to
+ * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
+ * that situation.
+ */
+static inline void wb_domain_size_changed(struct wb_domain *dom)
+{
+ spin_lock(&dom->lock);
+ dom->dirty_limit_tstamp = jiffies;
+ dom->dirty_limit = 0;
+ spin_unlock(&dom->lock);
+}
+
+/*
* fs/fs-writeback.c
*/
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
-int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
-int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
- enum wb_reason reason);
+bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+ enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
@@ -107,6 +186,123 @@ static inline void wait_on_inode(struct inode *inode)
wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+#include <linux/cgroup.h>
+#include <linux/bio.h>
+
+void __inode_attach_wb(struct inode *inode, struct page *page);
+void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ struct inode *inode)
+ __releases(&inode->i_lock);
+void wbc_detach_inode(struct writeback_control *wbc);
+void wbc_account_io(struct writeback_control *wbc, struct page *page,
+ size_t bytes);
+
+/**
+ * inode_attach_wb - associate an inode with its wb
+ * @inode: inode of interest
+ * @page: page being dirtied (may be NULL)
+ *
+ * If @inode doesn't have its wb, associate it with the wb matching the
+ * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o
+ * @inode->i_lock.
+ */
+static inline void inode_attach_wb(struct inode *inode, struct page *page)
+{
+ if (!inode->i_wb)
+ __inode_attach_wb(inode, page);
+}
+
+/**
+ * inode_detach_wb - disassociate an inode from its wb
+ * @inode: inode of interest
+ *
+ * @inode is being freed. Detach from its wb.
+ */
+static inline void inode_detach_wb(struct inode *inode)
+{
+ if (inode->i_wb) {
+ wb_put(inode->i_wb);
+ inode->i_wb = NULL;
+ }
+}
+
+/**
+ * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * This function is to be used by __filemap_fdatawrite_range(), which is an
+ * alternative entry point into writeback code, and first ensures @inode is
+ * associated with a bdi_writeback and attaches it to @wbc.
+ */
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ inode_attach_wb(inode, NULL);
+ wbc_attach_and_unlock_inode(wbc, inode);
+}
+
+/**
+ * wbc_init_bio - writeback specific initializtion of bio
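+ *	(sic: "initializtion" in the original comment; read "initialization")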
+ * @wbc: writeback_control for the writeback in progress
+ * @bio: bio to be initialized
+ *
+ * @bio is a part of the writeback in progress controlled by @wbc. Perform
+ * writeback specific initialization. This is used to apply the cgroup
+ * writeback context.
+ */
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+ /*
+ * pageout() path doesn't attach @wbc to the inode being written
+ * out. This is intentional as we don't want the function to block
+ * behind a slow cgroup. Ultimately, we want pageout() to kick off
+ * regular writeback instead of writing things out itself.
+ */
+ if (wbc->wb)
+ bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+}
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline void inode_attach_wb(struct inode *inode, struct page *page)
+{
+}
+
+static inline void inode_detach_wb(struct inode *inode)
+{
+}
+
+static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ struct inode *inode)
+ __releases(&inode->i_lock)
+{
+ spin_unlock(&inode->i_lock);
+}
+
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode)
+{
+}
+
+static inline void wbc_detach_inode(struct writeback_control *wbc)
+{
+}
+
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+}
+
+static inline void wbc_account_io(struct writeback_control *wbc,
+ struct page *page, size_t bytes)
+{
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
/*
* mm/page-writeback.c
*/
@@ -120,8 +316,12 @@ static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
bool zone_dirty_ok(struct zone *zone);
+int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
+#ifdef CONFIG_CGROUP_WRITEBACK
+void wb_domain_exit(struct wb_domain *dom);
+#endif
-extern unsigned long global_dirty_limit;
+extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
extern int dirty_background_ratio;
@@ -155,19 +355,12 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
-unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
- unsigned long dirty);
-
-void __bdi_update_bandwidth(struct backing_dev_info *bdi,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
- unsigned long bdi_thresh,
- unsigned long bdi_dirty,
- unsigned long start_time);
+unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
+void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
+bool wb_over_bg_thresh(struct bdi_writeback *wb);
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data);
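
The wb_domain comment above describes distributing the dirty limit across writeback contexts in proportion to their recent writeout completions. The toy calculation below illustrates only that proportional-share idea; the device names and page counts are invented, and the kernel's flex_proportions code additionally ages completions over time.

/* Toy illustration of the proportional-share idea, not the kernel's
 * flex_proportions implementation. */
#include <stdio.h>

int main(void)
{
	const char *wb_name[] = { "sda", "sdb", "nfs" };
	unsigned long completions[] = { 800, 150, 50 };	/* recent page writebacks */
	unsigned long dirty_limit = 4096;		/* pages available for dirtying */
	unsigned long total = 0;
	int i;

	for (i = 0; i < 3; i++)
		total += completions[i];

	for (i = 0; i < 3; i++)
		printf("%s: %lu of %lu dirtyable pages\n", wb_name[i],
		       dirty_limit * completions[i] / total, dirty_limit);
	return 0;
}
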
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 3283c6a55425..1338190b5478 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -47,5 +47,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
unsigned long zs_get_total_pages(struct zs_pool *pool);
+unsigned long zs_compact(struct zs_pool *pool);
#endif
diff --git a/include/media/adv7604.h b/include/media/adv7604.h
index aa1c4477722d..9ecf353160c1 100644
--- a/include/media/adv7604.h
+++ b/include/media/adv7604.h
@@ -47,16 +47,16 @@ enum adv7604_bus_order {
};
/* Input Color Space (IO register 0x02, [7:4]) */
-enum adv7604_inp_color_space {
- ADV7604_INP_COLOR_SPACE_LIM_RGB = 0,
- ADV7604_INP_COLOR_SPACE_FULL_RGB = 1,
- ADV7604_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
- ADV7604_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
- ADV7604_INP_COLOR_SPACE_XVYCC_601 = 4,
- ADV7604_INP_COLOR_SPACE_XVYCC_709 = 5,
- ADV7604_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
- ADV7604_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
- ADV7604_INP_COLOR_SPACE_AUTO = 0xf,
+enum adv76xx_inp_color_space {
+ ADV76XX_INP_COLOR_SPACE_LIM_RGB = 0,
+ ADV76XX_INP_COLOR_SPACE_FULL_RGB = 1,
+ ADV76XX_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
+ ADV76XX_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
+ ADV76XX_INP_COLOR_SPACE_XVYCC_601 = 4,
+ ADV76XX_INP_COLOR_SPACE_XVYCC_709 = 5,
+ ADV76XX_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
+ ADV76XX_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
+ ADV76XX_INP_COLOR_SPACE_AUTO = 0xf,
};
/* Select output format (IO register 0x03, [4:2]) */
@@ -66,38 +66,39 @@ enum adv7604_op_format_mode_sel {
ADV7604_OP_FORMAT_MODE2 = 0x08,
};
-enum adv7604_drive_strength {
- ADV7604_DR_STR_MEDIUM_LOW = 1,
- ADV7604_DR_STR_MEDIUM_HIGH = 2,
- ADV7604_DR_STR_HIGH = 3,
+enum adv76xx_drive_strength {
+ ADV76XX_DR_STR_MEDIUM_LOW = 1,
+ ADV76XX_DR_STR_MEDIUM_HIGH = 2,
+ ADV76XX_DR_STR_HIGH = 3,
};
-enum adv7604_int1_config {
- ADV7604_INT1_CONFIG_OPEN_DRAIN,
- ADV7604_INT1_CONFIG_ACTIVE_LOW,
- ADV7604_INT1_CONFIG_ACTIVE_HIGH,
- ADV7604_INT1_CONFIG_DISABLED,
+/* INT1 Configuration (IO register 0x40, [1:0]) */
+enum adv76xx_int1_config {
+ ADV76XX_INT1_CONFIG_OPEN_DRAIN,
+ ADV76XX_INT1_CONFIG_ACTIVE_LOW,
+ ADV76XX_INT1_CONFIG_ACTIVE_HIGH,
+ ADV76XX_INT1_CONFIG_DISABLED,
};
-enum adv7604_page {
- ADV7604_PAGE_IO,
+enum adv76xx_page {
+ ADV76XX_PAGE_IO,
ADV7604_PAGE_AVLINK,
- ADV7604_PAGE_CEC,
- ADV7604_PAGE_INFOFRAME,
+ ADV76XX_PAGE_CEC,
+ ADV76XX_PAGE_INFOFRAME,
ADV7604_PAGE_ESDP,
ADV7604_PAGE_DPP,
- ADV7604_PAGE_AFE,
- ADV7604_PAGE_REP,
- ADV7604_PAGE_EDID,
- ADV7604_PAGE_HDMI,
- ADV7604_PAGE_TEST,
- ADV7604_PAGE_CP,
+ ADV76XX_PAGE_AFE,
+ ADV76XX_PAGE_REP,
+ ADV76XX_PAGE_EDID,
+ ADV76XX_PAGE_HDMI,
+ ADV76XX_PAGE_TEST,
+ ADV76XX_PAGE_CP,
ADV7604_PAGE_VDP,
- ADV7604_PAGE_MAX,
+ ADV76XX_PAGE_MAX,
};
/* Platform dependent definition */
-struct adv7604_platform_data {
+struct adv76xx_platform_data {
/* DIS_PWRDNB: 1 if the PWRDNB pin is unused and unconnected */
unsigned disable_pwrdnb:1;
@@ -116,7 +117,7 @@ struct adv7604_platform_data {
enum adv7604_op_format_mode_sel op_format_mode_sel;
/* Configuration of the INT1 pin */
- enum adv7604_int1_config int1_config;
+ enum adv76xx_int1_config int1_config;
/* IO register 0x02 */
unsigned alt_gamma:1;
@@ -134,9 +135,9 @@ struct adv7604_platform_data {
unsigned inv_llc_pol:1;
/* IO register 0x14 */
- enum adv7604_drive_strength dr_str_data;
- enum adv7604_drive_strength dr_str_clk;
- enum adv7604_drive_strength dr_str_sync;
+ enum adv76xx_drive_strength dr_str_data;
+ enum adv76xx_drive_strength dr_str_clk;
+ enum adv76xx_drive_strength dr_str_sync;
/* IO register 0x30 */
unsigned output_bus_lsb_to_msb:1;
@@ -145,11 +146,11 @@ struct adv7604_platform_data {
unsigned hdmi_free_run_mode;
/* i2c addresses: 0 == use default */
- u8 i2c_addresses[ADV7604_PAGE_MAX];
+ u8 i2c_addresses[ADV76XX_PAGE_MAX];
};
-enum adv7604_pad {
- ADV7604_PAD_HDMI_PORT_A = 0,
+enum adv76xx_pad {
+ ADV76XX_PAD_HDMI_PORT_A = 0,
ADV7604_PAD_HDMI_PORT_B = 1,
ADV7604_PAD_HDMI_PORT_C = 2,
ADV7604_PAD_HDMI_PORT_D = 3,
@@ -158,7 +159,7 @@ enum adv7604_pad {
/* The source pad is either 1 (ADV7611) or 6 (ADV7604) */
ADV7604_PAD_SOURCE = 6,
ADV7611_PAD_SOURCE = 1,
- ADV7604_PAD_MAX = 7,
+ ADV76XX_PAD_MAX = 7,
};
#define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE (V4L2_CID_DV_CLASS_BASE + 0x1000)
@@ -166,7 +167,7 @@ enum adv7604_pad {
#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002)
/* notify events */
-#define ADV7604_HOTPLUG 1
-#define ADV7604_FMT_CHANGE 2
+#define ADV76XX_HOTPLUG 1
+#define ADV76XX_FMT_CHANGE 2
#endif
diff --git a/include/media/davinci/vpfe_capture.h b/include/media/davinci/vpfe_capture.h
index 288772e6900a..28bcd71cdd26 100644
--- a/include/media/davinci/vpfe_capture.h
+++ b/include/media/davinci/vpfe_capture.h
@@ -102,7 +102,7 @@ struct vpfe_config {
struct vpfe_device {
/* V4l2 specific parameters */
/* Identifies video device for this channel */
- struct video_device *video_dev;
+ struct video_device video_dev;
/* sub devices */
struct v4l2_subdev **sd;
/* vpfe cfg */
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index e00459185d20..0c003d817493 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -44,6 +44,15 @@ struct media_pad {
unsigned long flags; /* Pad flags (MEDIA_PAD_FL_*) */
};
+/**
+ * struct media_entity_operations - Media entity operations
+ * @link_setup: Notify the entity of link changes. The operation can
+ * return an error, in which case link setup will be
+ * cancelled. Optional.
+ * @link_validate: Return whether a link is valid from the entity point of
+ * view. The media_entity_pipeline_start() function
+ * validates all links by calling this operation. Optional.
+ */
struct media_entity_operations {
int (*link_setup)(struct media_entity *entity,
const struct media_pad *local,
@@ -87,17 +96,7 @@ struct media_entity {
struct {
u32 major;
u32 minor;
- } v4l;
- struct {
- u32 major;
- u32 minor;
- } fb;
- struct {
- u32 card;
- u32 device;
- u32 subdevice;
- } alsa;
- int dvb;
+ } dev;
/* Sub-device specifications */
/* Nothing needed yet */
diff --git a/include/media/mt9p031.h b/include/media/mt9p031.h
index b1e63f2b72bd..1ba361205af1 100644
--- a/include/media/mt9p031.h
+++ b/include/media/mt9p031.h
@@ -5,12 +5,10 @@ struct v4l2_subdev;
/*
* struct mt9p031_platform_data - MT9P031 platform data
- * @reset: Chip reset GPIO (set to -1 if not used)
* @ext_freq: Input clock frequency
* @target_freq: Pixel clock frequency
*/
struct mt9p031_platform_data {
- int reset;
int ext_freq;
int target_freq;
};
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
index 398279dd1922..048f8f9117ef 100644
--- a/include/media/omap3isp.h
+++ b/include/media/omap3isp.h
@@ -45,7 +45,7 @@ enum {
};
/**
- * struct isp_parallel_platform_data - Parallel interface platform data
+ * struct isp_parallel_cfg - Parallel interface configuration
* @data_lane_shift: Data lane shifter
* ISP_LANE_SHIFT_0 - CAMEXT[13:0] -> CAM[13:0]
* ISP_LANE_SHIFT_2 - CAMEXT[13:2] -> CAM[11:0]
@@ -62,7 +62,7 @@ enum {
* @data_pol: Data polarity
* 0 - Normal, 1 - One's complement
*/
-struct isp_parallel_platform_data {
+struct isp_parallel_cfg {
unsigned int data_lane_shift:2;
unsigned int clk_pol:1;
unsigned int hs_pol:1;
@@ -105,7 +105,7 @@ struct isp_csiphy_lanes_cfg {
};
/**
- * struct isp_ccp2_platform_data - CCP2 interface platform data
+ * struct isp_ccp2_cfg - CCP2 interface configuration
* @strobe_clk_pol: Strobe/clock polarity
* 0 - Non Inverted, 1 - Inverted
* @crc: Enable the cyclic redundancy check
@@ -117,7 +117,7 @@ struct isp_csiphy_lanes_cfg {
* ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
* @vpclk_div: Video port output clock control
*/
-struct isp_ccp2_platform_data {
+struct isp_ccp2_cfg {
unsigned int strobe_clk_pol:1;
unsigned int crc:1;
unsigned int ccp2_mode:1;
@@ -127,39 +127,31 @@ struct isp_ccp2_platform_data {
};
/**
- * struct isp_csi2_platform_data - CSI2 interface platform data
+ * struct isp_csi2_cfg - CSI2 interface configuration
* @crc: Enable the cyclic redundancy check
- * @vpclk_div: Video port output clock control
*/
-struct isp_csi2_platform_data {
+struct isp_csi2_cfg {
unsigned crc:1;
- unsigned vpclk_div:2;
struct isp_csiphy_lanes_cfg lanecfg;
};
-struct isp_subdev_i2c_board_info {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
-};
-
-struct isp_v4l2_subdevs_group {
- struct isp_subdev_i2c_board_info *subdevs;
+struct isp_bus_cfg {
enum isp_interface_type interface;
union {
- struct isp_parallel_platform_data parallel;
- struct isp_ccp2_platform_data ccp2;
- struct isp_csi2_platform_data csi2;
+ struct isp_parallel_cfg parallel;
+ struct isp_ccp2_cfg ccp2;
+ struct isp_csi2_cfg csi2;
} bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
};
-struct isp_platform_xclk {
- const char *dev_id;
- const char *con_id;
+struct isp_platform_subdev {
+ struct i2c_board_info *board_info;
+ int i2c_adapter_id;
+ struct isp_bus_cfg *bus;
};
struct isp_platform_data {
- struct isp_platform_xclk xclks[2];
- struct isp_v4l2_subdevs_group *subdevs;
+ struct isp_platform_subdev *subdevs;
void (*set_constraints)(struct isp_device *isp, bool enable);
};
diff --git a/include/media/ov2659.h b/include/media/ov2659.h
new file mode 100644
index 000000000000..4216adc1ede2
--- /dev/null
+++ b/include/media/ov2659.h
@@ -0,0 +1,34 @@
+/*
+ * Omnivision OV2659 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2015 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef OV2659_H
+#define OV2659_H
+
+/**
+ * struct ov2659_platform_data - ov2659 driver platform data
+ * @link_frequency: target pixel clock frequency
+ */
+struct ov2659_platform_data {
+ s64 link_frequency;
+};
+
+#endif /* OV2659_H */
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index 944ecdf3530f..92766f77a5de 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -178,8 +178,8 @@ struct saa7146_use_ops {
};
/* from saa7146_fops.c */
-int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev, char *name, int type);
-int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev);
+int saa7146_register_device(struct video_device *vid, struct saa7146_dev *dev, char *name, int type);
+int saa7146_unregister_device(struct video_device *vid, struct saa7146_dev *dev);
void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h
index 0b36cc138304..3ef6e3d5ed6c 100644
--- a/include/media/v4l2-clk.h
+++ b/include/media/v4l2-clk.h
@@ -22,14 +22,15 @@
struct module;
struct device;
+struct clk;
struct v4l2_clk {
struct list_head list;
const struct v4l2_clk_ops *ops;
const char *dev_id;
- const char *id;
int enable;
struct mutex lock; /* Protect the enable count */
atomic_t use_count;
+ struct clk *clk;
void *priv;
};
@@ -43,7 +44,7 @@ struct v4l2_clk_ops {
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
const char *dev_name,
- const char *name, void *priv);
+ void *priv);
void v4l2_clk_unregister(struct v4l2_clk *clk);
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id);
void v4l2_clk_put(struct v4l2_clk *clk);
@@ -55,14 +56,13 @@ int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate);
struct module;
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
- const char *id, unsigned long rate, struct module *owner);
+ unsigned long rate, struct module *owner);
void v4l2_clk_unregister_fixed(struct v4l2_clk *clk);
static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id,
- const char *id,
unsigned long rate)
{
- return __v4l2_clk_register_fixed(dev_id, id, rate, THIS_MODULE);
+ return __v4l2_clk_register_fixed(dev_id, rate, THIS_MODULE);
}
#define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 3e4fddfc840c..acbcd2f5fe7f 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -65,7 +65,6 @@ struct v4l2_file_operations {
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
- long (*ioctl) (struct file *, unsigned int, unsigned long);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index ffb69da3ce9e..9c581578783f 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -58,8 +58,6 @@ struct v4l2_device {
struct v4l2_ctrl_handler *ctrl_handler;
/* Device's priority state */
struct v4l2_prio_state prio;
- /* BKL replacement mutex. Temporary solution only. */
- struct mutex ioctl_lock;
/* Keep track of the references to this struct. */
struct kref ref;
/* Release function that is called when the ref count goes to 0. */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 8537983b9b22..8fbbd76d78e8 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -23,12 +23,6 @@ struct v4l2_ioctl_ops {
/* VIDIOC_QUERYCAP handler */
int (*vidioc_querycap)(struct file *file, void *fh, struct v4l2_capability *cap);
- /* Priority handling */
- int (*vidioc_g_priority) (struct file *file, void *fh,
- enum v4l2_priority *p);
- int (*vidioc_s_priority) (struct file *file, void *fh,
- enum v4l2_priority p);
-
/* VIDIOC_ENUM_FMT handlers */
int (*vidioc_enum_fmt_vid_cap) (struct file *file, void *fh,
struct v4l2_fmtdesc *f);
diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
index 70fa7b7b0487..f831c9c225b6 100644
--- a/include/media/v4l2-of.h
+++ b/include/media/v4l2-of.h
@@ -29,12 +29,15 @@ struct device_node;
* @data_lanes: an array of physical data lane indexes
* @clock_lane: physical lane index of the clock lane
* @num_data_lanes: number of data lanes
+ * @lane_polarities: polarity of the lanes. The order is the same of
+ * the physical lanes.
*/
struct v4l2_of_bus_mipi_csi2 {
unsigned int flags;
unsigned char data_lanes[4];
unsigned char clock_lane;
unsigned short num_data_lanes;
+ bool lane_polarities[5];
};
/**
@@ -66,9 +69,26 @@ struct v4l2_of_endpoint {
struct list_head head;
};
+/**
+ * struct v4l2_of_link - a link between two endpoints
+ * @local_node: pointer to device_node of this endpoint
+ * @local_port: identifier of the port this endpoint belongs to
+ * @remote_node: pointer to device_node of the remote endpoint
+ * @remote_port: identifier of the port the remote endpoint belongs to
+ */
+struct v4l2_of_link {
+ struct device_node *local_node;
+ unsigned int local_port;
+ struct device_node *remote_node;
+ unsigned int remote_port;
+};
+
#ifdef CONFIG_OF
int v4l2_of_parse_endpoint(const struct device_node *node,
struct v4l2_of_endpoint *endpoint);
+int v4l2_of_parse_link(const struct device_node *node,
+ struct v4l2_of_link *link);
+void v4l2_of_put_link(struct v4l2_of_link *link);
#else /* CONFIG_OF */
static inline int v4l2_of_parse_endpoint(const struct device_node *node,
@@ -77,6 +97,16 @@ static inline int v4l2_of_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
+static inline int v4l2_of_parse_link(const struct device_node *node,
+ struct v4l2_of_link *link)
+{
+ return -ENOSYS;
+}
+
+static inline void v4l2_of_put_link(struct v4l2_of_link *link)
+{
+}
+
#endif /* CONFIG_OF */
#endif /* _V4L2_OF_H */
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 5beeb8744fd1..2f0a345a7fed 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -332,8 +332,6 @@ struct v4l2_subdev_video_ops {
struct v4l2_subdev_frame_interval *interval);
int (*s_frame_interval)(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *interval);
- int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize);
- int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival);
int (*s_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*g_dv_timings)(struct v4l2_subdev *sd,
@@ -482,6 +480,18 @@ struct v4l2_subdev_ir_ops {
struct v4l2_subdev_ir_parameters *params);
};
+/*
+ * Used for storing subdev pad information. This structure only needs
+ * to be passed to the pad op if the 'which' field of the main argument
+ * is set to V4L2_SUBDEV_FORMAT_TRY. For V4L2_SUBDEV_FORMAT_ACTIVE it is
+ * safe to pass NULL.
+ */
+struct v4l2_subdev_pad_config {
+ struct v4l2_mbus_framefmt try_fmt;
+ struct v4l2_rect try_crop;
+ struct v4l2_rect try_compose;
+};
+
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
* @get_frame_desc: get the current low level media bus frame parameters.
@@ -489,21 +499,26 @@ struct v4l2_subdev_ir_ops {
* may be adjusted by the subdev driver to device capabilities.
*/
struct v4l2_subdev_pad_ops {
- int (*enum_mbus_code)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*enum_mbus_code)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code);
int (*enum_frame_size)(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse);
int (*enum_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_interval_enum *fie);
- int (*get_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*get_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format);
- int (*set_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*set_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format);
- int (*get_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*get_selection)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel);
- int (*set_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*set_selection)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel);
int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
@@ -625,11 +640,7 @@ struct v4l2_subdev {
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- struct {
- struct v4l2_mbus_framefmt try_fmt;
- struct v4l2_rect try_crop;
- struct v4l2_rect try_compose;
- } *pad;
+ struct v4l2_subdev_pad_config *pad;
#endif
};
@@ -639,17 +650,17 @@ struct v4l2_subdev_fh {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
#define __V4L2_SUBDEV_MK_GET_TRY(rtype, fun_name, field_name) \
static inline struct rtype * \
- v4l2_subdev_get_try_##fun_name(struct v4l2_subdev_fh *fh, \
- unsigned int pad) \
+ fun_name(struct v4l2_subdev *sd, \
+ struct v4l2_subdev_pad_config *cfg, \
+ unsigned int pad) \
{ \
- BUG_ON(pad >= vdev_to_v4l2_subdev( \
- fh->vfh.vdev)->entity.num_pads); \
- return &fh->pad[pad].field_name; \
+ BUG_ON(pad >= sd->entity.num_pads); \
+ return &cfg[pad].field_name; \
}
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, format, try_fmt)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, crop, try_crop)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, compose, try_compose)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, v4l2_subdev_get_try_format, try_fmt)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, v4l2_subdev_get_try_crop, try_crop)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, v4l2_subdev_get_try_compose, try_compose)
#endif
extern const struct v4l2_file_operations v4l2_subdev_fops;
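With the fh argument replaced by struct v4l2_subdev_pad_config, a pad operation receives the TRY state directly. A hedged sketch of a get_fmt handler, assuming a hypothetical example_sensor driver with an active_fmt field:

static int example_get_fmt(struct v4l2_subdev *sd,
			   struct v4l2_subdev_pad_config *cfg,
			   struct v4l2_subdev_format *format)
{
	/* example_sensor and active_fmt are assumed driver state */
	struct example_sensor *sensor =
		container_of(sd, struct example_sensor, sd);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
		/* cfg is only guaranteed to be non-NULL for TRY access */
		format->format = *v4l2_subdev_get_try_format(sd, cfg,
							     format->pad);
		return 0;
	}

	format->format = sensor->active_fmt;
	return 0;
}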
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bd2cec2d6c3d..a5790fd5d125 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -134,17 +134,6 @@ enum vb2_io_modes {
};
/**
- * enum vb2_fileio_flags - flags for selecting a mode of the file io emulator,
- * by default the 'streaming' style is used by the file io emulator
- * @VB2_FILEIO_READ_ONCE: report EOF after reading the first buffer
- * @VB2_FILEIO_WRITE_IMMEDIATELY: queue buffer after each write() call
- */
-enum vb2_fileio_flags {
- VB2_FILEIO_READ_ONCE = (1 << 0),
- VB2_FILEIO_WRITE_IMMEDIATELY = (1 << 1),
-};
-
-/**
* enum vb2_buffer_state - current video buffer state
* @VB2_BUF_STATE_DEQUEUED: buffer under userspace control
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
@@ -346,7 +335,9 @@ struct v4l2_fh;
*
* @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h
* @io_modes: supported io methods (see vb2_io_modes enum)
- * @io_flags: additional io flags (see vb2_fileio_flags enum)
+ * @fileio_read_once: report EOF after reading the first buffer
+ * @fileio_write_immediately: queue buffer after each write() call
+ * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
* @lock: pointer to a mutex that protects the vb2_queue struct. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -396,7 +387,10 @@ struct v4l2_fh;
struct vb2_queue {
enum v4l2_buf_type type;
unsigned int io_modes;
- unsigned int io_flags;
+ unsigned fileio_read_once:1;
+ unsigned fileio_write_immediately:1;
+ unsigned allow_zero_bytesused:1;
+
struct mutex *lock;
struct v4l2_fh *owner;
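Drivers that previously set io_flags now set the individual bitfields instead. A minimal sketch of a capture queue setup under that assumption; example_qops and the vmalloc memops choice are placeholders:

#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

static const struct vb2_ops example_qops;	/* assumed driver callbacks */

static int example_init_queue(struct vb2_queue *q, struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ;
	/* Previously: q->io_flags = VB2_FILEIO_READ_ONCE; */
	q->fileio_read_once = 1;
	q->ops = &example_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_struct_size = sizeof(struct vb2_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = lock;

	return vb2_queue_init(q);
}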
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
new file mode 100644
index 000000000000..5ae962512fb8
--- /dev/null
+++ b/include/misc/cxl-base.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _MISC_CXL_BASE_H
+#define _MISC_CXL_BASE_H
+
+#ifdef CONFIG_CXL_BASE
+
+#define CXL_IRQ_RANGES 4
+
+struct cxl_irq_ranges {
+ irq_hw_number_t offset[CXL_IRQ_RANGES];
+ irq_hw_number_t range[CXL_IRQ_RANGES];
+};
+
+extern atomic_t cxl_use_count;
+
+static inline bool cxl_ctx_in_use(void)
+{
+ return (atomic_read(&cxl_use_count) != 0);
+}
+
+static inline void cxl_ctx_get(void)
+{
+ atomic_inc(&cxl_use_count);
+}
+
+static inline void cxl_ctx_put(void)
+{
+ atomic_dec(&cxl_use_count);
+}
+
+void cxl_slbia(struct mm_struct *mm);
+
+#else /* CONFIG_CXL_BASE */
+
+static inline bool cxl_ctx_in_use(void) { return false; }
+static inline void cxl_slbia(struct mm_struct *mm) {}
+
+#endif /* CONFIG_CXL_BASE */
+
+#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 975cc7861f18..7a6c1d6cc173 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 IBM Corp.
+ * Copyright 2015 IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -10,39 +10,194 @@
#ifndef _MISC_CXL_H
#define _MISC_CXL_H
-#ifdef CONFIG_CXL_BASE
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <uapi/misc/cxl.h>
-#define CXL_IRQ_RANGES 4
+/*
+ * This documents the in-kernel API for drivers to use CXL. It allows kernel
+ * drivers to bind to AFUs using an AFU configuration record exposed as a PCI
+ * configuration record.
+ *
+ * This API enables control over AFU and contexts which can't be part of the
+ * generic PCI API. This API is agnostic to the actual AFU.
+ */
+
+/* Get the AFU associated with a pci_dev */
+struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
+
+/* Get the AFU conf record number associated with a pci_dev */
+unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
+
+/* Get the physical device (i.e. the PCIe card) to which the AFU is attached */
+struct device *cxl_get_phys_dev(struct pci_dev *dev);
+
+
+/*
+ * Context lifetime overview:
+ *
+ * An AFU context may be inited and then started and stopped multiple times
+ * before it's released, i.e.:
+ * - cxl_dev_context_init()
+ * - cxl_start_context()
+ * - cxl_stop_context()
+ * - cxl_start_context()
+ * - cxl_stop_context()
+ * ...repeat...
+ * - cxl_release_context()
+ * Once released, a context can't be started again.
+ *
+ * One context is inited by the cxl driver for every pci_dev. This is to be
+ * used as a default kernel context. cxl_get_context() will get this
+ * context. This context will be released by PCI hot unplug, so it doesn't need to
+ * be released explicitly by drivers.
+ *
+ * Additional kernel contexts may be inited using cxl_dev_context_init().
+ * These must be released using cxl_context_detach().
+ *
+ * Once a context has been inited, IRQs may be configured. Firstly these IRQs
+ * must be allocated (cxl_allocate_afu_irqs()), then individually mapped to
+ * specific handlers (cxl_map_afu_irq()).
+ *
+ * These IRQs can be unmapped (cxl_unmap_afu_irq()) and finally released
+ * (cxl_free_afu_irqs()).
+ *
+ * The AFU can be reset (cxl_afu_reset()). This will cause the PSL/AFU
+ * hardware to lose track of all contexts. It's up to the caller of
+ * cxl_afu_reset() to restart these contexts.
+ */
+
+/*
+ * On pci_enable_device(), the cxl driver will init a single cxl context for
+ * use by the driver. It doesn't start this context (as that will likely
+ * generate DMA traffic for most AFUs).
+ *
+ * This gets the default context associated with this pci_dev. This context
+ * doesn't need to be released as this will be done by the PCI subsystem on hot
+ * unplug.
+ */
+struct cxl_context *cxl_get_context(struct pci_dev *dev);
+/*
+ * Allocate and initialise a context associated with an AFU PCI device. This
+ * doesn't start the context in the AFU.
+ */
+struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
+/*
+ * Release and free a context. Context should be stopped before calling.
+ */
+int cxl_release_context(struct cxl_context *ctx);
-struct cxl_irq_ranges {
- irq_hw_number_t offset[CXL_IRQ_RANGES];
- irq_hw_number_t range[CXL_IRQ_RANGES];
-};
+/*
+ * Allocate AFU interrupts for this context. num=0 will allocate the default
+ * for this AFU as given in the AFU descriptor. This number doesn't include
+ * interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt to be
+ * used must have a handler mapped with cxl_map_afu_irq().
+ */
+int cxl_allocate_afu_irqs(struct cxl_context *cxl, int num);
+/* Free allocated interrupts */
+void cxl_free_afu_irqs(struct cxl_context *cxl);
+
+/*
+ * Map a handler for an AFU interrupt associated with a particular context. AFU
+ * IRQ numbers start from 1 (CAIA defines AFU IRQ 0 for page faults). cookie
+ * is private data that will be provided to the interrupt handler.
+ */
+int cxl_map_afu_irq(struct cxl_context *cxl, int num,
+ irq_handler_t handler, void *cookie, char *name);
+/* unmap mapped IRQ handlers */
+void cxl_unmap_afu_irq(struct cxl_context *cxl, int num, void *cookie);
-extern atomic_t cxl_use_count;
+/*
+ * Start work on the AFU. This starts a cxl context and associates it with a
+ * task. task == NULL will make it a kernel context.
+ */
+int cxl_start_context(struct cxl_context *ctx, u64 wed,
+ struct task_struct *task);
+/*
+ * Stop a context and remove it from the PSL
+ */
+int cxl_stop_context(struct cxl_context *ctx);
-static inline bool cxl_ctx_in_use(void)
-{
- return (atomic_read(&cxl_use_count) != 0);
-}
+/* Reset the AFU */
+int cxl_afu_reset(struct cxl_context *ctx);
-static inline void cxl_ctx_get(void)
-{
- atomic_inc(&cxl_use_count);
-}
+/*
+ * Set a context as a master context.
+ * This sets the default problem space area mapped as the full space, rather
+ * than just the per context area (for slaves).
+ */
+void cxl_set_master(struct cxl_context *ctx);
-static inline void cxl_ctx_put(void)
-{
- atomic_dec(&cxl_use_count);
-}
+/*
+ * Map and unmap the AFU Problem Space area. The amount and location mapped
+ * depend on whether this context is a master or a slave.
+ */
+void __iomem *cxl_psa_map(struct cxl_context *ctx);
+void cxl_psa_unmap(void __iomem *addr);
-void cxl_slbia(struct mm_struct *mm);
+/* Get the process element for this context */
+int cxl_process_element(struct cxl_context *ctx);
-#else /* CONFIG_CXL_BASE */
-static inline bool cxl_ctx_in_use(void) { return false; }
-static inline void cxl_slbia(struct mm_struct *mm) {}
+/*
+ * These calls allow drivers to create their own file descriptors and make them
+ * identical to the cxl file descriptor user API. An example use case:
+ *
+ * struct file_operations cxl_my_fops = {};
+ * ......
+ * // Init the context
+ * ctx = cxl_dev_context_init(dev);
+ * if (IS_ERR(ctx))
+ * return PTR_ERR(ctx);
+ * // Create and attach a new file descriptor to my file ops
+ * file = cxl_get_fd(ctx, &cxl_my_fops, &fd);
+ * // Start context
+ * rc = cxl_start_work(ctx, &work.work);
+ * if (rc) {
+ * fput(file);
+ * put_unused_fd(fd);
+ * return -ENODEV;
+ * }
+ * // No error paths after installing the fd
+ * fd_install(fd, file);
+ * return fd;
+ *
+ * This inits a context, gets a file descriptor and associates some file
+ * ops with that file descriptor. If the file ops are blank, the cxl driver will
+ * fill them in with the default ones that mimic the standard user API. Once
+ * completed, the file descriptor can be installed. Once the file descriptor is
+ * installed, it's visible to the user, so no errors may occur past this point.
+ *
+ * If the cxl_fd_release() file op is installed, the context will be stopped
+ * and released when the fd is released. Hence the driver won't need to manage
+ * this itself.
+ */
-#endif /* CONFIG_CXL_BASE */
+/*
+ * Take a context and associate it with my file ops. Returns the associated
+ * file and file descriptor. Any file ops which are blank are filled in by the
+ * cxl driver with the default ops to mimic the standard API.
+ */
+struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
+ int *fd);
+/* Get the context associated with this file */
+struct cxl_context *cxl_fops_get_context(struct file *file);
+/*
+ * Start a context associated with a struct cxl_ioctl_start_work, as used by the
+ * standard cxl user API.
+ */
+int cxl_start_work(struct cxl_context *ctx,
+ struct cxl_ioctl_start_work *work);
+/*
+ * Export all the existing fops so drivers can use them
+ */
+int cxl_fd_open(struct inode *inode, struct file *file);
+int cxl_fd_release(struct inode *inode, struct file *file);
+long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
+unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
+ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
+ loff_t *off);
-#endif
+#endif /* _MISC_CXL_H */
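The context lifecycle described in the comments above can be sketched as follows; every call is declared in this header, while the WED value, IRQ handler body and error handling are assumptions:

#include <misc/cxl.h>

static irqreturn_t example_afu_irq(int irq, void *cookie)
{
	/* cookie is the pointer passed to cxl_map_afu_irq() below */
	return IRQ_HANDLED;
}

static int example_attach(struct pci_dev *pdev, u64 wed)
{
	struct cxl_context *ctx;
	int rc;

	ctx = cxl_dev_context_init(pdev);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	rc = cxl_allocate_afu_irqs(ctx, 1);
	if (rc)
		goto err_release;

	rc = cxl_map_afu_irq(ctx, 1, example_afu_irq, ctx, "example-afu");
	if (rc)
		goto err_free_irqs;

	rc = cxl_start_context(ctx, wed, NULL);	/* NULL task => kernel context */
	if (rc)
		goto err_unmap;

	return 0;

err_unmap:
	cxl_unmap_afu_irq(ctx, 1, ctx);
err_free_irqs:
	cxl_free_afu_irqs(ctx);
err_release:
	cxl_release_context(ctx);
	return rc;
}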
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 6fab66c5c5af..c6b97e58cf84 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -211,6 +211,8 @@ struct p9_dirent {
char d_name[256];
};
+struct iov_iter;
+
int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
const char *name);
@@ -236,10 +238,8 @@ int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
-int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
- u64 offset, u32 count);
-int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
- u64 offset, u32 count);
+int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
+int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
struct p9_dirent *dirent);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 2a25dec30211..5122b5e40f78 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -61,7 +61,7 @@ struct p9_trans_module {
int (*cancel) (struct p9_client *, struct p9_req_t *req);
int (*cancelled)(struct p9_client *, struct p9_req_t *req);
int (*zc_request)(struct p9_client *, struct p9_req_t *,
- char *, char *, int , int, int, int);
+ struct iov_iter *, struct iov_iter *, int , int, int);
};
void v9fs_register_trans(struct p9_trans_module *m);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 80456f72d70a..def59d3a34d5 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -142,6 +142,7 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
void ipv6_mc_remap(struct inet6_dev *idev);
void ipv6_mc_init_dev(struct inet6_dev *idev);
void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);
void addrconf_dad_failure(struct inet6_ifaddr *ifp);
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index a175ba4a7adb..4a167b30a12f 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -39,7 +39,6 @@ struct unix_skb_parms {
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define UNIXSID(skb) (&UNIXCB((skb)).secid)
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 0d87674fb775..db639a4c5ab8 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -74,7 +74,7 @@ void vsock_pending_work(struct work_struct *work);
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
- gfp_t priority, unsigned short type);
+ gfp_t priority, unsigned short type, int kern);
/**** TRANSPORT ****/
@@ -100,8 +100,8 @@ struct vsock_transport {
/* DGRAM. */
int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
- int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
- struct msghdr *msg, size_t len, int flags);
+ int (*dgram_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+ size_t len, int flags);
int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
struct msghdr *, size_t len);
bool (*dgram_allow)(u32 cid, u32 port);
diff --git a/include/net/arp.h b/include/net/arp.h
index 73c49864076b..5e0f891d476c 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -9,28 +9,17 @@
extern struct neigh_table arp_tbl;
-static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
+static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd)
{
+ u32 key = *(const u32 *)pkey;
u32 val = key ^ hash32_ptr(dev);
- return val * hash_rnd;
+ return val * hash_rnd[0];
}
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
- struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht);
- struct neighbour *n;
- u32 hash_val;
-
- hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference_bh(n->next)) {
- if (n->dev == dev && *(u32 *)n->primary_key == key)
- return n;
- }
-
- return NULL;
+ return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
@@ -47,7 +36,6 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
}
void arp_init(void);
-int arp_find(unsigned char *haddr, struct sk_buff *skb);
int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
void arp_send(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
diff --git a/include/net/ax25.h b/include/net/ax25.h
index bf0396e9a5d3..16a923a3a43a 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <net/neighbour.h>
#define AX25_T1CLAMPLO 1
#define AX25_T1CLAMPHI (30 * HZ)
@@ -366,9 +367,7 @@ int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
struct net_device *);
/* ax25_ip.c */
-int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short,
- const void *, const void *, unsigned int);
-int ax25_rebuild_header(struct sk_buff *);
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e00455aab18c..38d8a34d3589 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -245,10 +245,10 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
-int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags);
-int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags);
+int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
+int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
@@ -269,29 +269,34 @@ struct l2cap_ctrl {
__u16 reqseq;
__u16 txseq;
__u8 retries;
+ __le16 psm;
+ bdaddr_t bdaddr;
+ struct l2cap_chan *chan;
};
struct hci_dev;
typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
-
-struct hci_req_ctrl {
- bool start;
- u8 event;
- hci_req_complete_t complete;
+typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb);
+
+struct req_ctrl {
+ bool start;
+ u8 event;
+ hci_req_complete_t complete;
+ hci_req_complete_skb_t complete_skb;
};
struct bt_skb_cb {
__u8 pkt_type;
- __u8 incoming;
+ __u8 force_active;
__u16 opcode;
__u16 expect;
- __u8 force_active;
- struct l2cap_chan *chan;
- struct l2cap_ctrl control;
- struct hci_req_ctrl req;
- bdaddr_t bdaddr;
- __le16 psm;
+ __u8 incoming:1;
+ union {
+ struct l2cap_ctrl l2cap;
+ struct req_ctrl req;
+ };
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -339,6 +344,11 @@ out:
int bt_to_errno(__u16 code);
+void hci_sock_set_flag(struct sock *sk, int nr);
+void hci_sock_clear_flag(struct sock *sk, int nr);
+int hci_sock_test_flag(struct sock *sk, int nr);
+unsigned short hci_sock_get_channel(struct sock *sk);
+
int hci_sock_init(void);
void hci_sock_cleanup(void);
@@ -355,8 +365,22 @@ extern struct dentry *bt_debugfs;
int l2cap_init(void);
void l2cap_exit(void);
+#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_init(void);
void sco_exit(void);
+#else
+static inline int sco_init(void)
+{
+ return 0;
+}
+
+static inline void sco_exit(void)
+{
+}
+#endif
+
+int mgmt_init(void);
+void mgmt_exit(void);
void bt_sock_reclassify_lock(struct sock *sk, int proto);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 8e54f825153c..7ca6690355ea 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -160,6 +160,14 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+
+ /* When this quirk is set, LE scan and BR/EDR inquiry are done
+ * simultaneously, otherwise they are interleaved.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
};
/* HCI device flags */
@@ -179,13 +187,14 @@ enum {
HCI_RESET,
};
-/* BR/EDR and/or LE controller flags: the flags defined here should represent
- * states configured via debugfs for debugging and testing purposes only.
- */
+/* HCI socket flags */
enum {
- HCI_DUT_MODE,
- HCI_FORCE_BREDR_SMP,
- HCI_FORCE_STATIC_ADDR,
+ HCI_SOCK_TRUSTED,
+ HCI_MGMT_INDEX_EVENTS,
+ HCI_MGMT_UNCONF_INDEX_EVENTS,
+ HCI_MGMT_EXT_INDEX_EVENTS,
+ HCI_MGMT_GENERIC_EVENTS,
+ HCI_MGMT_OOB_DATA_EVENTS,
};
/*
@@ -217,6 +226,8 @@ enum {
HCI_HS_ENABLED,
HCI_LE_ENABLED,
HCI_ADVERTISING,
+ HCI_ADVERTISING_CONNECTABLE,
+ HCI_ADVERTISING_INSTANCE,
HCI_CONNECTABLE,
HCI_DISCOVERABLE,
HCI_LIMITED_DISCOVERABLE,
@@ -225,13 +236,13 @@ enum {
HCI_FAST_CONNECTABLE,
HCI_BREDR_ENABLED,
HCI_LE_SCAN_INTERRUPTED,
-};
-/* A mask for the flags that are supposed to remain when a reset happens
- * or the HCI device is closed.
- */
-#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
- BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV))
+ HCI_DUT_MODE,
+ HCI_FORCE_BREDR_SMP,
+ HCI_FORCE_STATIC_ADDR,
+
+ __HCI_NUM_FLAGS,
+};
/* HCI timeouts */
#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
@@ -363,6 +374,7 @@ enum {
/* LE features */
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
+#define HCI_LE_SLAVE_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_EXT_SCAN_POLICY 0x80
@@ -452,9 +464,16 @@ enum {
#define EIR_NAME_COMPLETE 0x09 /* complete local name */
#define EIR_TX_POWER 0x0A /* transmit power level */
#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */
-#define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */
-#define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */
+#define EIR_SSP_HASH_C192 0x0E /* Simple Pairing Hash C-192 */
+#define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */
#define EIR_DEVICE_ID 0x10 /* device ID */
+#define EIR_APPEARANCE 0x19 /* Device appearance */
+#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */
+#define EIR_LE_ROLE 0x1C /* LE role */
+#define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */
+#define EIR_SSP_RAND_R256 0x1E /* Simple Pairing Rand R-256 */
+#define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */
+#define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */
/* Low Energy Advertising Flags */
#define LE_AD_LIMITED 0x01 /* Limited Discoverable */
@@ -1183,6 +1202,16 @@ struct hci_rp_read_clock {
__le16 accuracy;
} __packed;
+#define HCI_OP_READ_ENC_KEY_SIZE 0x1408
+struct hci_cp_read_enc_key_size {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_enc_key_size {
+ __u8 status;
+ __le16 handle;
+ __u8 key_size;
+} __packed;
+
#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
struct hci_rp_read_local_amp_info {
__u8 status;
@@ -1358,6 +1387,11 @@ struct hci_cp_le_conn_update {
__le16 max_ce_len;
} __packed;
+#define HCI_OP_LE_READ_REMOTE_FEATURES 0x2016
+struct hci_cp_le_read_remote_features {
+ __le16 handle;
+} __packed;
+
#define HCI_OP_LE_START_ENC 0x2019
struct hci_cp_le_start_enc {
__le16 handle;
@@ -1850,6 +1884,13 @@ struct hci_ev_le_conn_update_complete {
__le16 supervision_timeout;
} __packed;
+#define HCI_EV_LE_REMOTE_FEAT_COMPLETE 0x04
+struct hci_ev_le_remote_feat_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 features[8];
+} __packed;
+
#define HCI_EV_LE_LTK_REQ 0x05
struct hci_ev_le_ltk_req {
__le16 handle;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 52863c3e0b13..3bd618d3e55d 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -76,6 +76,7 @@ struct discovery_state {
u8 last_adv_data[HCI_MAX_AD_LENGTH];
u8 last_adv_data_len;
bool report_invalid_rssi;
+ bool result_filtering;
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
@@ -108,7 +109,7 @@ struct bt_uuid {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 master;
+ u8 type;
u8 val[16];
};
@@ -154,6 +155,23 @@ struct oob_data {
u8 rand256[16];
};
+struct adv_info {
+ struct list_head list;
+ bool pending;
+ __u8 instance;
+ __u32 flags;
+ __u16 timeout;
+ __u16 remaining_time;
+ __u16 duration;
+ __u16 adv_data_len;
+ __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u16 scan_rsp_len;
+ __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+};
+
+#define HCI_MAX_ADV_INSTANCES 5
+#define HCI_DEFAULT_ADV_DURATION 2
+
#define HCI_MAX_SHORT_NAME_LENGTH 10
/* Default LE RPA expiry time, 15 minutes */
@@ -173,7 +191,6 @@ struct amp_assoc {
#define HCI_MAX_PAGES 3
-#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
struct mutex lock;
@@ -314,14 +331,13 @@ struct hci_dev {
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
- struct sk_buff *recv_evt;
struct sk_buff *sent_cmd;
- struct sk_buff *reassembly[NUM_REASSEMBLY];
struct mutex req_lock;
wait_queue_head_t req_wait_q;
__u32 req_status;
__u32 req_result;
+ struct sk_buff *req_skb;
void *smp_data;
void *smp_bredr_data;
@@ -352,8 +368,7 @@ struct hci_dev {
struct rfkill *rfkill;
- unsigned long dbg_flags;
- unsigned long dev_flags;
+ DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
struct delayed_work le_scan_disable;
struct delayed_work le_scan_restart;
@@ -364,6 +379,12 @@ struct hci_dev {
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
__u8 scan_rsp_data_len;
+ struct list_head adv_instances;
+ unsigned int adv_instance_cnt;
+ __u8 cur_adv_instance;
+ __u16 adv_instance_timeout;
+ struct delayed_work adv_instance_expire;
+
__u8 irk[16];
__u32 rpa_timeout;
struct delayed_work rpa_expired;
@@ -373,6 +394,7 @@ struct hci_dev {
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*setup)(struct hci_dev *hdev);
+ int (*shutdown)(struct hci_dev *hdev);
int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
void (*hw_error)(struct hci_dev *hdev, u8 code);
@@ -498,20 +520,42 @@ struct hci_conn_params {
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
-extern rwlock_t hci_cb_list_lock;
+extern struct mutex hci_cb_list_lock;
+
+#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
+
+#define hci_dev_clear_volatile_flags(hdev) \
+ do { \
+ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
+ hci_dev_clear_flag(hdev, HCI_LE_ADV); \
+ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ } while (0)
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
int l2cap_disconn_ind(struct hci_conn *hcon);
-void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
-int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
-int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
-int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+#else
+static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 *flags)
+{
+ return 0;
+}
+
+static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+{
+}
+#endif
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
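Since dev_flags is now a bitmap sized by __HCI_NUM_FLAGS, callers use the hci_dev_*_flag() accessors instead of open-coded bit operations on a long. A small illustrative sketch; the function itself is hypothetical:

static void example_enable_ssp(struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED))
		return;		/* already enabled */

	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
}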
@@ -529,6 +573,7 @@ static inline void discovery_init(struct hci_dev *hdev)
static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
{
+ hdev->discovery.result_filtering = false;
hdev->discovery.report_invalid_rssi = true;
hdev->discovery.rssi = HCI_RSSI_INVALID;
hdev->discovery.uuid_count = 0;
@@ -584,7 +629,6 @@ enum {
HCI_CONN_SC_ENABLED,
HCI_CONN_AES_CCM,
HCI_CONN_POWER_SAVE,
- HCI_CONN_REMOTE_OOB,
HCI_CONN_FLUSH_KEY,
HCI_CONN_ENCRYPT,
HCI_CONN_AUTH,
@@ -600,14 +644,14 @@ enum {
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+ return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}
@@ -969,6 +1013,8 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
void hci_smp_irks_clear(struct hci_dev *hdev);
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 bdaddr_type);
@@ -978,10 +1024,18 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 bdaddr_type);
+void hci_adv_instances_clear(struct hci_dev *hdev);
+struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
+struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
+int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
+ u16 adv_data_len, u8 *adv_data,
+ u16 scan_rsp_len, u8 *scan_rsp_data,
+ u16 timeout, u16 duration);
+int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
-int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
void hci_init_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
@@ -1025,10 +1079,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
-#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
- !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
- test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
+#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \
+ !hci_dev_test_flag(dev, HCI_AUTO_OFF))
+#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_SC_ENABLED))
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1050,28 +1104,6 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
}
-static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
-{
- switch (conn->type) {
- case ACL_LINK:
- case LE_LINK:
- l2cap_connect_cfm(conn, status);
- break;
-
- case SCO_LINK:
- case ESCO_LINK:
- sco_connect_cfm(conn, status);
- break;
-
- default:
- BT_ERR("unknown link type %d", conn->type);
- break;
- }
-
- if (conn->connect_cfm_cb)
- conn->connect_cfm_cb(conn, status);
-}
-
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
if (conn->type != ACL_LINK && conn->type != LE_LINK)
@@ -1080,91 +1112,69 @@ static inline int hci_proto_disconn_ind(struct hci_conn *conn)
return l2cap_disconn_ind(conn);
}
-static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
-{
- switch (conn->type) {
- case ACL_LINK:
- case LE_LINK:
- l2cap_disconn_cfm(conn, reason);
- break;
-
- case SCO_LINK:
- case ESCO_LINK:
- sco_disconn_cfm(conn, reason);
- break;
-
- /* L2CAP would be handled for BREDR chan */
- case AMP_LINK:
- break;
+/* ----- HCI callbacks ----- */
+struct hci_cb {
+ struct list_head list;
- default:
- BT_ERR("unknown link type %d", conn->type);
- break;
- }
+ char *name;
- if (conn->disconn_cfm_cb)
- conn->disconn_cfm_cb(conn, reason);
-}
+ void (*connect_cfm) (struct hci_conn *conn, __u8 status);
+ void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+ __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+};
-static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
+static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
- __u8 encrypt;
-
- if (conn->type != ACL_LINK && conn->type != LE_LINK)
- return;
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
- return;
+ struct hci_cb *cb;
- encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
- l2cap_security_cfm(conn, status, encrypt);
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->connect_cfm)
+ cb->connect_cfm(conn, status);
+ }
+ mutex_unlock(&hci_cb_list_lock);
- if (conn->security_cfm_cb)
- conn->security_cfm_cb(conn, status);
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
}
-static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
- __u8 encrypt)
+static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
- if (conn->type != ACL_LINK && conn->type != LE_LINK)
- return;
+ struct hci_cb *cb;
- l2cap_security_cfm(conn, status, encrypt);
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->disconn_cfm)
+ cb->disconn_cfm(conn, reason);
+ }
+ mutex_unlock(&hci_cb_list_lock);
- if (conn->security_cfm_cb)
- conn->security_cfm_cb(conn, status);
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
}
-/* ----- HCI callbacks ----- */
-struct hci_cb {
- struct list_head list;
-
- char *name;
-
- void (*security_cfm) (struct hci_conn *conn, __u8 status,
- __u8 encrypt);
- void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
- void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
-};
-
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
__u8 encrypt;
- hci_proto_auth_cfm(conn, status);
-
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return;
encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
@@ -1178,26 +1188,27 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
if (conn->pending_sec_level > conn->sec_level)
conn->sec_level = conn->pending_sec_level;
- hci_proto_encrypt_cfm(conn, status, encrypt);
-
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
@@ -1205,12 +1216,12 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
{
struct hci_cb *cb;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
}
static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
@@ -1296,8 +1307,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-bool hci_req_pending(struct hci_dev *hdev);
-
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1312,11 +1321,35 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
+void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+ int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
+#define HCI_MGMT_VAR_LEN BIT(0)
+#define HCI_MGMT_NO_HDEV BIT(1)
+#define HCI_MGMT_UNTRUSTED BIT(2)
+#define HCI_MGMT_UNCONFIGURED BIT(3)
+
+struct hci_mgmt_handler {
+ int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len);
+ size_t data_len;
+ unsigned long flags;
+};
+
+struct hci_mgmt_chan {
+ struct list_head list;
+ unsigned short channel;
+ size_t handler_count;
+ const struct hci_mgmt_handler *handlers;
+ void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
+};
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
+
/* Management interface */
#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
@@ -1336,7 +1369,6 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
@@ -1344,6 +1376,7 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
int mgmt_powered(struct hci_dev *hdev, u8 powered);
int mgmt_update_adv_data(struct hci_dev *hdev);
void mgmt_discoverable_timeout(struct hci_dev *hdev);
+void mgmt_adv_timeout_expired(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
@@ -1382,9 +1415,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
- u8 *rand192, u8 *hash256, u8 *rand256,
- u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
@@ -1405,7 +1435,7 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
- __u8 ltk[16]);
+ __u8 ltk[16], __u8 key_size);
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *bdaddr_type);
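With the per-protocol confirmation hooks removed, upper layers now register a struct hci_cb and receive the confirmations through hci_connect_cfm(), hci_disconn_cfm() and friends. A hedged registration sketch with hypothetical names:

static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
	/* called from hci_connect_cfm() while hci_cb_list_lock is held */
}

static struct hci_cb example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}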
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index e218a30f2061..b831242d48a4 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -43,6 +43,8 @@
#define MGMT_STATUS_CANCELLED 0x10
#define MGMT_STATUS_INVALID_INDEX 0x11
#define MGMT_STATUS_RFKILLED 0x12
+#define MGMT_STATUS_ALREADY_PAIRED 0x13
+#define MGMT_STATUS_PERMISSION_DENIED 0x14
struct mgmt_hdr {
__le16 opcode;
@@ -98,6 +100,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_DEBUG_KEYS 0x00001000
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
+#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -503,6 +506,71 @@ struct mgmt_cp_start_service_discovery {
} __packed;
#define MGMT_START_SERVICE_DISCOVERY_SIZE 4
+#define MGMT_OP_READ_LOCAL_OOB_EXT_DATA 0x003B
+struct mgmt_cp_read_local_oob_ext_data {
+ __u8 type;
+} __packed;
+#define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1
+struct mgmt_rp_read_local_oob_ext_data {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_READ_EXT_INDEX_LIST 0x003C
+#define MGMT_READ_EXT_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_ext_index_list {
+ __le16 num_controllers;
+ struct {
+ __le16 index;
+ __u8 type;
+ __u8 bus;
+ } entry[0];
+} __packed;
+
+#define MGMT_OP_READ_ADV_FEATURES 0x003D
+#define MGMT_READ_ADV_FEATURES_SIZE 0
+struct mgmt_rp_read_adv_features {
+ __le32 supported_flags;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+ __u8 max_instances;
+ __u8 num_instances;
+ __u8 instance[0];
+} __packed;
+
+#define MGMT_OP_ADD_ADVERTISING 0x003E
+struct mgmt_cp_add_advertising {
+ __u8 instance;
+ __le32 flags;
+ __le16 duration;
+ __le16 timeout;
+ __u8 adv_data_len;
+ __u8 scan_rsp_len;
+ __u8 data[0];
+} __packed;
+#define MGMT_ADD_ADVERTISING_SIZE 11
+struct mgmt_rp_add_advertising {
+ __u8 instance;
+} __packed;
+
+#define MGMT_ADV_FLAG_CONNECTABLE BIT(0)
+#define MGMT_ADV_FLAG_DISCOV BIT(1)
+#define MGMT_ADV_FLAG_LIMITED_DISCOV BIT(2)
+#define MGMT_ADV_FLAG_MANAGED_FLAGS BIT(3)
+#define MGMT_ADV_FLAG_TX_POWER BIT(4)
+#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
+#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
+
+#define MGMT_OP_REMOVE_ADVERTISING 0x003F
+struct mgmt_cp_remove_advertising {
+ __u8 instance;
+} __packed;
+#define MGMT_REMOVE_ADVERTISING_SIZE 1
+struct mgmt_rp_remove_advertising {
+ __u8 instance;
+} __packed;
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -647,9 +715,14 @@ struct mgmt_ev_new_irk {
struct mgmt_irk_info irk;
} __packed;
+#define MGMT_CSRK_LOCAL_UNAUTHENTICATED 0x00
+#define MGMT_CSRK_REMOTE_UNAUTHENTICATED 0x01
+#define MGMT_CSRK_LOCAL_AUTHENTICATED 0x02
+#define MGMT_CSRK_REMOTE_AUTHENTICATED 0x03
+
struct mgmt_csrk_info {
struct mgmt_addr_info addr;
- __u8 master;
+ __u8 type;
__u8 val[16];
} __packed;
@@ -685,3 +758,29 @@ struct mgmt_ev_new_conn_param {
#define MGMT_EV_UNCONF_INDEX_REMOVED 0x001e
#define MGMT_EV_NEW_CONFIG_OPTIONS 0x001f
+
+struct mgmt_ev_ext_index {
+ __u8 type;
+ __u8 bus;
+} __packed;
+
+#define MGMT_EV_EXT_INDEX_ADDED 0x0020
+
+#define MGMT_EV_EXT_INDEX_REMOVED 0x0021
+
+#define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022
+struct mgmt_ev_local_oob_data_updated {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_EV_ADVERTISING_ADDED 0x0023
+struct mgmt_ev_advertising_added {
+ __u8 instance;
+} __packed;
+
+#define MGMT_EV_ADVERTISING_REMOVED 0x0024
+struct mgmt_ev_advertising_removed {
+ __u8 instance;
+} __packed;
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index f04cdbb7848e..c2a40a172fcd 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -82,6 +82,13 @@ typedef enum {
AD_TRANSMIT /* tx Machine */
} tx_states_t;
+/* churn machine states (43.4.17 in the 802.3ad standard) */
+typedef enum {
+ AD_CHURN_MONITOR, /* monitoring for churn */
+ AD_CHURN, /* churn detected (error) */
+ AD_NO_CHURN /* no churn (no error) */
+} churn_state_t;
+
/* rx indication types */
typedef enum {
AD_TYPE_LACPDU = 1, /* type lacpdu */
@@ -229,6 +236,12 @@ typedef struct port {
u16 sm_mux_timer_counter; /* state machine mux timer counter */
tx_states_t sm_tx_state; /* state machine tx state */
u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
+ u16 sm_churn_actor_timer_counter;
+ u16 sm_churn_partner_timer_counter;
+ u32 churn_actor_count;
+ u32 churn_partner_count;
+ churn_state_t sm_churn_actor_state;
+ churn_state_t sm_churn_partner_state;
struct slave *slave; /* pointer to the bond slave that this port belongs to */
struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
@@ -262,6 +275,22 @@ struct ad_slave_info {
u16 id;
};
+static inline const char *bond_3ad_churn_desc(churn_state_t state)
+{
+ static const char *const churn_description[] = {
+ "monitoring",
+ "churned",
+ "none",
+ "unknown"
+ };
+ int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
+
+ if (state >= max_size)
+ state = max_size - 1;
+
+ return churn_description[state];
+}
+
/* ========== AD Exported functions to the main bonding code ========== */
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
void bond_3ad_bind_slave(struct slave *slave);
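A short usage sketch for the new churn state fields and bond_3ad_churn_desc(), for example from a proc or debugfs dump; the seq_file plumbing is assumed:

#include <linux/seq_file.h>
#include <net/bond_3ad.h>

static void example_show_churn(struct seq_file *seq, struct port *port)
{
	seq_printf(seq, "Actor Churn State: %s\n",
		   bond_3ad_churn_desc(port->sm_churn_actor_state));
	seq_printf(seq, "Partner Churn State: %s\n",
		   bond_3ad_churn_desc(port->sm_churn_partner_state));
}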
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index ea6546d2c946..c28aca25320e 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -63,6 +63,9 @@ enum {
BOND_OPT_LP_INTERVAL,
BOND_OPT_SLAVES,
BOND_OPT_TLB_DYNAMIC_LB,
+ BOND_OPT_AD_ACTOR_SYS_PRIO,
+ BOND_OPT_AD_ACTOR_SYSTEM,
+ BOND_OPT_AD_USER_PORT_KEY,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index fda6feeb6c1f..20defc0353d1 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -30,13 +30,6 @@
#include <net/bond_alb.h>
#include <net/bond_options.h>
-#define DRV_VERSION "3.7.1"
-#define DRV_RELDATE "April 27, 2011"
-#define DRV_NAME "bonding"
-#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
-
-#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
-
#define BOND_MAX_ARP_TARGETS 16
#define BOND_DEFAULT_MIIMON 100
@@ -143,6 +136,9 @@ struct bond_params {
int packets_per_slave;
int tlb_dynamic_lb;
struct reciprocal_value reciprocal_packets_per_slave;
+ u16 ad_actor_sys_prio;
+ u16 ad_user_port_key;
+ u8 ad_actor_system[ETH_ALEN];
};
struct bond_parm_tbl {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 64e09e1e8099..a741678f24a2 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -111,7 +111,7 @@ enum ieee80211_band {
* This may be due to the driver or due to regulatory bandwidth
* restrictions.
* @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
- * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
* @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
@@ -129,7 +129,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_80MHZ = 1<<7,
IEEE80211_CHAN_NO_160MHZ = 1<<8,
IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
- IEEE80211_CHAN_GO_CONCURRENT = 1<<10,
+ IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
};
@@ -215,6 +215,39 @@ enum ieee80211_rate_flags {
};
/**
+ * enum ieee80211_bss_type - BSS type filter
+ *
+ * @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS
+ * @IEEE80211_BSS_TYPE_PBSS: Personal BSS
+ * @IEEE80211_BSS_TYPE_IBSS: Independent BSS
+ * @IEEE80211_BSS_TYPE_MBSS: Mesh BSS
+ * @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type
+ */
+enum ieee80211_bss_type {
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_BSS_TYPE_PBSS,
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_BSS_TYPE_MBSS,
+ IEEE80211_BSS_TYPE_ANY
+};
+
+/**
+ * enum ieee80211_privacy - BSS privacy filter
+ *
+ * @IEEE80211_PRIVACY_ON: privacy bit set
+ * @IEEE80211_PRIVACY_OFF: privacy bit clear
+ * @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting
+ */
+enum ieee80211_privacy {
+ IEEE80211_PRIVACY_ON,
+ IEEE80211_PRIVACY_OFF,
+ IEEE80211_PRIVACY_ANY
+};
+
+#define IEEE80211_PRIVACY(x) \
+ ((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF)
+
+/**
* struct ieee80211_rate - bitrate definition
*
* This structure describes a bitrate that an 802.11 PHY can
@@ -2423,6 +2456,7 @@ struct cfg80211_ops {
struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params);
@@ -3183,10 +3217,8 @@ struct wiphy {
const struct ieee80211_ht_cap *ht_capa_mod_mask;
const struct ieee80211_vht_cap *vht_capa_mod_mask;
-#ifdef CONFIG_NET_NS
/* the network namespace this phy lives in currently */
- struct net *_net;
-#endif
+ possible_net_t _net;
#ifdef CONFIG_CFG80211_WEXT
const struct iw_handler_def *wext;
@@ -4012,14 +4044,16 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
const u8 *ssid, size_t ssid_len,
- u16 capa_mask, u16 capa_val);
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy);
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *ssid, size_t ssid_len)
{
return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len,
- WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY_ANY);
}
/**
@@ -4260,6 +4294,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
int approxlen);
struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
int vendor_event_idx,
@@ -4314,6 +4349,7 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
/**
* cfg80211_vendor_event_alloc - allocate vendor-specific event skb
* @wiphy: the wiphy
+ * @wdev: the wireless device
* @event_idx: index of the vendor event in the wiphy's vendor_events
* @approxlen: an upper bound of the length of the data that will
* be put into the skb
@@ -4322,16 +4358,20 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
* This function allocates and pre-fills an skb for an event on the
* vendor-specific multicast group.
*
+ * If wdev != NULL, both the ifindex and identifier of the specified
+ * wireless device are added to the event message before the vendor data
+ * attribute.
+ *
* When done filling the skb, call cfg80211_vendor_event() with the
* skb to send the event.
*
* Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
static inline struct sk_buff *
-cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen,
- int event_idx, gfp_t gfp)
+cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int approxlen, int event_idx, gfp_t gfp)
{
- return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR,
+ return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
NL80211_ATTR_VENDOR_DATA,
event_idx, approxlen, gfp);
}
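A hedged sketch of the updated allocator with the new wdev argument; the event index and the attribute inside the vendor data are driver-defined placeholders:

#include <net/cfg80211.h>

#define EXAMPLE_VENDOR_EVENT_IDX	0	/* index into wiphy->vendor_events (assumed) */
#define EXAMPLE_VENDOR_ATTR_VALUE	1	/* driver-private attribute id (assumed) */

static void example_send_vendor_event(struct wiphy *wiphy,
				      struct wireless_dev *wdev, u32 value)
{
	struct sk_buff *skb;

	skb = cfg80211_vendor_event_alloc(wiphy, wdev, 32,
					  EXAMPLE_VENDOR_EVENT_IDX, GFP_KERNEL);
	if (!skb)
		return;

	if (nla_put_u32(skb, EXAMPLE_VENDOR_ATTR_VALUE, value)) {
		kfree_skb(skb);
		return;
	}

	cfg80211_vendor_event(skb, GFP_KERNEL);
}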
@@ -4432,7 +4472,7 @@ static inline int cfg80211_testmode_reply(struct sk_buff *skb)
static inline struct sk_buff *
cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
{
- return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE,
+ return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE,
NL80211_ATTR_TESTDATA, -1,
approxlen, gfp);
}
@@ -4535,13 +4575,15 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
* @ie: information elements of the deauth/disassoc frame (may be %NULL)
* @ie_len: length of IEs
* @reason: reason code for the disconnection, set it to 0 if unknown
+ * @locally_generated: disconnection was requested locally
* @gfp: allocation flags
*
* After it calls this function, the driver should enter an idle state
* and not try to connect to any AP any more.
*/
void cfg80211_disconnected(struct net_device *dev, u16 reason,
- const u8 *ie, size_t ie_len, gfp_t gfp);
+ const u8 *ie, size_t ie_len,
+ bool locally_generated, gfp_t gfp);
/**
* cfg80211_ready_on_channel - notification of remain_on_channel start
@@ -4862,6 +4904,17 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
bool ieee80211_operating_class_to_band(u8 operating_class,
enum ieee80211_band *band);
+/**
+ * ieee80211_chandef_to_operating_class - convert chandef to operating class
+ *
+ * @chandef: the chandef to convert
+ * @op_class: a pointer to the resulting operating class
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
+ u8 *op_class);
+
/*
* cfg80211_tdls_oper_request - request userspace to perform TDLS operation
* @dev: the device on which the operation is requested
@@ -4950,6 +5003,64 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
u8 *buf, unsigned int bufsize);
/**
+ * ieee80211_ie_split_ric - split an IE buffer according to ordering (with RIC)
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @after_ric: array of IE types that come after the RIC element
+ * @n_after_ric: size of the @after_ric array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if not, the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids,
+ const u8 *after_ric, int n_after_ric,
+ size_t offset);
+
+/**
+ * ieee80211_ie_split - split an IE buffer according to ordering
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if not, the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset);
+
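
Editor's sketch of how the returned split offset is typically used; the WLAN_EID_* IDs come from linux/ieee80211.h, while the ies/ielen variables and the copy scheme are assumed from a generic caller.

	static const u8 before_custom[] = { WLAN_EID_SSID,
					    WLAN_EID_SUPP_RATES };
	size_t split;

	split = ieee80211_ie_split(ies, ielen, before_custom,
				   ARRAY_SIZE(before_custom), 0);
	/* copy ies[0..split) first, then the caller's own elements,
	 * then the remaining ies[split..ielen)
	 */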
+/**
* cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
* @wdev: the wireless device reporting the wakeup
* @wakeup: the wakeup report
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index eeda67652766..290a9a69af07 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -30,11 +30,13 @@ struct wpan_phy_cca;
struct cfg802154_ops {
struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
const char *name,
+ unsigned char name_assign_type,
int type);
void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
struct net_device *dev);
int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
const char *name,
+ unsigned char name_assign_type,
enum nl802154_iftype type,
__le64 extended_addr);
int (*del_virtual_intf)(struct wpan_phy *wpan_phy,
@@ -42,6 +44,8 @@ struct cfg802154_ops {
int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
int (*set_cca_mode)(struct wpan_phy *wpan_phy,
const struct wpan_phy_cca *cca);
+ int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level);
+ int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power);
int (*set_pan_id)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, __le16 pan_id);
int (*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -59,14 +63,66 @@ struct cfg802154_ops {
struct wpan_dev *wpan_dev, bool mode);
};
+static inline bool
+wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st)
+{
+ switch (st) {
+ case NL802154_SUPPORTED_BOOL_TRUE:
+ return b;
+ case NL802154_SUPPORTED_BOOL_FALSE:
+ return !b;
+ case NL802154_SUPPORTED_BOOL_BOTH:
+ return true;
+ default:
+ WARN_ON(1);
+ }
+
+ return false;
+}
+
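
Editor's sketch of validating a requested listen-before-talk setting against the capability table introduced below; the `requested` flag is hypothetical, wpan_phy->supported.lbt is the field added in this patch.

	if (!wpan_phy_supported_bool(requested, wpan_phy->supported.lbt))
		return -EINVAL;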
+struct wpan_phy_supported {
+ u32 channels[IEEE802154_MAX_PAGE + 1],
+ cca_modes, cca_opts, iftypes;
+ enum nl802154_supported_bool_states lbt;
+ u8 min_minbe, max_minbe, min_maxbe, max_maxbe,
+ min_csma_backoffs, max_csma_backoffs;
+ s8 min_frame_retries, max_frame_retries;
+ size_t tx_powers_size, cca_ed_levels_size;
+ const s32 *tx_powers, *cca_ed_levels;
+};
+
struct wpan_phy_cca {
enum nl802154_cca_modes mode;
enum nl802154_cca_opts opt;
};
-struct wpan_phy {
- struct mutex pib_lock;
+static inline bool
+wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
+{
+ if (a->mode != b->mode)
+ return false;
+
+ if (a->mode == NL802154_CCA_ENERGY_CARRIER)
+ return a->opt == b->opt;
+ return true;
+}
+
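
Editor's sketch of the comparison helper's intended use, skipping hardware reprogramming when the CCA configuration is unchanged; new_cca is an illustrative local variable.

	if (wpan_phy_cca_cmp(&wpan_phy->cca, &new_cca))
		return 0;	/* identical CCA settings, nothing to do */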
+/**
+ * enum wpan_phy_flags - WPAN PHY capability flags
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that the transceiver supports
+ * transmit power setting.
+ * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that the transceiver supports
+ * CCA ED level setting.
+ * @WPAN_PHY_FLAG_CCA_MODE: Indicates that the transceiver supports
+ * CCA mode setting.
+ */
+enum wpan_phy_flags {
+ WPAN_PHY_FLAG_TXPOWER = BIT(1),
+ WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
+ WPAN_PHY_FLAG_CCA_MODE = BIT(3),
+};
+
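
Editor's sketch of a driver advertising which of the new settings its transceiver can tune; the `phy` pointer is assumed to be the driver's struct wpan_phy.

	phy->flags = WPAN_PHY_FLAG_TXPOWER |
		     WPAN_PHY_FLAG_CCA_ED_LEVEL |
		     WPAN_PHY_FLAG_CCA_MODE;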
+struct wpan_phy {
/* If multiple wpan_phys are registered and you're handed e.g.
* a regular netdev with assigned ieee802154_ptr, you won't
* know whether it points to a wpan_phy your driver has registered
@@ -75,6 +131,8 @@ struct wpan_phy {
*/
const void *privid;
+ u32 flags;
+
/*
* This is a PIB according to 802.15.4-2011.
* We do not provide timing-related variables, as they
@@ -82,12 +140,14 @@ struct wpan_phy {
*/
u8 current_channel;
u8 current_page;
- u32 channels_supported[IEEE802154_MAX_PAGE + 1];
- s8 transmit_power;
+ struct wpan_phy_supported supported;
+ /* current transmit_power in mBm */
+ s32 transmit_power;
struct wpan_phy_cca cca;
__le64 perm_extended_addr;
+ /* current cca ed threshold in mBm */
s32 cca_ed_level;
/* PHY depended MAC PIB values */
@@ -119,9 +179,9 @@ struct wpan_dev {
__le64 extended_addr;
/* MAC BSN field */
- u8 bsn;
+ atomic_t bsn;
/* MAC DSN field */
- u8 dsn;
+ atomic_t dsn;
u8 min_be;
u8 max_be;
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 0a55ac715077..2d1d73cb773e 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,7 +122,9 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
- *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
+ __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
+
+ *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}
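
Editor's sketch of the helper in use, assuming an IPv4 header whose destination address is being rewritten; new_daddr is a hypothetical __be32 value.

	struct iphdr *iph = ip_hdr(skb);

	/* fix the header checksum incrementally, then store the address */
	csum_replace4(&iph->check, iph->daddr, new_daddr);
	iph->daddr = new_daddr;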
/* Implements RFC 1624 (Incremental Internet Checksum)
diff --git a/include/net/codel.h b/include/net/codel.h
index aeee28081245..267e70210061 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -7,7 +7,7 @@
* Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
* Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
* Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
- * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -119,12 +119,16 @@ static inline u32 codel_time_to_us(codel_time_t val)
/**
* struct codel_params - contains codel parameters
* @target: target queue size (in time units)
+ * @ce_threshold: threshold for marking packets with ECN CE
* @interval: width of moving time window
+ * @mtu: device mtu, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
*/
struct codel_params {
codel_time_t target;
+ codel_time_t ce_threshold;
codel_time_t interval;
+ u32 mtu;
bool ecn;
};
@@ -159,17 +163,24 @@ struct codel_vars {
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
 * @ecn_mark: number of packets we ECN marked instead of dropping
+ * @ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
u32 drop_count;
u32 ecn_mark;
+ u32 ce_mark;
};
-static void codel_params_init(struct codel_params *params)
+#define CODEL_DISABLED_THRESHOLD INT_MAX
+
+static void codel_params_init(struct codel_params *params,
+ const struct Qdisc *sch)
{
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
+ params->mtu = psched_mtu(qdisc_dev(sch));
+ params->ce_threshold = CODEL_DISABLED_THRESHOLD;
params->ecn = false;
}
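
Editor's sketch of enabling the new CE threshold after the defaults above are applied; `q` stands for a hypothetical qdisc private struct embedding struct codel_params.

	codel_params_init(&q->params, sch);
	/* mark (rather than drop) once sojourn time exceeds 1 ms */
	q->params.ce_threshold = MS2TIME(1);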
@@ -180,7 +191,7 @@ static void codel_vars_init(struct codel_vars *vars)
static void codel_stats_init(struct codel_stats *stats)
{
- stats->maxpacket = 256;
+ stats->maxpacket = 0;
}
/*
@@ -234,7 +245,7 @@ static bool codel_should_drop(const struct sk_buff *skb,
stats->maxpacket = qdisc_pkt_len(skb);
if (codel_time_before(vars->ldelay, params->target) ||
- sch->qstats.backlog <= stats->maxpacket) {
+ sch->qstats.backlog <= params->mtu) {
/* went below - stay below for at least interval */
vars->first_above_time = 0;
return false;
@@ -350,6 +361,9 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt);
}
end:
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+ INET_ECN_set_ce(skb))
+ stats->ce_mark++;
return skb;
}
#endif
diff --git a/include/net/compat.h b/include/net/compat.h
index 42a9c8431177..48103cf94e97 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,7 +40,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#define compat_mmsghdr mmsghdr
#endif /* defined(CONFIG_COMPAT) */
-ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
unsigned int);
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 597b88a94332..207d9ba1f92c 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -49,6 +49,9 @@ struct dcbnl_rtnl_ops {
int (*ieee_setets) (struct net_device *, struct ieee_ets *);
int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
+ int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *);
+ int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *);
+ int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *);
int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
int (*ieee_getapp) (struct net_device *, struct dcb_app *);
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index fac4e3f4a6d3..d0424269313f 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -18,10 +18,11 @@ struct dn_neigh {
void dn_neigh_init(void);
void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct sk_buff *skb);
+int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb);
void dn_neigh_pointopoint_hello(struct sk_buff *skb);
int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb);
extern struct neigh_table dn_neigh_table;
diff --git a/include/net/dsa.h b/include/net/dsa.h
index ed3c34bbb67a..fbca63ba8f73 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -72,6 +72,7 @@ struct dsa_platform_data {
* to the root switch chip of the tree.
*/
struct device *netdev;
+ struct net_device *of_netdev;
/*
* Info structs describing each of the switch chips
@@ -128,6 +129,11 @@ struct dsa_switch {
int index;
/*
+ * Tagging protocol understood by this switch
+ */
+ enum dsa_tag_protocol tag_protocol;
+
+ /*
* Configuration data for this switch.
*/
struct dsa_chip_data *pd;
@@ -165,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
}
+static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
+{
+ return ds->phys_port_mask & (1 << p) && ds->ports[p];
+}
+
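
Editor's sketch of iterating only over ports that were actually set up; DSA_MAX_PORTS comes from this header, the loop body is illustrative.

	int port;

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if (!dsa_is_port_initialized(ds, port))
			continue;
		/* per-port work, e.g. restoring state on resume */
	}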
static inline u8 dsa_upstream_port(struct dsa_switch *ds)
{
struct dsa_switch_tree *dst = ds->dst;
@@ -275,6 +286,22 @@ struct dsa_switch_driver {
int (*get_regs_len)(struct dsa_switch *ds, int port);
void (*get_regs)(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *p);
+
+ /*
+ * Bridge integration
+ */
+ int (*port_join_bridge)(struct dsa_switch *ds, int port,
+ u32 br_port_mask);
+ int (*port_leave_bridge)(struct dsa_switch *ds, int port,
+ u32 br_port_mask);
+ int (*port_stp_update)(struct dsa_switch *ds, int port,
+ u8 state);
+ int (*fdb_add)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_del)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_getnext)(struct dsa_switch *ds, int port,
+ unsigned char *addr, bool *is_static);
};
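
Editor's sketch of a driver wiring up the new bridge/FDB callbacks; every foo_* symbol is made up for illustration, and the mandatory probe/setup members of a real dsa_switch_driver are omitted.

static struct dsa_switch_driver foo_switch_driver = {
	.port_join_bridge	= foo_port_join_bridge,
	.port_leave_bridge	= foo_port_leave_bridge,
	.port_stp_update	= foo_port_stp_update,
	.fdb_add		= foo_fdb_add,
	.fdb_del		= foo_fdb_del,
	.fdb_getnext		= foo_fdb_getnext,
};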
void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst.h b/include/net/dst.h
index 0fb99a26e973..2bc73f8a00a9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -109,7 +109,6 @@ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
-#define DST_METRICS_FORCE_OVERWRITE 0x2UL
#define DST_METRICS_FLAGS 0x3UL
#define __DST_METRICS_PTR(Y) \
((u32 *)((Y) & ~DST_METRICS_FLAGS))
@@ -120,11 +119,6 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
return dst->_metrics & DST_METRICS_READ_ONLY;
}
-static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
-{
- dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
-}
-
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
@@ -355,18 +349,6 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
__skb_tunnel_rx(skb, dev, net);
}
-/* Children define the path of the packet through the
- * Linux networking. Thus, destinations are stackable.
- */
-
-static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
-{
- struct dst_entry *child = dst_clone(skb_dst(skb)->child);
-
- skb_dst_drop(skb);
- return child;
-}
-
int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 1f99a1de0e4f..d64253914a6a 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -12,7 +12,6 @@ struct sock;
struct dst_ops {
unsigned short family;
- __be16 protocol;
unsigned int gc_thresh;
int (*gc)(struct dst_ops *ops);
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index e584de16e4c3..903a55efbffe 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -36,7 +36,8 @@ struct fib_lookup_arg {
void *result;
struct fib_rule *rule;
int flags;
-#define FIB_LOOKUP_NOREF 1
+#define FIB_LOOKUP_NOREF 1
+#define FIB_LOOKUP_IGNORE_LINKSTATE 2
};
struct fib_rules_ops {
@@ -58,7 +59,7 @@ struct fib_rules_ops {
struct sk_buff *,
struct fib_rule_hdr *,
struct nlattr **);
- void (*delete)(struct fib_rule *);
+ int (*delete)(struct fib_rule *);
int (*compare)(struct fib_rule *,
struct fib_rule_hdr *,
struct nlattr **);
@@ -95,17 +96,10 @@ static inline void fib_rule_get(struct fib_rule *rule)
atomic_inc(&rule->refcnt);
}
-static inline void fib_rule_put_rcu(struct rcu_head *head)
-{
- struct fib_rule *rule = container_of(head, struct fib_rule, rcu);
- release_net(rule->fr_net);
- kfree(rule);
-}
-
static inline void fib_rule_put(struct fib_rule *rule)
{
if (atomic_dec_and_test(&rule->refcnt))
- call_rcu(&rule->rcu, fib_rule_put_rcu);
+ kfree_rcu(rule, rcu);
}
static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
new file mode 100644
index 000000000000..1a8c22419936
--- /dev/null
+++ b/include/net/flow_dissector.h
@@ -0,0 +1,220 @@
+#ifndef _NET_FLOW_DISSECTOR_H
+#define _NET_FLOW_DISSECTOR_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/in6.h>
+#include <uapi/linux/if_ether.h>
+
+/**
+ * struct flow_dissector_key_control:
+ * @thoff: Transport header offset
+ * @addr_type: Type of the addresses in the flow (one of the
+ * FLOW_DISSECTOR_KEY_*_ADDRS key ids)
+ */
+struct flow_dissector_key_control {
+ u16 thoff;
+ u16 addr_type;
+};
+
+/**
+ * struct flow_dissector_key_basic:
+ * @n_proto: Network header protocol (e.g. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (e.g. TCP/UDP)
+ */
+struct flow_dissector_key_basic {
+ __be16 n_proto;
+ u8 ip_proto;
+ u8 padding;
+};
+
+struct flow_dissector_key_tags {
+ u32 vlan_id:12,
+ flow_label:20;
+};
+
+struct flow_dissector_key_keyid {
+ __be32 keyid;
+};
+
+/**
+ * struct flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv4_addrs {
+ /* (src,dst) must be grouped, in the same way as in the IP header */
+ __be32 src;
+ __be32 dst;
+};
+
+/**
+ * struct flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv6_addrs {
+ /* (src,dst) must be grouped, in the same way as in the IP header */
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
+/**
+ * struct flow_dissector_key_tipc_addrs:
+ * @srcnode: source node address
+ */
+struct flow_dissector_key_tipc_addrs {
+ __be32 srcnode;
+};
+
+/**
+ * struct flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ */
+struct flow_dissector_key_addrs {
+ union {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_tipc_addrs tipcaddrs;
+ };
+};
+
+/**
+ * struct flow_dissector_key_ports:
+ * @ports: port numbers of Transport header
+ * src: source port number
+ * dst: destination port number
+ */
+struct flow_dissector_key_ports {
+ union {
+ __be32 ports;
+ struct {
+ __be16 src;
+ __be16 dst;
+ };
+ };
+};
+
+/**
+ * struct flow_dissector_key_eth_addrs:
+ * @src: source Ethernet address
+ * @dst: destination Ethernet address
+ */
+struct flow_dissector_key_eth_addrs {
+ /* (dst,src) must be grouped, in the same way as in the ETH header */
+ unsigned char dst[ETH_ALEN];
+ unsigned char src[ETH_ALEN];
+};
+
+enum flow_dissector_key_id {
+ FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
+ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
+ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
+ FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_tags */
+ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
+ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
+
+ FLOW_DISSECTOR_KEY_MAX,
+};
+
+struct flow_dissector_key {
+ enum flow_dissector_key_id key_id;
+ size_t offset; /* offset of struct flow_dissector_key_*
+ in the target struct */
+};
+
+struct flow_dissector {
+ unsigned int used_keys; /* each bit represents presence of one key id */
+ unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
+};
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+ const struct flow_dissector_key *key,
+ unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
+ void *data, __be16 proto, int nhoff, int hlen);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container)
+{
+ return __skb_flow_dissect(skb, flow_dissector, target_container,
+ NULL, 0, 0, 0);
+}
+
+struct flow_keys {
+ struct flow_dissector_key_control control;
+#define FLOW_KEYS_HASH_START_FIELD basic
+ struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_keyid keyid;
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+#define FLOW_KEYS_HASH_OFFSET \
+ offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD)
+
+__be32 flow_get_u32_src(const struct flow_keys *flow);
+__be32 flow_get_u32_dst(const struct flow_keys *flow);
+
+extern struct flow_dissector flow_keys_dissector;
+extern struct flow_dissector flow_keys_buf_dissector;
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+ struct flow_keys *flow)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+ NULL, 0, 0, 0);
+}
+
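
Editor's sketch of the typical pairing with flow_hash_from_keys(), which is declared further down in this header; the skb is assumed to come from the caller.

	struct flow_keys keys;
	u32 hash = 0;

	if (skb_flow_dissect_flow_keys(skb, &keys))
		hash = flow_hash_from_keys(&keys);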
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+ void *data, __be16 proto,
+ int nhoff, int hlen)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+ data, proto, nhoff, hlen);
+}
+
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+ int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
+
+/* struct flow_keys_digest:
+ *
+ * This structure is used to hold a digest of the full flow keys. This is a
+ * larger "hash" of a flow to allow definitively matching specific flows where
+ * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so
+ * that it can be used in the CB of the skb (see sch_choke for an example).
+ */
+#define FLOW_KEYS_DIGEST_LEN 16
+struct flow_keys_digest {
+ u8 data[FLOW_KEYS_DIGEST_LEN];
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+ const struct flow_keys *flow);
+
+#endif
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
deleted file mode 100644
index dc8fd81412bf..000000000000
--- a/include/net/flow_keys.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef _NET_FLOW_KEYS_H
-#define _NET_FLOW_KEYS_H
-
-/* struct flow_keys:
- * @src: source ip address in case of IPv4
- * For IPv6 it contains 32bit hash of src address
- * @dst: destination ip address in case of IPv4
- * For IPv6 it contains 32bit hash of dst address
- * @ports: port numbers of Transport header
- * port16[0]: src port number
- * port16[1]: dst port number
- * @thoff: Transport header offset
- * @n_proto: Network header protocol (eg. IPv4/IPv6)
- * @ip_proto: Transport header protocol (eg. TCP/UDP)
- * All the members, except thoff, are in network byte order.
- */
-struct flow_keys {
- /* (src,dst) must be grouped, in the same way than in IP header */
- __be32 src;
- __be32 dst;
- union {
- __be32 ports;
- __be16 port16[2];
- };
- u16 thoff;
- __be16 n_proto;
- u8 ip_proto;
-};
-
-bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
- void *data, __be16 proto, int nhoff, int hlen);
-static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
-{
- return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
-}
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- void *data, int hlen_proto);
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
-{
- return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-u32 flow_hash_from_keys(struct flow_keys *keys);
-unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
- __be16 protocol);
-#endif
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 0574abd3db86..a9af1cc8c1bc 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -92,9 +92,7 @@ struct genl_info {
struct genlmsghdr * genlhdr;
void * userhdr;
struct nlattr ** attrs;
-#ifdef CONFIG_NET_NS
- struct net * _net;
-#endif
+ possible_net_t _net;
void * user_ptr[2];
struct sock * dst_sk;
};
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 14fb8d3390b4..2a0543a1899d 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,6 +62,11 @@ struct genevehdr {
struct geneve_opt options[];
};
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+ return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
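
Editor's sketch of reading the VNI once the UDP header has been pulled and validated; the vni[] member is assumed from struct genevehdr above.

	const struct genevehdr *gnvh = geneve_hdr(skb);
	u8 vni[3];

	memcpy(vni, gnvh->vni, sizeof(vni));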
#ifdef CONFIG_INET
struct geneve_sock;
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 94a297052442..2c10a9f0c6d9 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -346,15 +346,15 @@ struct ieee802154_mac_params {
struct wpan_phy;
enum {
- IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0,
- IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1,
- IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2,
- IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3,
- IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4,
- IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5,
- IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6,
- IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7,
- IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8,
+ IEEE802154_LLSEC_PARAM_ENABLED = BIT(0),
+ IEEE802154_LLSEC_PARAM_FRAME_COUNTER = BIT(1),
+ IEEE802154_LLSEC_PARAM_OUT_LEVEL = BIT(2),
+ IEEE802154_LLSEC_PARAM_OUT_KEY = BIT(3),
+ IEEE802154_LLSEC_PARAM_KEY_SOURCE = BIT(4),
+ IEEE802154_LLSEC_PARAM_PAN_ID = BIT(5),
+ IEEE802154_LLSEC_PARAM_HWADDR = BIT(6),
+ IEEE802154_LLSEC_PARAM_COORD_HWADDR = BIT(7),
+ IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = BIT(8),
};
struct ieee802154_llsec_ops {
@@ -422,16 +422,6 @@ struct ieee802154_mlme_ops {
struct ieee802154_mac_params *params);
struct ieee802154_llsec_ops *llsec;
-
- /* The fields below are required. */
-
- /*
- * FIXME: these should become the part of PIB/MIB interface.
- * However we still don't have IB interface of any kind
- */
- __le16 (*get_pan_id)(const struct net_device *dev);
- __le16 (*get_short_addr)(const struct net_device *dev);
- u8 (*get_dsn)(const struct net_device *dev);
};
static inline struct ieee802154_mlme_ops *
@@ -440,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev)
return dev->ml_priv;
}
-static inline struct ieee802154_reduced_mlme_ops *
-ieee802154_reduced_mlme_ops(const struct net_device *dev)
-{
- return dev->ml_priv;
-}
-
#endif
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 98e5f9578f86..1c8b6820b694 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -41,18 +41,18 @@ enum {
struct inet6_ifaddr {
struct in6_addr addr;
__u32 prefix_len;
-
+
/* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
__u32 valid_lft;
__u32 prefered_lft;
atomic_t refcnt;
spinlock_t lock;
- spinlock_t state_lock;
int state;
__u32 flags;
__u8 dad_probes;
+ __u8 stable_privacy_retry;
__u16 scope;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 74af137304be..6d539e4e5ba7 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -28,8 +28,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req);
-struct request_sock *inet6_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
+struct request_sock *inet6_csk_search_req(struct sock *sk,
const __be16 rport,
const struct in6_addr *raddr,
const struct in6_addr *laddr,
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 9201afe083fa..7ff588ca6817 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -38,8 +38,6 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
return jhash_3words(lhash, fhash, ports, initval);
}
-int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
-
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
* we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b2828a06a5a6..279f83591971 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -21,12 +21,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size);
+int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
-int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int flags);
+int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
void inet_sock_destruct(struct sock *sk);
@@ -42,7 +41,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
- sk_release_kernel(sk);
+ sock_release(sk->sk_socket);
}
#endif
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5976bdecf58b..0320bbb7d7b5 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -98,7 +98,8 @@ struct inet_connection_sock {
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state:7,
+ __u8 icsk_ca_state:6,
+ icsk_ca_setsockopt:1,
icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
__u8 icsk_pending;
@@ -126,10 +127,13 @@ struct inet_connection_sock {
/* Information on the current probe. */
int probe_size;
+
+ u32 probe_timestamp;
} icsk_mtup;
- u32 icsk_ca_priv[16];
u32 icsk_user_timeout;
-#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32))
+
+ u64 icsk_ca_priv[64 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
@@ -254,8 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
-struct request_sock *inet_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
+struct request_sock *inet_csk_search_req(struct sock *sk,
const __be16 rport,
const __be32 raddr,
const __be32 laddr);
@@ -278,18 +281,10 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
-static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
- struct request_sock *req)
-{
- if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
- inet_csk_delete_keepalive_timer(sk);
-}
-
static inline void inet_csk_reqsk_queue_added(struct sock *sk,
const unsigned long timeout)
{
- if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
- inet_csk_reset_keepalive_timer(sk, timeout);
+ reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}
static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
@@ -307,26 +302,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
}
-static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
- struct request_sock *req,
- struct request_sock **prev)
-{
- reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
-}
-
-static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
- struct request_sock *req,
- struct request_sock **prev)
-{
- inet_csk_reqsk_queue_unlink(sk, req, prev);
- inet_csk_reqsk_queue_removed(sk, req);
- reqsk_free(req);
-}
-
-void inet_csk_reqsk_queue_prune(struct sock *parent,
- const unsigned long interval,
- const unsigned long timeout,
- const unsigned long max_rto);
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 8d1765577acc..e1300b3dd597 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -43,7 +43,7 @@ enum {
* @len: total length of the original datagram
* @meat: length of received fragments so far
* @flags: fragment queue flags
- * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
+ * @max_size: maximum received fragment size
* @net: namespace that this frag belongs to
*/
struct inet_frag_queue {
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index dd1950a7e273..b73c88a19dd4 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
-#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
@@ -76,9 +75,7 @@ struct inet_ehash_bucket {
* ports are created in O(1) time? I thought so. ;-) -DaveM
*/
struct inet_bind_bucket {
-#ifdef CONFIG_NET_NS
- struct net *ib_net;
-#endif
+ possible_net_t ib_net;
unsigned short port;
signed char fastreuse;
signed char fastreuseport;
@@ -150,8 +147,6 @@ struct inet_hashinfo {
*/
struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
____cacheline_aligned_in_smp;
-
- atomic_t bsockets;
};
static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -168,52 +163,12 @@ static inline spinlock_t *inet_ehash_lockp(
return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
-static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
-{
- unsigned int i, size = 256;
-#if defined(CONFIG_PROVE_LOCKING)
- unsigned int nr_pcpus = 2;
-#else
- unsigned int nr_pcpus = num_possible_cpus();
-#endif
- if (nr_pcpus >= 4)
- size = 512;
- if (nr_pcpus >= 8)
- size = 1024;
- if (nr_pcpus >= 16)
- size = 2048;
- if (nr_pcpus >= 32)
- size = 4096;
- if (sizeof(spinlock_t) != 0) {
-#ifdef CONFIG_NUMA
- if (size * sizeof(spinlock_t) > PAGE_SIZE)
- hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
- else
-#endif
- hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
- GFP_KERNEL);
- if (!hashinfo->ehash_locks)
- return ENOMEM;
- for (i = 0; i < size; i++)
- spin_lock_init(&hashinfo->ehash_locks[i]);
- }
- hashinfo->ehash_locks_mask = size - 1;
- return 0;
-}
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
- if (hashinfo->ehash_locks) {
-#ifdef CONFIG_NUMA
- unsigned int size = (hashinfo->ehash_locks_mask + 1) *
- sizeof(spinlock_t);
- if (size > PAGE_SIZE)
- vfree(hashinfo->ehash_locks);
- else
-#endif
- kfree(hashinfo->ehash_locks);
- hashinfo->ehash_locks = NULL;
- }
+ kvfree(hashinfo->ehash_locks);
+ hashinfo->ehash_locks = NULL;
}
struct inet_bind_bucket *
@@ -223,8 +178,8 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
-static inline int inet_bhashfn(struct net *net, const __u16 lport,
- const int bhash_size)
+static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
+ const u32 bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
@@ -233,7 +188,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
const unsigned short snum);
/* These can have wildcards, don't try too hard. */
-static inline int inet_lhashfn(struct net *net, const unsigned short num)
+static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}
@@ -251,6 +206,7 @@ void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
void inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
@@ -385,13 +341,32 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
iph->daddr, dport, inet_iif(skb));
}
+u32 sk_ehashfn(const struct sock *sk);
+u32 inet6_ehashfn(const struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport);
+
+static inline void sk_daddr_set(struct sock *sk, __be32 addr)
+{
+ sk->sk_daddr = addr; /* alias of inet_daddr */
+#if IS_ENABLED(CONFIG_IPV6)
+ ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
+#endif
+}
+
+static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
+{
+ sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
+#if IS_ENABLED(CONFIG_IPV6)
+ ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
+#endif
+}
+
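
Editor's sketch of installing request-socket addresses on a child socket so the IPv6-mapped aliases stay coherent; newsk and ireq are assumed from a typical syn-recv path.

	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);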
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16,
- struct inet_timewait_sock **),
- int (*hash)(struct sock *sk,
- struct inet_timewait_sock *twp));
+ struct inet_timewait_sock **));
int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index eb16c7beed1e..47eb67b08abd 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -27,6 +27,7 @@
#include <net/sock.h>
#include <net/request_sock.h>
#include <net/netns/hash.h>
+#include <net/tcp_states.h>
/** struct ip_options - IP Options
*
@@ -77,6 +78,10 @@ struct inet_request_sock {
#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
#define ir_iif req.__req_common.skc_bound_dev_if
+#define ir_cookie req.__req_common.skc_cookie
+#define ireq_net req.__req_common.skc_net
+#define ireq_state req.__req_common.skc_state
+#define ireq_family req.__req_common.skc_family
kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
@@ -88,11 +93,11 @@ struct inet_request_sock {
acked : 1,
no_srccheck: 1;
kmemcheck_bitfield_end(flags);
+ u32 ir_mark;
union {
struct ip_options_rcu *opt;
struct sk_buff *pktopts;
};
- u32 ir_mark;
};
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -100,13 +105,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
return (struct inet_request_sock *)sk;
}
-static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
- if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) {
+ if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
return skb->mark;
- } else {
- return sk->sk_mark;
- }
+
+ return sk->sk_mark;
}
struct inet_cork {
@@ -183,6 +187,7 @@ struct inet_sock {
transparent:1,
mc_all:1,
nodefrag:1;
+ __u8 bind_address_no_port:1;
__u8 rcv_tos;
__u8 convert_csum;
int uc_index;
@@ -239,18 +244,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr,
initval);
}
-static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
-{
- struct request_sock *req = reqsk_alloc(ops);
- struct inet_request_sock *ireq = inet_rsk(req);
-
- if (req != NULL) {
- kmemcheck_annotate_bitfield(ireq, flags);
- ireq->opt = NULL;
- }
-
- return req;
-}
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener);
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 6c566034e26d..360c4802288d 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -31,67 +31,14 @@
struct inet_hashinfo;
-#define INET_TWDR_RECYCLE_SLOTS_LOG 5
-#define INET_TWDR_RECYCLE_SLOTS (1 << INET_TWDR_RECYCLE_SLOTS_LOG)
-
-/*
- * If time > 4sec, it is "slow" path, no recycling is required,
- * so that we select tick to get range about 4 seconds.
- */
-#if HZ <= 16 || HZ > 4096
-# error Unsupported: HZ <= 16 or HZ > 4096
-#elif HZ <= 32
-# define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 64
-# define INET_TWDR_RECYCLE_TICK (6 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 128
-# define INET_TWDR_RECYCLE_TICK (7 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 256
-# define INET_TWDR_RECYCLE_TICK (8 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 512
-# define INET_TWDR_RECYCLE_TICK (9 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 1024
-# define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 2048
-# define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#else
-# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#endif
-
-static inline u32 inet_tw_time_stamp(void)
-{
- return jiffies;
-}
-
-/* TIME_WAIT reaping mechanism. */
-#define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
-
-#define INET_TWDR_TWKILL_QUOTA 100
-
struct inet_timewait_death_row {
- /* Short-time timewait calendar */
- int twcal_hand;
- unsigned long twcal_jiffie;
- struct timer_list twcal_timer;
- struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS];
-
- spinlock_t death_lock;
- int tw_count;
- int period;
- u32 thread_slots;
- struct work_struct twkill_work;
- struct timer_list tw_timer;
- int slot;
- struct hlist_head cells[INET_TWDR_TWKILL_SLOTS];
- struct inet_hashinfo *hashinfo;
+ atomic_t tw_count;
+
+ struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
int sysctl_tw_recycle;
int sysctl_max_tw_buckets;
};
-void inet_twdr_hangman(unsigned long data);
-void inet_twdr_twkill_work(struct work_struct *work);
-void inet_twdr_twcal_tick(unsigned long data);
-
struct inet_bind_bucket;
/*
@@ -122,6 +69,7 @@ struct inet_timewait_sock {
#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
#define tw_dport __tw_common.skc_dport
#define tw_num __tw_common.skc_num
+#define tw_cookie __tw_common.skc_cookie
int tw_timeout;
volatile unsigned char tw_substate;
@@ -132,52 +80,18 @@ struct inet_timewait_sock {
__be16 tw_sport;
kmemcheck_bitfield_begin(flags);
/* And these are ours. */
- unsigned int tw_pad0 : 1, /* 1 bit hole */
+ unsigned int tw_kill : 1,
tw_transparent : 1,
tw_flowlabel : 20,
tw_pad : 2, /* 2 bits hole */
tw_tos : 8;
kmemcheck_bitfield_end(flags);
- u32 tw_ttd;
+ struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
- struct hlist_node tw_death_node;
+ struct inet_timewait_death_row *tw_dr;
};
#define tw_tclass tw_tos
-static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
-{
- return !hlist_unhashed(&tw->tw_death_node);
-}
-
-static inline void inet_twsk_dead_node_init(struct inet_timewait_sock *tw)
-{
- tw->tw_death_node.pprev = NULL;
-}
-
-static inline void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
-{
- __hlist_del(&tw->tw_death_node);
- inet_twsk_dead_node_init(tw);
-}
-
-static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
-{
- if (inet_twsk_dead_hashed(tw)) {
- __inet_twsk_del_dead_node(tw);
- return 1;
- }
- return 0;
-}
-
-#define inet_twsk_for_each(tw, node, head) \
- hlist_nulls_for_each_entry(tw, node, head, tw_node)
-
-#define inet_twsk_for_each_inmate(tw, jail) \
- hlist_for_each_entry(tw, jail, tw_death_node)
-
-#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
- hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
-
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
return (struct inet_timewait_sock *)sk;
@@ -192,16 +106,14 @@ int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo);
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ struct inet_timewait_death_row *dr,
const int state);
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo);
-void inet_twsk_schedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr,
- const int timeo, const int timewait_len);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr);
+void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 80479abddf73..d5332ddcea3f 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -19,6 +19,7 @@ struct inetpeer_addr_base {
union {
__be32 a4;
__be32 a6[4];
+ struct in6_addr in6;
};
};
@@ -151,7 +152,7 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
{
struct inetpeer_addr daddr;
- *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
+ daddr.addr.in6 = *v6daddr;
daddr.family = AF_INET6;
return inet_getpeer(base, &daddr, create);
}
diff --git a/include/net/ip.h b/include/net/ip.h
index 6cc1eafb153a..0750a186ea63 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -31,7 +31,7 @@
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
struct sock;
@@ -45,6 +45,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_COMPLETE BIT(3)
#define IPSKB_REROUTED BIT(4)
#define IPSKB_DOREDIRECT BIT(5)
+#define IPSKB_FRAG_PMTU BIT(6)
u16 frag_max_size;
};
@@ -108,8 +109,8 @@ int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-int ip_do_nat(struct sk_buff *skb);
+int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct sk_buff *skb);
int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
@@ -318,9 +319,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
}
u32 ip_idents_reserve(u32 hash, int segs);
-void __ip_select_ident(struct iphdr *iph, int segs);
+void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
-static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
+static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+ struct sock *sk, int segs)
{
struct iphdr *iph = ip_hdr(skb);
@@ -337,13 +339,14 @@ static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, in
iph->id = 0;
}
} else {
- __ip_select_ident(iph, segs);
+ __ip_select_ident(net, iph, segs);
}
}
-static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
+static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
+ struct sock *sk)
{
- ip_select_ident_segs(skb, sk, 1);
+ ip_select_ident_segs(net, skb, sk, 1);
}
static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
@@ -352,15 +355,32 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0);
}
+/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->addrs.v4addrs.src = iph->saddr;
+ * flow->addrs.v4addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
+ const struct iphdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
+ offsetof(typeof(flow->addrs), v4addrs.src) +
+ sizeof(flow->addrs.v4addrs.src));
+ memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+}
+
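
Editor's sketch of seeding flow keys directly from an IPv4 header during a hand-rolled dissection; the skb is assumed to carry a valid network header.

	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));
	iph_to_flow_copy_v4addrs(&keys, ip_hdr(skb));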
static inline void inet_set_txhash(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct flow_keys keys;
- keys.src = inet->inet_saddr;
- keys.dst = inet->inet_daddr;
- keys.port16[0] = inet->inet_sport;
- keys.port16[1] = inet->inet_dport;
+ memset(&keys, 0, sizeof(keys));
+
+ keys.addrs.v4addrs.src = inet->inet_saddr;
+ keys.addrs.v4addrs.dst = inet->inet_daddr;
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ keys.ports.src = inet->inet_sport;
+ keys.ports.dst = inet->inet_dport;
sk->sk_txhash = flow_hash_from_keys(&keys);
}
@@ -475,6 +495,16 @@ enum ip_defrag_users {
IP_DEFRAG_MACVLAN,
};
+/* Return true if the value of 'user' is between 'lower_bond'
+ * and 'upper_bond' inclusively.
+ */
+static inline bool ip_defrag_user_in_between(u32 user,
+ enum ip_defrag_users lower_bond,
+ enum ip_defrag_users upper_bond)
+{
+ return user >= lower_bond && user <= upper_bond;
+}
+
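
Editor's sketch of the range check, assuming the conntrack defrag user range already defined earlier in this enum (IP_DEFRAG_CONNTRACK_IN .. __IP_DEFRAG_CONNTRACK_IN_END).

	bool from_conntrack_in =
		ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					  __IP_DEFRAG_CONNTRACK_IN_END);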
int ip_defrag(struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 20e80fa7bbdd..3b76849c190f 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -120,45 +120,19 @@ struct rt6_info {
struct rt6key rt6i_src;
struct rt6key rt6i_prefsrc;
+ struct list_head rt6i_uncached;
+ struct uncached_list *rt6i_uncached_list;
+
struct inet6_dev *rt6i_idev;
- unsigned long _rt6i_peer;
+ struct rt6_info * __percpu *rt6i_pcpu;
u32 rt6i_metric;
+ u32 rt6i_pmtu;
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
};
-static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
-{
- return inetpeer_ptr(rt->_rt6i_peer);
-}
-
-static inline bool rt6_has_peer(struct rt6_info *rt)
-{
- return inetpeer_ptr_is_peer(rt->_rt6i_peer);
-}
-
-static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
- __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
- return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
-{
- inetpeer_init_ptr(&rt->_rt6i_peer, base);
-}
-
-static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
-{
- inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
-}
-
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
return ((struct rt6_info *)dst)->rt6i_idev;
@@ -189,13 +163,12 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
-static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
+static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
- struct dst_entry *new = (struct dst_entry *) from;
+ if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
+ rt = (struct rt6_info *)(rt->dst.from);
- rt->rt6i_flags &= ~RTF_EXPIRES;
- dst_hold(new);
- rt->dst.from = new;
+ return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
}
static inline void ip6_rt_put(struct rt6_info *rt)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index eda131d179d9..297629aadb19 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -145,7 +145,7 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
#ifdef CONFIG_IPV6_SUBTREES
np->saddr_cache = saddr;
#endif
- np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ np->dst_cookie = rt6_get_cookie(rt);
}
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
@@ -163,14 +163,18 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
return rt->rt6i_flags & RTF_LOCAL;
}
-static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
+static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+ const struct in6_addr *daddr)
{
- struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+ struct rt6_info *rt = (struct rt6_info *)dst;
- return rt->rt6i_flags & RTF_ANYCAST;
+ return rt->rt6i_flags & RTF_ANYCAST ||
+ (rt->rt6i_dst.plen != 128 &&
+ ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}
-int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip6_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
@@ -193,9 +197,15 @@ static inline bool ip6_sk_ignore_df(const struct sock *sk)
inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
}
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
+ struct in6_addr *daddr)
{
- return &rt->rt6i_gateway;
+ if (rt->rt6i_flags & RTF_GATEWAY)
+ return &rt->rt6i_gateway;
+ else if (unlikely(rt->rt6i_flags & RTF_CACHE))
+ return &rt->rt6i_dst.addr;
+ else
+ return daddr;
}
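
Editor's sketch of neighbour resolution with the daddr-aware helper; __ipv6_neigh_lookup_noref() is the existing ndisc helper, and the dst/skb variables are assumed from an output path holding rcu_read_lock().

	struct rt6_info *rt = (struct rt6_info *)dst;
	struct in6_addr *nexthop;
	struct neighbour *neigh;

	nexthop = rt6_nexthop(rt, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);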
#endif
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 76c091b53dae..b8529aa1dae7 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -71,14 +71,16 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
struct net *ip6_tnl_get_link_net(const struct net_device *dev);
+int ip6_tnl_get_iflink(const struct net_device *dev);
-static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+ struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
int pkt_len, err;
pkt_len = skb->len;
- err = ip6_local_out(skb);
+ err = ip6_local_out_sk(sk, skb);
if (net_xmit_eval(err) == 0) {
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 5bd120e4bc0a..49c142bdf01e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -136,7 +136,7 @@ struct fib_result {
u32 tclassid;
struct fib_info *fi;
struct fib_table *table;
- struct list_head *fa_head;
+ struct hlist_head *fa_head;
};
struct fib_result_nl {
@@ -185,7 +185,9 @@ struct fib_table {
u32 tb_id;
int tb_default;
int tb_num_default;
- unsigned long tb_data[0];
+ struct rcu_head rcu;
+ unsigned long *tb_data;
+ unsigned long __data[0];
};
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
@@ -195,10 +197,10 @@ int fib_table_delete(struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
int fib_table_flush(struct fib_table *table);
+struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
-
-
#ifndef CONFIG_IP_MULTIPLE_TABLES
#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
@@ -206,12 +208,16 @@ void fib_free_table(struct fib_table *tb);
static inline struct fib_table *fib_get_table(struct net *net, u32 id)
{
+ struct hlist_node *tb_hlist;
struct hlist_head *ptr;
ptr = id == RT_TABLE_LOCAL ?
&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] :
&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX];
- return hlist_entry(ptr->first, struct fib_table, tb_hlist);
+
+ tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr));
+
+ return hlist_entry(tb_hlist, struct fib_table, tb_hlist);
}
static inline struct fib_table *fib_new_table(struct net *net, u32 id)
@@ -220,16 +226,15 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
}
static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
- struct fib_result *res)
+ struct fib_result *res, unsigned int flags)
{
+ struct fib_table *tb;
int err = -ENETUNREACH;
rcu_read_lock();
- if (!fib_table_lookup(fib_get_table(net, RT_TABLE_LOCAL), flp, res,
- FIB_LOOKUP_NOREF) ||
- !fib_table_lookup(fib_get_table(net, RT_TABLE_MAIN), flp, res,
- FIB_LOOKUP_NOREF))
+ tb = fib_get_table(net, RT_TABLE_MAIN);
+ if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF))
err = 0;
rcu_read_unlock();
@@ -244,33 +249,36 @@ void __net_exit fib4_rules_exit(struct net *net);
struct fib_table *fib_new_table(struct net *net, u32 id);
struct fib_table *fib_get_table(struct net *net, u32 id);
-int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
+int __fib_lookup(struct net *net, struct flowi4 *flp,
+ struct fib_result *res, unsigned int flags);
static inline int fib_lookup(struct net *net, struct flowi4 *flp,
- struct fib_result *res)
+ struct fib_result *res, unsigned int flags)
{
- if (!net->ipv4.fib_has_custom_rules) {
- int err = -ENETUNREACH;
-
- rcu_read_lock();
-
- res->tclassid = 0;
- if ((net->ipv4.fib_local &&
- !fib_table_lookup(net->ipv4.fib_local, flp, res,
- FIB_LOOKUP_NOREF)) ||
- (net->ipv4.fib_main &&
- !fib_table_lookup(net->ipv4.fib_main, flp, res,
- FIB_LOOKUP_NOREF)) ||
- (net->ipv4.fib_default &&
- !fib_table_lookup(net->ipv4.fib_default, flp, res,
- FIB_LOOKUP_NOREF)))
- err = 0;
-
- rcu_read_unlock();
-
- return err;
+ struct fib_table *tb;
+ int err;
+
+ flags |= FIB_LOOKUP_NOREF;
+ if (net->ipv4.fib_has_custom_rules)
+ return __fib_lookup(net, flp, res, flags);
+
+ rcu_read_lock();
+
+ res->tclassid = 0;
+
+ for (err = 0; !err; err = -ENETUNREACH) {
+ tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+ if (tb && !fib_table_lookup(tb, flp, res, flags))
+ break;
+
+ tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+ if (tb && !fib_table_lookup(tb, flp, res, flags))
+ break;
}
- return __fib_lookup(net, flp, res);
+
+ rcu_read_unlock();
+
+ return err;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
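For illustration only (not part of this patch): a caller of the reworked
fib_lookup() now passes an extra flags argument. A minimal sketch, where the
wrapper function and its error handling are assumptions:

static int example_check_unicast_route(struct net *net, __be32 daddr)
{
	struct flowi4 fl4 = { .daddr = daddr };
	struct fib_result res;
	int err;

	/* FIB_LOOKUP_NOREF is ORed in internally; other flags pass through */
	err = fib_lookup(net, &fl4, &res, 0);
	if (err)
		return err;		/* typically -ENETUNREACH */

	return res.type == RTN_UNICAST ? 0 : -EHOSTUNREACH;
}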
@@ -294,17 +302,19 @@ static inline int fib_num_tclassid_users(struct net *net)
return 0;
}
#endif
+int fib_unmerge(struct net *net);
+void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
-int fib_sync_down_dev(struct net_device *dev, int force);
+int fib_sync_down_dev(struct net_device *dev, unsigned long event);
int fib_sync_down_addr(struct net *net, __be32 local);
-int fib_sync_up(struct net_device *dev);
+int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
void fib_select_multipath(struct fib_result *res);
/* Exported by fib_trie.c */
void fib_trie_init(void);
-struct fib_table *fib_trie_table(u32 id);
+struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 2c47061a6954..d8214cb88bbc 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -142,6 +142,7 @@ int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
+int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 615b20b58545..4e3731ee4eac 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -47,13 +47,13 @@ static inline struct net *skb_net(const struct sk_buff *skb)
* Start with the most likely hit
* End with BUG
*/
- if (likely(skb->dev && skb->dev->nd_net))
+ if (likely(skb->dev && dev_net(skb->dev)))
return dev_net(skb->dev);
if (skb_dst(skb) && skb_dst(skb)->dev)
return dev_net(skb_dst(skb)->dev);
WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
__func__, __LINE__);
- if (likely(skb->sk && skb->sk->sk_net))
+ if (likely(skb->sk && sock_net(skb->sk)))
return sock_net(skb->sk);
pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
__func__, __LINE__);
@@ -71,11 +71,11 @@ static inline struct net *skb_sknet(const struct sk_buff *skb)
#ifdef CONFIG_NET_NS
#ifdef CONFIG_IP_VS_DEBUG
/* Start with the most likely hit */
- if (likely(skb->sk && skb->sk->sk_net))
+ if (likely(skb->sk && sock_net(skb->sk)))
return sock_net(skb->sk);
WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
__func__, __LINE__);
- if (likely(skb->dev && skb->dev->nd_net))
+ if (likely(skb->dev && dev_net(skb->dev)))
return dev_net(skb->dev);
pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
__func__, __LINE__);
@@ -365,15 +365,15 @@ struct ip_vs_seq {
/* counters per cpu */
struct ip_vs_counters {
- __u32 conns; /* connections scheduled */
- __u32 inpkts; /* incoming packets */
- __u32 outpkts; /* outgoing packets */
+ __u64 conns; /* connections scheduled */
+ __u64 inpkts; /* incoming packets */
+ __u64 outpkts; /* outgoing packets */
__u64 inbytes; /* incoming bytes */
__u64 outbytes; /* outgoing bytes */
};
/* Stats per cpu */
struct ip_vs_cpu_stats {
- struct ip_vs_counters ustats;
+ struct ip_vs_counters cnt;
struct u64_stats_sync syncp;
};
@@ -383,23 +383,40 @@ struct ip_vs_estimator {
u64 last_inbytes;
u64 last_outbytes;
- u32 last_conns;
- u32 last_inpkts;
- u32 last_outpkts;
-
- u32 cps;
- u32 inpps;
- u32 outpps;
- u32 inbps;
- u32 outbps;
+ u64 last_conns;
+ u64 last_inpkts;
+ u64 last_outpkts;
+
+ u64 cps;
+ u64 inpps;
+ u64 outpps;
+ u64 inbps;
+ u64 outbps;
+};
+
+/*
+ * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user
+ */
+struct ip_vs_kstats {
+ u64 conns; /* connections scheduled */
+ u64 inpkts; /* incoming packets */
+ u64 outpkts; /* outgoing packets */
+ u64 inbytes; /* incoming bytes */
+ u64 outbytes; /* outgoing bytes */
+
+ u64 cps; /* current connection rate */
+ u64 inpps; /* current in packet rate */
+ u64 outpps; /* current out packet rate */
+ u64 inbps; /* current in byte rate */
+ u64 outbps; /* current out byte rate */
};
struct ip_vs_stats {
- struct ip_vs_stats_user ustats; /* statistics */
+ struct ip_vs_kstats kstats; /* kernel statistics */
struct ip_vs_estimator est; /* estimator */
struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */
spinlock_t lock; /* spin lock */
- struct ip_vs_stats_user ustats0; /* reset values */
+ struct ip_vs_kstats kstats0; /* reset values */
};
struct dst_entry;
@@ -924,6 +941,7 @@ struct netns_ipvs {
int sysctl_nat_icmp_send;
int sysctl_pmtu_disc;
int sysctl_backup_only;
+ int sysctl_conn_reuse_mode;
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -1042,6 +1060,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
ipvs->sysctl_backup_only;
}
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_conn_reuse_mode;
+}
+
#else
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1109,6 +1132,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
return 0;
}
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
#endif
/* IPVS core functions
@@ -1388,8 +1416,7 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
-void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
- struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4c9fe224d73b..82dbdb092a5d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -19,7 +19,7 @@
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#include <net/snmp.h>
#define SIN6_LEN_RFC2133 24
@@ -47,8 +47,6 @@
#define NEXTHDR_MAX 255
-
-
#define IPV6_DEFAULT_HOPLIMIT 64
#define IPV6_DEFAULT_MCASTHOPS 1
@@ -241,8 +239,10 @@ struct ip6_flowlabel {
struct net *fl_net;
};
-#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
-#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
+#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000)
+
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
@@ -671,8 +671,10 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
-void ipv6_proxy_select_ident(struct sk_buff *skb);
+__be32 ipv6_select_ident(struct net *net,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -690,6 +692,20 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
return hlimit;
}
+/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->v6addrs.src = iph->saddr;
+ * flow->v6addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
+ const struct ipv6hdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
+ offsetof(typeof(flow->addrs), v6addrs.src) +
+ sizeof(flow->addrs.v6addrs.src));
+ memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static inline void ip6_set_txhash(struct sock *sk)
{
@@ -697,10 +713,15 @@ static inline void ip6_set_txhash(struct sock *sk)
struct ipv6_pinfo *np = inet6_sk(sk);
struct flow_keys keys;
- keys.src = (__force __be32)ipv6_addr_hash(&np->saddr);
- keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
- keys.port16[0] = inet->inet_sport;
- keys.port16[1] = inet->inet_dport;
+ memset(&keys, 0, sizeof(keys));
+
+ memcpy(&keys.addrs.v6addrs.src, &np->saddr,
+ sizeof(keys.addrs.v6addrs.src));
+ memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
+ sizeof(keys.addrs.v6addrs.dst));
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ keys.ports.src = inet->inet_sport;
+ keys.ports.dst = inet->inet_dport;
sk->sk_txhash = flow_hash_from_keys(&keys);
}
@@ -720,6 +741,9 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
hash ^= hash >> 12;
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
+ if (net->ipv6.sysctl.flowlabel_state_ranges)
+ flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
}
return flowlabel;
@@ -768,7 +792,7 @@ static inline u8 ip6_tclass(__be32 flowinfo)
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
-int ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb);
/*
* upper-layer output functions
@@ -826,6 +850,7 @@ int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);
int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct sk_buff *skb);
/*
@@ -940,4 +965,8 @@ int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
#endif /* _NET_IPV6_H */
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index a830b01baba4..8f81bbbc38fc 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -519,6 +519,17 @@ iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends,
return stream;
}
+static inline char *
+iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, int event_len)
+{
+ char *res = iwe_stream_add_event(info, stream, ends, iwe, event_len);
+
+ if (res == stream)
+ return ERR_PTR(-E2BIG);
+ return res;
+}
+
/*------------------------------------------------------------------*/
/*
* Wrapper to add an short Wireless Event containing a pointer to a
@@ -545,6 +556,17 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
return stream;
}
+static inline char *
+iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, char *extra)
+{
+ char *res = iwe_stream_add_point(info, stream, ends, iwe, extra);
+
+ if (res == stream)
+ return ERR_PTR(-E2BIG);
+ return res;
+}
+
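For illustration only: a wext scan-dump helper might use the new _check
variants to propagate overflow instead of silently truncating. The wrapper
below and its use of IW_EV_ADDR_LEN are assumptions:

static int example_add_bssid_event(struct iw_request_info *info, char **stream,
				   char *ends, struct iw_event *iwe)
{
	char *next = iwe_stream_add_event_check(info, *stream, ends, iwe,
						IW_EV_ADDR_LEN);

	if (IS_ERR(next))
		return PTR_ERR(next);	/* -E2BIG: the event did not fit */

	*stream = next;			/* advance the output stream */
	return 0;
}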
/*------------------------------------------------------------------*/
/*
* Wrapper to add a value to a Wireless Event in a stream of events.
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 0134681acc4c..fe994d2e5286 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -96,7 +96,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
}
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
+ struct proto *prot, int kern);
void llc_sk_free(struct sock *sk);
void llc_sk_reset(struct sock *sk);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d52914b75331..6b1077c2a63f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -84,6 +84,39 @@
*
*/
+/**
+ * DOC: mac80211 software tx queueing
+ *
+ * mac80211 provides an optional intermediate queueing implementation designed
+ * to allow the driver to keep hardware queues short and provide some fairness
+ * between different stations/interfaces.
+ * In this model, the driver pulls data frames from the mac80211 queue instead
+ * of letting mac80211 push them via drv_tx().
+ * Other frames (e.g. control or management) are still pushed using drv_tx().
+ *
+ * Drivers indicate that they use this model by implementing the .wake_tx_queue
+ * driver operation.
+ *
+ * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a
+ * single per-vif queue for multicast data frames.
+ *
+ * The driver is expected to initialize its private per-queue data for stations
+ * and interfaces in the .add_interface and .sta_add ops.
+ *
+ * The driver can't access the queue directly. To dequeue a frame, it calls
+ * ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it
+ * calls the .wake_tx_queue driver op.
+ *
+ * For AP powersave TIM handling, the driver only needs to indicate if it has
+ * buffered packets in the driver specific data structures by calling
+ * ieee80211_sta_set_buffered(). For frames buffered in the ieee80211_txq
+ * struct, mac80211 sets the appropriate TIM PVB bits and calls
+ * .release_buffered_frames().
+ * In that callback the driver is therefore expected to release its own
+ * buffered frames and afterwards also frames from the ieee80211_txq (obtained
+ * via the usual ieee80211_tx_dequeue).
+ */
+
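A rough sketch of the pull model described above, as a hypothetical driver
callback; the hardware-queueing helper it calls is an assumption, not a
mac80211 API:

static void example_wake_tx_queue(struct ieee80211_hw *hw,
				  struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	/* pull frames mac80211 queued for this sta/tid (or per-vif) queue */
	while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
		example_push_to_hw_queue(hw, txq->ac, skb);	/* hypothetical */
}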
struct device;
/**
@@ -301,17 +334,107 @@ enum ieee80211_bss_change {
#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
/**
- * enum ieee80211_rssi_event - RSSI threshold event
- * An indicator for when RSSI goes below/above a certain threshold.
- * @RSSI_EVENT_HIGH: AP's rssi crossed the high threshold set by the driver.
- * @RSSI_EVENT_LOW: AP's rssi crossed the low threshold set by the driver.
+ * enum ieee80211_event_type - event to be notified to the low level driver
+ * @RSSI_EVENT: AP's rssi crossed a threshold set by the driver.
+ * @MLME_EVENT: event related to MLME
+ * @BAR_RX_EVENT: a BAR was received
+ * @BA_FRAME_TIMEOUT: Frames were released from the reordering buffer because
+ * they timed out. This won't be called for each frame released, but only
+ * once each time the timeout triggers.
+ */
+enum ieee80211_event_type {
+ RSSI_EVENT,
+ MLME_EVENT,
+ BAR_RX_EVENT,
+ BA_FRAME_TIMEOUT,
+};
+
+/**
+ * enum ieee80211_rssi_event_data - relevant when event type is %RSSI_EVENT
+ * @RSSI_EVENT_HIGH: AP's rssi went above the threshold set by the driver.
+ * @RSSI_EVENT_LOW: AP's rssi went below the threshold set by the driver.
*/
-enum ieee80211_rssi_event {
+enum ieee80211_rssi_event_data {
RSSI_EVENT_HIGH,
RSSI_EVENT_LOW,
};
/**
+ * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
+ * @data: See &enum ieee80211_rssi_event_data
+ */
+struct ieee80211_rssi_event {
+ enum ieee80211_rssi_event_data data;
+};
+
+/**
+ * enum ieee80211_mlme_event_data - relevant when event type is %MLME_EVENT
+ * @AUTH_EVENT: the MLME operation is authentication
+ * @ASSOC_EVENT: the MLME operation is association
+ * @DEAUTH_RX_EVENT: deauth received.
+ * @DEAUTH_TX_EVENT: deauth sent.
+ */
+enum ieee80211_mlme_event_data {
+ AUTH_EVENT,
+ ASSOC_EVENT,
+ DEAUTH_RX_EVENT,
+ DEAUTH_TX_EVENT,
+};
+
+/**
+ * enum ieee80211_mlme_event_status - relevant when event type is %MLME_EVENT
+ * @MLME_SUCCESS: the MLME operation completed successfully.
+ * @MLME_DENIED: the MLME operation was denied by the peer.
+ * @MLME_TIMEOUT: the MLME operation timed out.
+ */
+enum ieee80211_mlme_event_status {
+ MLME_SUCCESS,
+ MLME_DENIED,
+ MLME_TIMEOUT,
+};
+
+/**
+ * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
+ * @data: See &enum ieee80211_mlme_event_data
+ * @status: See &enum ieee80211_mlme_event_status
+ * @reason: the reason code if applicable
+ */
+struct ieee80211_mlme_event {
+ enum ieee80211_mlme_event_data data;
+ enum ieee80211_mlme_event_status status;
+ u16 reason;
+};
+
+/**
+ * struct ieee80211_ba_event - data attached for BlockAck related events
+ * @sta: pointer to the &ieee80211_sta to which this event relates
+ * @tid: the tid
+ * @ssn: the starting sequence number (for %BAR_RX_EVENT)
+ */
+struct ieee80211_ba_event {
+ struct ieee80211_sta *sta;
+ u16 tid;
+ u16 ssn;
+};
+
+/**
+ * struct ieee80211_event - event to be sent to the driver
+ * @type: The event itself. See &enum ieee80211_event_type.
+ * @rssi: relevant if &type is %RSSI_EVENT
+ * @mlme: relevant if &type is %AUTH_EVENT
+ * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u: union holding the fields above
+ */
+struct ieee80211_event {
+ enum ieee80211_event_type type;
+ union {
+ struct ieee80211_rssi_event rssi;
+ struct ieee80211_mlme_event mlme;
+ struct ieee80211_ba_event ba;
+ } u;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -323,12 +446,8 @@ enum ieee80211_rssi_event {
* @ibss_creator: indicates if a new IBSS network is being created
* @aid: association ID number, valid only when @assoc is true
* @use_cts_prot: use CTS protection
- * @use_short_preamble: use 802.11b short preamble;
- * if the hardware cannot handle this it must set the
- * IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE hardware flag
- * @use_short_slot: use short slot time (only relevant for ERP);
- * if the hardware cannot handle this it must set the
- * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag
+ * @use_short_preamble: use 802.11b short preamble
+ * @use_short_slot: use short slot time (only relevant for ERP)
* @dtim_period: num of beacons before the next DTIM, for beaconing,
* valid in station mode only if after the driver was notified
* with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then.
@@ -337,12 +456,15 @@ enum ieee80211_rssi_event {
* HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can
* only come from a beacon, but might not become valid until after
* association when a beacon is received (which is notified with the
- * %BSS_CHANGED_DTIM flag.)
+ * %BSS_CHANGED_DTIM flag). See also the sync_dtim_count important notice.
* @sync_device_ts: the device timestamp corresponding to the sync_tsf,
* the driver/device can use this to calculate synchronisation
- * (see @sync_tsf)
+ * (see @sync_tsf). See also the sync_dtim_count important notice.
* @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY
* is requested, see @sync_tsf/@sync_device_ts.
+ * IMPORTANT: These three sync_* parameters may already be out of sync
+ * by the time the driver uses them. A synchronized view is currently
+ * guaranteed only in certain callbacks.
* @beacon_int: beacon interval
* @assoc_capability: capabilities taken from assoc resp
* @basic_rates: bitmap of basic rates, each bit stands for an
@@ -749,6 +871,9 @@ struct ieee80211_tx_info {
/* 4 bytes free */
} control;
struct {
+ u64 cookie;
+ } ack;
+ struct {
struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
s32 ack_signal;
u8 ampdu_ack_len;
@@ -1234,6 +1359,7 @@ enum ieee80211_vif_flags {
* monitor interface (if that is requested.)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *).
+ * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1245,6 +1371,8 @@ struct ieee80211_vif {
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
+ struct ieee80211_txq *txq;
+
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
u32 driver_flags;
@@ -1279,6 +1407,19 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
/**
+ * ieee80211_vif_to_wdev - return a wdev struct from a vif
+ * @vif: the vif to get the wdev for
+ *
+ * This can be used by mac80211 drivers with direct cfg80211 APIs
+ * (like the vendor commands) that need to get the wdev for a vif.
+ *
+ * Note that this function may return %NULL if the given vif isn't
+ * associated with a wdev that the driver knows about (e.g. monitor
+ * or AP_VLAN interfaces.)
+ */
+struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
+
+/**
* enum ieee80211_key_flags - key flags
*
* These flags are used for communication about keys between the driver
@@ -1337,6 +1478,9 @@ enum ieee80211_key_flags {
* wants to be given when a frame is transmitted and needs to be
* encrypted in hardware.
* @cipher: The key's cipher suite selector.
+ * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver
+ * as well if it needs to do software PN assignment by itself
+ * (e.g. due to TSO)
* @flags: key flags, see &enum ieee80211_key_flags.
* @keyidx: the key index (0-3)
* @keylen: key material length
@@ -1349,6 +1493,7 @@ enum ieee80211_key_flags {
* @iv_len: The IV length for this key type
*/
struct ieee80211_key_conf {
+ atomic64_t tx_pn;
u32 cipher;
u8 icv_len;
u8 iv_len;
@@ -1359,6 +1504,47 @@ struct ieee80211_key_conf {
u8 key[0];
};
+#define IEEE80211_MAX_PN_LEN 16
+
+/**
+ * struct ieee80211_key_seq - key sequence counter
+ *
+ * @tkip: TKIP data, containing IV32 and IV16 in host byte order
+ * @ccmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_cmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @hw: data for HW-only (e.g. cipher scheme) keys
+ */
+struct ieee80211_key_seq {
+ union {
+ struct {
+ u32 iv32;
+ u16 iv16;
+ } tkip;
+ struct {
+ u8 pn[6];
+ } ccmp;
+ struct {
+ u8 pn[6];
+ } aes_cmac;
+ struct {
+ u8 pn[6];
+ } aes_gmac;
+ struct {
+ u8 pn[6];
+ } gcmp;
+ struct {
+ u8 seq[IEEE80211_MAX_PN_LEN];
+ u8 seq_len;
+ } hw;
+ };
+};
+
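As an illustration of how a driver doing its own PN assignment might report
the counter back through the new get_key_seq op (sketch only; the callback
name and the CCMP-only handling are assumptions):

static void example_get_key_seq(struct ieee80211_hw *hw,
				struct ieee80211_key_conf *key,
				struct ieee80211_key_seq *seq)
{
	u64 pn = atomic64_read(&key->tx_pn);	/* or read back from hardware */
	int i;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	/* PN is reported most significant byte first, as documented above */
	for (i = 5; i >= 0; i--) {
		seq->ccmp.pn[i] = pn & 0xff;
		pn >>= 8;
	}
}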
/**
* struct ieee80211_cipher_scheme - cipher scheme
*
@@ -1472,7 +1658,8 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @wme: indicates whether the STA supports QoS/WME.
+ * @wme: indicates whether the STA supports QoS/WME (if the local device does,
+ * otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
@@ -1488,6 +1675,8 @@ struct ieee80211_sta_rates {
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
+ * @mfp: indicates whether the STA uses management frame protection or not.
+ * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1504,6 +1693,9 @@ struct ieee80211_sta {
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
+ bool mfp;
+
+ struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1533,6 +1725,28 @@ struct ieee80211_tx_control {
};
/**
+ * struct ieee80211_txq - Software intermediate tx queue
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @sta: station table entry, %NULL for per-vif queue
+ * @tid: the TID for this queue (unused for per-vif queue)
+ * @ac: the AC for this queue
+ * @drv_priv: driver private area, sized by hw->txq_data_size
+ *
+ * The driver can obtain packets from this queue by calling
+ * ieee80211_tx_dequeue().
+ */
+struct ieee80211_txq {
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ u8 tid;
+ u8 ac;
+
+ /* must be last */
+ u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
+/**
* enum ieee80211_hw_flags - hardware flags
*
* These flags are used to indicate hardware capabilities to
@@ -1566,13 +1780,6 @@ struct ieee80211_tx_control {
* multicast frames when there are power saving stations so that
* the driver can fetch them with ieee80211_get_buffered_bc().
*
- * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE:
- * Hardware is not capable of short slot operation on the 2.4 GHz band.
- *
- * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE:
- * Hardware is not capable of receiving frames with short preamble on
- * the 2.4 GHz band.
- *
* @IEEE80211_HW_SIGNAL_UNSPEC:
* Hardware can provide signal values but we don't know its units. We
* expect values between 0 and @max_signal.
@@ -1647,6 +1854,10 @@ struct ieee80211_tx_control {
* the driver returns 1. This also forces the driver to advertise its
* supported cipher suites.
*
+ * @IEEE80211_HW_SUPPORT_FAST_XMIT: The driver/hardware supports fast-xmit;
+ * this currently requires only the ability to calculate the duration
+ * for frames.
+ *
* @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
* queue mapping in order to use different queues (not just one per AC)
* for different virtual interfaces. See the doc section on HW queue
@@ -1674,41 +1885,44 @@ struct ieee80211_tx_control {
* @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
* or tailroom of TX skbs without copying them first.
*
- * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
+ * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
* in one command, mac80211 doesn't have to run separate scans per band.
+ *
+ * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
- IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
- IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2,
- IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3,
- IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
- IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
- IEEE80211_HW_SIGNAL_DBM = 1<<6,
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC = 1<<7,
- IEEE80211_HW_SPECTRUM_MGMT = 1<<8,
- IEEE80211_HW_AMPDU_AGGREGATION = 1<<9,
- IEEE80211_HW_SUPPORTS_PS = 1<<10,
- IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11,
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
- IEEE80211_HW_MFP_CAPABLE = 1<<13,
- IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
- IEEE80211_HW_NO_AUTO_VIF = 1<<15,
- IEEE80211_HW_SW_CRYPTO_CONTROL = 1<<16,
- /* free slots */
- IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
- IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
- IEEE80211_HW_QUEUE_CONTROL = 1<<20,
- IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
- IEEE80211_HW_AP_LINK_PS = 1<<22,
- IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
- IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24,
- IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
- IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
- IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
- IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29,
- IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30,
+ IEEE80211_HW_HAS_RATE_CONTROL,
+ IEEE80211_HW_RX_INCLUDES_FCS,
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING,
+ IEEE80211_HW_SIGNAL_UNSPEC,
+ IEEE80211_HW_SIGNAL_DBM,
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC,
+ IEEE80211_HW_SPECTRUM_MGMT,
+ IEEE80211_HW_AMPDU_AGGREGATION,
+ IEEE80211_HW_SUPPORTS_PS,
+ IEEE80211_HW_PS_NULLFUNC_STACK,
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
+ IEEE80211_HW_MFP_CAPABLE,
+ IEEE80211_HW_WANT_MONITOR_VIF,
+ IEEE80211_HW_NO_AUTO_VIF,
+ IEEE80211_HW_SW_CRYPTO_CONTROL,
+ IEEE80211_HW_SUPPORT_FAST_XMIT,
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS,
+ IEEE80211_HW_CONNECTION_MONITOR,
+ IEEE80211_HW_QUEUE_CONTROL,
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK,
+ IEEE80211_HW_AP_LINK_PS,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW,
+ IEEE80211_HW_SUPPORTS_RC_TABLE,
+ IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF,
+ IEEE80211_HW_TIMING_BEACON_ONLY,
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES,
+ IEEE80211_HW_CHANCTX_STA_CSA,
+ IEEE80211_HW_SUPPORTS_CLONED_SKBS,
+ IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+
+ /* keep last, obviously */
+ NUM_IEEE80211_HW_FLAGS
};
/**
@@ -1756,6 +1970,8 @@ enum ieee80211_hw_flags {
* within &struct ieee80211_sta.
* @chanctx_data_size: size (in bytes) of the drv_priv data area
* within &struct ieee80211_chanctx_conf.
+ * @txq_data_size: size (in bytes) of the drv_priv data area
+ * within &struct ieee80211_txq.
*
* @max_rates: maximum number of alternate rate retry stages the hw
* can handle.
@@ -1787,8 +2003,8 @@ enum ieee80211_hw_flags {
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
*
* @netdev_features: netdev features to be set in each netdev created
- * from this HW. Note only HW checksum features are currently
- * compatible with mac80211. Other feature bits will be rejected.
+ * from this HW. Note that not all features are usable with mac80211;
+ * unsupported features will be rejected during HW registration.
*
* @uapsd_queues: This bitmap is included in (re)association frame to indicate
* for each access category if it is uAPSD trigger-enabled and delivery-
@@ -1804,18 +2020,22 @@ enum ieee80211_hw_flags {
* @n_cipher_schemes: a size of an array of cipher schemes definitions.
* @cipher_schemes: a pointer to an array of cipher scheme definitions
* supported by HW.
+ *
+ * @txq_ac_max_pending: maximum number of frames per AC pending in all txq
+ * entries for a vif.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
struct wiphy *wiphy;
const char *rate_control_algorithm;
void *priv;
- u32 flags;
+ unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
unsigned int extra_tx_headroom;
unsigned int extra_beacon_tailroom;
int vif_data_size;
int sta_data_size;
int chanctx_data_size;
+ int txq_data_size;
u16 queues;
u16 max_listen_interval;
s8 max_signal;
@@ -1832,8 +2052,23 @@ struct ieee80211_hw {
u8 uapsd_max_sp_len;
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
+ int txq_ac_max_pending;
};
+static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
+ enum ieee80211_hw_flags flg)
+{
+ return test_bit(flg, hw->flags);
+}
+#define ieee80211_hw_check(hw, flg) _ieee80211_hw_check(hw, IEEE80211_HW_##flg)
+
+static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
+ enum ieee80211_hw_flags flg)
+{
+ __set_bit(flg, hw->flags);
+}
+#define ieee80211_hw_set(hw, flg) _ieee80211_hw_set(hw, IEEE80211_HW_##flg)
+
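With the flags moved from a u32 into a bitmap, driver code is expected to go
through these helpers. A minimal before/after sketch (function names are
illustrative):

static void example_setup_hw_caps(struct ieee80211_hw *hw)
{
	/* replaces "hw->flags |= IEEE80211_HW_SIGNAL_DBM | ..." */
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
}

static bool example_reports_dbm(struct ieee80211_hw *hw)
{
	/* replaces "hw->flags & IEEE80211_HW_SIGNAL_DBM" */
	return ieee80211_hw_check(hw, SIGNAL_DBM);
}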
/**
* struct ieee80211_scan_request - hw scan request
*
@@ -2347,10 +2582,6 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* stack. It is always safe to pass more frames than requested,
* but this has negative impact on power consumption.
*
- * @FIF_PROMISC_IN_BSS: promiscuous mode within your BSS,
- * think of the BSS as your network segment and then this corresponds
- * to the regular ethernet device promiscuous mode.
- *
* @FIF_ALLMULTI: pass all multicast frames, this is used if requested
* by the user or if the hardware is not capable of filtering by
* multicast address.
@@ -2367,18 +2598,16 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* mac80211 needs to do and the amount of CPU wakeups, so you should
* honour this flag if possible.
*
- * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS
- * is not set then only those addressed to this station.
+ * @FIF_CONTROL: pass control frames (except for PS Poll) addressed to this
+ * station
*
* @FIF_OTHER_BSS: pass frames destined to other BSSes
*
- * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only
- * those addressed to this station.
+ * @FIF_PSPOLL: pass PS Poll frames
*
* @FIF_PROBE_REQ: pass probe request frames
*/
enum ieee80211_filter_flags {
- FIF_PROMISC_IN_BSS = 1<<0,
FIF_ALLMULTI = 1<<1,
FIF_FCSFAIL = 1<<2,
FIF_PLCPFAIL = 1<<3,
@@ -2661,9 +2890,9 @@ enum ieee80211_reconfig_type {
* Returns zero if statistics are available.
* The callback can sleep.
*
- * @get_tkip_seq: If your device implements TKIP encryption in hardware this
- * callback should be provided to read the TKIP transmit IVs (both IV32
- * and IV16) for the given key from hardware.
+ * @get_key_seq: If your device implements encryption in hardware and does
+ * IV/PN assignment then this callback should be provided to read the
+ * IV/PN for the given key from hardware.
* The callback must be atomic.
*
* @set_frag_threshold: Configuration of fragmentation threshold. Assign this
@@ -2844,8 +3073,9 @@ enum ieee80211_reconfig_type {
* @set_bitrate_mask: Set a mask of rates to be used for rate control selection
* when transmitting a frame. Currently only legacy rates are handled.
* The callback can sleep.
- * @rssi_callback: Notify driver when the average RSSI goes above/below
- * thresholds that were registered previously. The callback can sleep.
+ * @event_callback: Notify driver about any event in mac80211. See
+ * &enum ieee80211_event_type for the different types.
+ * The callback must be atomic.
*
* @release_buffered_frames: Release buffered frames according to the given
* parameters. In the case where the driver buffers some frames for
@@ -2993,6 +3223,8 @@ enum ieee80211_reconfig_type {
* response template is provided, together with the location of the
* switch-timing IE within the template. The skb can only be used within
* the function call.
+ *
+ * @wake_tx_queue: Called when new packets have been added to the queue.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3059,8 +3291,9 @@ struct ieee80211_ops {
struct ieee80211_vif *vif);
int (*get_stats)(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
- void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
- u32 *iv32, u16 *iv16);
+ void (*get_key_seq)(struct ieee80211_hw *hw,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq);
int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -3141,9 +3374,9 @@ struct ieee80211_ops {
bool (*tx_frames_pending)(struct ieee80211_hw *hw);
int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
- void (*rssi_callback)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event);
+ void (*event_callback)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event);
void (*allow_buffered_frames)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -3224,6 +3457,9 @@ struct ieee80211_ops {
void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_tdls_ch_sw_params *params);
+
+ void (*wake_tx_queue)(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
};
/**
@@ -3305,14 +3541,15 @@ enum ieee80211_tpt_led_trigger_flags {
};
#ifdef CONFIG_MAC80211_LEDS
-char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
- unsigned int flags,
- const struct ieee80211_tpt_blink *blink_table,
- unsigned int blink_table_len);
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len);
#endif
/**
* ieee80211_get_tx_led_name - get name of TX LED
@@ -3326,7 +3563,7 @@ char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_tx_led_name(hw);
@@ -3347,7 +3584,7 @@ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_rx_led_name(hw);
@@ -3368,7 +3605,7 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_assoc_led_name(hw);
@@ -3389,7 +3626,7 @@ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_radio_led_name(hw);
@@ -3410,7 +3647,7 @@ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
*
* Note: This function must be called before ieee80211_register_hw().
*/
-static inline char *
+static inline const char *
ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
const struct ieee80211_tpt_blink *blink_table,
unsigned int blink_table_len)
@@ -4091,40 +4328,6 @@ void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
u8 *k1, u8 *k2);
/**
- * struct ieee80211_key_seq - key sequence counter
- *
- * @tkip: TKIP data, containing IV32 and IV16 in host byte order
- * @ccmp: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- * @aes_cmac: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- * @aes_gmac: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- * @gcmp: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- */
-struct ieee80211_key_seq {
- union {
- struct {
- u32 iv32;
- u16 iv16;
- } tkip;
- struct {
- u8 pn[6];
- } ccmp;
- struct {
- u8 pn[6];
- } aes_cmac;
- struct {
- u8 pn[6];
- } aes_gmac;
- struct {
- u8 pn[6];
- } gcmp;
- };
-};
-
-/**
* ieee80211_get_key_tx_seq - get key TX sequence counter
*
* @keyconf: the parameter passed with the set key
@@ -4343,13 +4546,33 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
* haven't been re-added to the driver yet.
* @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
* interfaces, even if they haven't been re-added to the driver yet.
+ * @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
*/
enum ieee80211_interface_iteration_flags {
IEEE80211_IFACE_ITER_NORMAL = 0,
IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
+ IEEE80211_IFACE_ITER_ACTIVE = BIT(1),
};
/**
+ * ieee80211_iterate_interfaces - iterate interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware and calls the callback for them. This includes active as well as
+ * inactive interfaces. This function allows the iterator function to sleep.
+ * Will iterate over a new interface during add_interface().
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
+/**
* ieee80211_iterate_active_interfaces - iterate active interfaces
*
* This function iterates over the interfaces associated with a given
@@ -4364,11 +4587,16 @@ enum ieee80211_interface_iteration_flags {
* @iterator: the iterator function to call
* @data: first argument of the iterator function
*/
-void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
- u32 iter_flags,
- void (*iterator)(void *data, u8 *mac,
- struct ieee80211_vif *vif),
- void *data);
+static inline void
+ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
+{
+ ieee80211_iterate_interfaces(hw,
+ iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+ iterator, data);
+}
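A brief usage sketch for the iteration helpers above; the counting callback
is a made-up example:

static void example_count_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int example_num_active_vifs(struct ieee80211_hw *hw)
{
	unsigned int n = 0;

	/* may sleep; use the _atomic variant from atomic context */
	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_NORMAL,
					    example_count_vif, &n);
	return n;
}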
/**
* ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
@@ -5194,30 +5422,13 @@ int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid);
void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
/**
- * ieee80211_ie_split - split an IE buffer according to ordering
- *
- * @ies: the IE buffer
- * @ielen: the length of the IE buffer
- * @ids: an array with element IDs that are allowed before
- * the split
- * @n_ids: the size of the element ID array
- * @offset: offset where to start splitting in the buffer
- *
- * This function splits an IE buffer by updating the @offset
- * variable to point to the location where the buffer should be
- * split.
+ * ieee80211_tx_dequeue - dequeue a packet from a software tx queue
*
- * It assumes that the given IE buffer is well-formed, this
- * has to be guaranteed by the caller!
- *
- * It also assumes that the IEs in the buffer are ordered
- * correctly, if not the result of using this function will not
- * be ordered correctly either, i.e. it does no reordering.
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
*
- * The function returns the offset where the next part of the
- * buffer starts, which may be @ielen if the entire (remainder)
- * of the buffer should be used.
+ * Returns the skb if successful, %NULL if no frame was available.
*/
-size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids, size_t offset);
+struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 850647811749..f534a46911dc 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -19,6 +19,7 @@
#include <net/af_ieee802154.h>
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
+#include <linux/unaligned/memmove.h>
#include <net/cfg802154.h>
@@ -30,99 +31,122 @@
*/
#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14)
-/* The following flags are used to indicate changed address settings from
+/**
+ * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
+ *
+ * The following flags are used to indicate changed address settings from
* the stack to the hardware.
+ *
+ * @IEEE802154_AFILT_SADDR_CHANGED: Indicates that the short address will be
+ * changed.
+ *
+ * @IEEE802154_AFILT_IEEEADDR_CHANGED: Indicates that the extended address
+ * will be changed.
+ *
+ * @IEEE802154_AFILT_PANID_CHANGED: Indicates that the pan id will be changed.
+ *
+ * @IEEE802154_AFILT_PANC_CHANGED: Indicates that the address filter will
+ * do frame address filtering as a pan coordinator.
*/
+enum ieee802154_hw_addr_filt_flags {
+ IEEE802154_AFILT_SADDR_CHANGED = BIT(0),
+ IEEE802154_AFILT_IEEEADDR_CHANGED = BIT(1),
+ IEEE802154_AFILT_PANID_CHANGED = BIT(2),
+ IEEE802154_AFILT_PANC_CHANGED = BIT(3),
+};
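A sketch of how a driver's set_hw_addr_filt callback might consume these
flags; the example_radio structure and register-write helpers are
hypothetical:

static int example_set_hw_addr_filt(struct ieee802154_hw *hw,
				    struct ieee802154_hw_addr_filt *filt,
				    unsigned long changed)
{
	struct example_radio *radio = hw->priv;

	if (changed & IEEE802154_AFILT_SADDR_CHANGED)
		example_write_short_addr(radio, le16_to_cpu(filt->short_addr));
	if (changed & IEEE802154_AFILT_PANID_CHANGED)
		example_write_pan_id(radio, le16_to_cpu(filt->pan_id));
	return 0;
}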
-/* indicates that the Short Address changed */
-#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001
-/* indicates that the IEEE Address changed */
-#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002
-/* indicates that the PAN ID changed */
-#define IEEE802154_AFILT_PANID_CHANGED 0x00000004
-/* indicates that PAN Coordinator status changed */
-#define IEEE802154_AFILT_PANC_CHANGED 0x00000008
-
+/**
+ * struct ieee802154_hw_addr_filt - hardware address filtering settings
+ *
+ * @pan_id: pan_id which should be set to the hardware address filter.
+ *
+ * @short_addr: short_addr which should be set to the hardware address filter.
+ *
+ * @ieee_addr: extended address which should be set to the hardware address
+ * filter.
+ *
+ * @pan_coord: whether the hardware filter should operate as a pan coordinator.
+ */
struct ieee802154_hw_addr_filt {
- __le16 pan_id; /* Each independent PAN selects a unique
- * identifier. This PAN id allows communication
- * between devices within a network using short
- * addresses and enables transmissions between
- * devices across independent networks.
- */
+ __le16 pan_id;
__le16 short_addr;
__le64 ieee_addr;
- u8 pan_coord;
-};
-
-struct ieee802154_vif {
- int type;
-
- /* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ bool pan_coord;
};
+/**
+ * struct ieee802154_hw - ieee802154 hardware
+ *
+ * @extra_tx_headroom: headroom to reserve in each transmit skb for use by the
+ * driver (e.g. for transmit headers.)
+ *
+ * @flags: hardware flags, see &enum ieee802154_hw_flags
+ *
+ * @parent: parent device of the hardware.
+ *
+ * @priv: pointer to private area that was allocated for driver use along with
+ * this structure.
+ *
+ * @phy: This points to the &struct wpan_phy allocated for this 802.15.4 PHY.
+ */
struct ieee802154_hw {
/* filled by the driver */
int extra_tx_headroom;
u32 flags;
struct device *parent;
+ void *priv;
/* filled by mac802154 core */
- struct ieee802154_hw_addr_filt hw_filt;
- void *priv;
struct wpan_phy *phy;
- size_t vif_data_size;
};
-/* Checksum is in hardware and is omitted from a packet
+/**
+ * enum ieee802154_hw_flags - hardware flags
*
- * These following flags are used to indicate hardware capabilities to
+ * These flags are used to indicate hardware capabilities to
* the stack. Generally, flags here should have their meaning
* done in a way that the simplest hardware doesn't need setting
* any particular flags. There are some exceptions to this rule,
* however, so you are advised to review these flags carefully.
+ *
+ * @IEEE802154_HW_TX_OMIT_CKSUM: Indicates that xmitter will add FCS on its
+ * own.
+ *
+ * @IEEE802154_HW_LBT: Indicates that transceiver will support listen before
+ * transmit.
+ *
+ * @IEEE802154_HW_CSMA_PARAMS: Indicates that transceiver will support csma
+ * parameters (max_be, min_be, backoff exponents).
+ *
+ * @IEEE802154_HW_FRAME_RETRIES: Indicates that transceiver will support ARET
+ * frame retries setting.
+ *
+ * @IEEE802154_HW_AFILT: Indicates that transceiver will support hardware
+ * address filter setting.
+ *
+ * @IEEE802154_HW_PROMISCUOUS: Indicates that transceiver will support
+ * promiscuous mode setting.
+ *
+ * @IEEE802154_HW_RX_OMIT_CKSUM: Indicates that receiver omits FCS.
+ *
+ * @IEEE802154_HW_RX_DROP_BAD_CKSUM: Indicates that receiver will not filter
+ * frames with bad checksum.
*/
-
-/* Indicates that xmitter will add FCS on it's own. */
-#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001
-/* Indicates that receiver will autorespond with ACK frames. */
-#define IEEE802154_HW_AACK 0x00000002
-/* Indicates that transceiver will support transmit power setting. */
-#define IEEE802154_HW_TXPOWER 0x00000004
-/* Indicates that transceiver will support listen before transmit. */
-#define IEEE802154_HW_LBT 0x00000008
-/* Indicates that transceiver will support cca mode setting. */
-#define IEEE802154_HW_CCA_MODE 0x00000010
-/* Indicates that transceiver will support cca ed level setting. */
-#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020
-/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
- * settings. */
-#define IEEE802154_HW_CSMA_PARAMS 0x00000040
-/* Indicates that transceiver will support ARET frame retries setting. */
-#define IEEE802154_HW_FRAME_RETRIES 0x00000080
-/* Indicates that transceiver will support hardware address filter setting. */
-#define IEEE802154_HW_AFILT 0x00000100
-/* Indicates that transceiver will support promiscuous mode setting. */
-#define IEEE802154_HW_PROMISCUOUS 0x00000200
-/* Indicates that receiver omits FCS. */
-#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400
-/* Indicates that receiver will not filter frames with bad checksum. */
-#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800
+enum ieee802154_hw_flags {
+ IEEE802154_HW_TX_OMIT_CKSUM = BIT(0),
+ IEEE802154_HW_LBT = BIT(1),
+ IEEE802154_HW_CSMA_PARAMS = BIT(2),
+ IEEE802154_HW_FRAME_RETRIES = BIT(3),
+ IEEE802154_HW_AFILT = BIT(4),
+ IEEE802154_HW_PROMISCUOUS = BIT(5),
+ IEEE802154_HW_RX_OMIT_CKSUM = BIT(6),
+ IEEE802154_HW_RX_DROP_BAD_CKSUM = BIT(7),
+};
/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */
#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \
IEEE802154_HW_RX_OMIT_CKSUM)
-/* This groups the most common CSMA support fields into one. */
-#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \
- IEEE802154_HW_CCA_ED_LEVEL | \
- IEEE802154_HW_CSMA_PARAMS)
-
-/* This groups the most common ARET support fields into one. */
-#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \
- IEEE802154_HW_FRAME_RETRIES)
-
/* struct ieee802154_ops - callbacks from mac802154 to the driver
*
* This structure contains various callbacks that the driver may
@@ -170,7 +194,7 @@ struct ieee802154_hw {
* Returns either zero, or negative errno.
*
* set_txpower:
- * Set radio transmit power in dB. Called with pib_lock held.
+ * Set radio transmit power in mBm. Called with pib_lock held.
* Returns either zero, or negative errno.
*
* set_lbt
@@ -183,7 +207,7 @@ struct ieee802154_hw {
* Returns either zero, or negative errno.
*
* set_cca_ed_level
- * Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ * Sets the CCA energy detection threshold in mBm. Called with pib_lock
* held.
* Returns either zero, or negative errno.
*
@@ -212,12 +236,11 @@ struct ieee802154_ops {
int (*set_hw_addr_filt)(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed);
- int (*set_txpower)(struct ieee802154_hw *hw, int db);
+ int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm);
int (*set_lbt)(struct ieee802154_hw *hw, bool on);
int (*set_cca_mode)(struct ieee802154_hw *hw,
const struct wpan_phy_cca *cca);
- int (*set_cca_ed_level)(struct ieee802154_hw *hw,
- s32 level);
+ int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm);
int (*set_csma_params)(struct ieee802154_hw *hw,
u8 min_be, u8 max_be, u8 retries);
int (*set_frame_retries)(struct ieee802154_hw *hw,
@@ -233,9 +256,7 @@ struct ieee802154_ops {
*/
static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
{
- __le64 tmp = (__force __le64)swab64p(be64_src);
-
- memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+ __put_unaligned_memmove64(swab64p(be64_src), le64_dst);
}
/**
@@ -245,24 +266,112 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
*/
static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
{
- __be64 tmp = (__force __be64)swab64p(le64_src);
-
- memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+ __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
}
-/* Basic interface to register ieee802154 hwice */
+/**
+ * ieee802154_alloc_hw - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac802154 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee802154_hw; the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
struct ieee802154_hw *
ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
+
+/**
+ * ieee802154_free_hw - free hardware descriptor
+ *
+ * This function frees everything that was allocated, including the
+ * private data for the driver. You must call ieee802154_unregister_hw()
+ * before calling this function.
+ *
+ * @hw: the hardware to free
+ */
void ieee802154_free_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_register_hw - Register hardware device
+ *
+ * You must call this function before any other functions in
+ * mac802154. Note that before a hardware can be registered, you
+ * need to fill the contained wpan_phy's information.
+ *
+ * @hw: the device to register as returned by ieee802154_alloc_hw()
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
int ieee802154_register_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_unregister_hw - Unregister a hardware device
+ *
+ * This function instructs mac802154 to free allocated resources
+ * and unregister netdevices from the networking subsystem.
+ *
+ * @hw: the hardware to unregister
+ */
void ieee802154_unregister_hw(struct ieee802154_hw *hw);
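The usual alloc/register lifecycle documented above, as a hedged probe-path
sketch; the example_radio structure, example_radio_ops table and the SPI bus
choice are all assumptions:

struct example_radio {
	struct ieee802154_hw *hw;
	struct spi_device *spi;
};

static int example_radio_probe(struct spi_device *spi)
{
	struct ieee802154_hw *hw;
	struct example_radio *radio;
	int ret;

	/* example_radio_ops: hypothetical struct ieee802154_ops */
	hw = ieee802154_alloc_hw(sizeof(*radio), &example_radio_ops);
	if (!hw)
		return -ENOMEM;

	radio = hw->priv;		/* driver area sized by priv_data_len */
	radio->hw = hw;
	radio->spi = spi;
	hw->parent = &spi->dev;

	ret = ieee802154_register_hw(hw);
	if (ret)
		ieee802154_free_hw(hw);
	return ret;
}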
+/**
+ * ieee802154_rx - receive frame
+ *
+ * Use this function to hand received frames to mac802154. The receive
+ * buffer in @skb must start with an IEEE 802.15.4 header. If a paged
+ * @skb is used, the driver is recommended to put the ieee802154
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other.
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ */
void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
+
+/**
+ * ieee802154_rx_irqsafe - receive frame
+ *
+ * Like ieee802154_rx() but can be called in IRQ context
+ * (internally defers to a tasklet.)
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ * @lqi: link quality indicator
+ */
void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
u8 lqi);
-
+/**
+ * ieee802154_wake_queue - wake ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_wake_queue.
+ */
void ieee802154_wake_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_stop_queue - stop ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_stop_queue.
+ */
void ieee802154_stop_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_xmit_complete - frame transmission complete
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @ifs_handling: indicate interframe space handling
+ */
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
bool ifs_handling);
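A sketch of the expected TX-completion call from a hypothetical driver's
completion path:

static void example_radio_tx_done(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* no interframe-space handling requested in this sketch */
	ieee802154_xmit_complete(hw, skb, false);
}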
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 6bbda34d5e59..b3a7751251b4 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -156,24 +156,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey)
{
- struct neigh_hash_table *nht;
- const u32 *p32 = pkey;
- struct neighbour *n;
- u32 hash_val;
-
- nht = rcu_dereference_bh(nd_tbl.nht);
- hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference_bh(n->next)) {
- u32 *n32 = (u32 *) n->primary_key;
- if (n->dev == dev &&
- ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
- (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0)
- return n;
- }
-
- return NULL;
+ return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
}
static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 76f708486aae..bd33e66f49aa 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -42,6 +42,7 @@ enum {
NEIGH_VAR_MCAST_PROBES,
NEIGH_VAR_UCAST_PROBES,
NEIGH_VAR_APP_PROBES,
+ NEIGH_VAR_MCAST_REPROBES,
NEIGH_VAR_RETRANS_TIME,
NEIGH_VAR_BASE_REACHABLE_TIME,
NEIGH_VAR_DELAY_PROBE_TIME,
@@ -65,9 +66,7 @@ enum {
};
struct neigh_parms {
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
struct net_device *dev;
struct list_head list;
int (*neigh_setup)(struct neighbour *);
@@ -167,9 +166,7 @@ struct neigh_ops {
struct pneigh_entry {
struct pneigh_entry *next;
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
struct net_device *dev;
u8 flags;
u8 key[0];
@@ -193,9 +190,11 @@ struct neigh_table {
int family;
int entry_size;
int key_len;
+ __be16 protocol;
__u32 (*hash)(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd);
+ bool (*key_eq)(const struct neighbour *, const void *pkey);
int (*constructor)(struct neighbour *);
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
@@ -224,6 +223,7 @@ enum {
NEIGH_ND_TABLE = 1,
NEIGH_DN_TABLE = 2,
NEIGH_NR_TABLES,
+ NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
};
static inline int neigh_parms_family(struct neigh_parms *p)
@@ -246,6 +246,57 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
+
+static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
+{
+ return *(const u16 *)n->primary_key == *(const u16 *)pkey;
+}
+
+static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
+{
+ return *(const u32 *)n->primary_key == *(const u32 *)pkey;
+}
+
+static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
+{
+ const u32 *n32 = (const u32 *)n->primary_key;
+ const u32 *p32 = pkey;
+
+ return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+ (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
+}
+
+static inline struct neighbour *___neigh_lookup_noref(
+ struct neigh_table *tbl,
+ bool (*key_eq)(const struct neighbour *n, const void *pkey),
+ __u32 (*hash)(const void *pkey,
+ const struct net_device *dev,
+ __u32 *hash_rnd),
+ const void *pkey,
+ struct net_device *dev)
+{
+ struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
+ struct neighbour *n;
+ u32 hash_val;
+
+ hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
+ if (n->dev == dev && key_eq(n, pkey))
+ return n;
+ }
+
+ return NULL;
+}
+
+static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev)
+{
+ return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
+}
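
As a sketch of how a protocol wires these callbacks into its table (field values illustrative, other mandatory neigh_table members omitted), fast-path lookups then go through __neigh_lookup_noref():

#include <linux/jhash.h>
#include <linux/in6.h>
#include <net/neighbour.h>

/* Illustrative hash over a 128-bit key; a real table provides its own. */
static __u32 example_hash(const void *pkey, const struct net_device *dev,
			  __u32 *hash_rnd)
{
	return jhash2(pkey, 4, *hash_rnd);
}

static struct neigh_table example_tbl = {
	.family		= AF_INET6,
	.key_len	= sizeof(struct in6_addr),
	.hash		= example_hash,
	.key_eq		= neigh_key_eq128,
	/* ... id, parms, constructor, etc. omitted ... */
};

/* Caller must hold rcu_read_lock_bh(); no reference is taken. */
static struct neighbour *example_lookup(struct net_device *dev,
					const struct in6_addr *addr)
{
	return __neigh_lookup_noref(&example_tbl, addr, dev);
}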
+
void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -268,7 +319,6 @@ void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
-int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
@@ -306,6 +356,7 @@ void neigh_for_each(struct neigh_table *tbl,
void (*cb)(struct neighbour *, void *), void *cookie);
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *));
+int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
void pneigh_for_each(struct neigh_table *tbl,
void (*cb)(struct pneigh_entry *));
@@ -459,4 +510,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
memcpy(dst, n->ha, dev->addr_len);
} while (read_seqretry(&n->ha_lock, seq));
}
+
+
#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 36faf4990c4b..e951453e0a23 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -26,7 +26,10 @@
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
+#include <net/netns/mpls.h>
#include <linux/ns_common.h>
+#include <linux/idr.h>
+#include <linux/skbuff.h>
struct user_namespace;
struct proc_dir_entry;
@@ -48,18 +51,16 @@ struct net {
atomic_t count; /* To decided when the network
* namespace should be shut down.
*/
-#ifdef NETNS_REFCNT_DEBUG
- atomic_t use_count; /* To track references we
- * destroy on demand
- */
-#endif
spinlock_t rules_mod_lock;
+ atomic64_t cookie_gen;
+
struct list_head list; /* list of network namespaces */
struct list_head cleanup_list; /* namespaces on death row */
struct list_head exit_list; /* Use only net_mutex */
struct user_namespace *user_ns; /* Owning user namespace */
+ spinlock_t nsid_lock;
struct idr netns_ids;
struct ns_common ns;
@@ -130,6 +131,9 @@ struct net {
#if IS_ENABLED(CONFIG_IP_VS)
struct netns_ipvs *ipvs;
#endif
+#if IS_ENABLED(CONFIG_MPLS)
+ struct netns_mpls mpls;
+#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
};
@@ -230,48 +234,27 @@ int net_eq(const struct net *net1, const struct net *net2)
#endif
-#ifdef NETNS_REFCNT_DEBUG
-static inline struct net *hold_net(struct net *net)
-{
- if (net)
- atomic_inc(&net->use_count);
- return net;
-}
-
-static inline void release_net(struct net *net)
-{
- if (net)
- atomic_dec(&net->use_count);
-}
-#else
-static inline struct net *hold_net(struct net *net)
-{
- return net;
-}
-
-static inline void release_net(struct net *net)
-{
-}
-#endif
-
+typedef struct {
#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
+} possible_net_t;
-static inline void write_pnet(struct net **pnet, struct net *net)
+static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
- *pnet = net;
+#ifdef CONFIG_NET_NS
+ pnet->net = net;
+#endif
}
-static inline struct net *read_pnet(struct net * const *pnet)
+static inline struct net *read_pnet(const possible_net_t *pnet)
{
- return *pnet;
-}
-
+#ifdef CONFIG_NET_NS
+ return pnet->net;
#else
-
-#define write_pnet(pnet, net) do { (void)(net);} while (0)
-#define read_pnet(pnet) (&init_net)
-
+ return &init_net;
#endif
+}
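
For illustration, a structure that needs an optional namespace back-pointer now embeds possible_net_t and only touches it through the accessors; with CONFIG_NET_NS=n the field vanishes and reads resolve to &init_net. The example structure is hypothetical.

#include <net/net_namespace.h>

/* Illustrative private structure; not part of this patch. */
struct example_obj {
	possible_net_t	net;
	/* ... other members ... */
};

static void example_obj_init(struct example_obj *obj, struct net *net)
{
	write_pnet(&obj->net, net);		/* no-op without CONFIG_NET_NS */
}

static bool example_obj_in_net(const struct example_obj *obj,
			       const struct net *net)
{
	return net_eq(read_pnet(&obj->net), net); /* &init_net without CONFIG_NET_NS */
}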
#define for_each_net(VAR) \
list_for_each_entry(VAR, &net_namespace_list, list)
@@ -291,7 +274,9 @@ static inline struct net *read_pnet(struct net * const *pnet)
#define __net_initconst __initconst
#endif
+int peernet2id_alloc(struct net *net, struct net *peer);
int peernet2id(struct net *net, struct net *peer);
+bool peernet_has_id(struct net *net, struct net *peer);
struct net *get_net_ns_by_id(struct net *net, int id);
struct pernet_operations {
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 2aa6048a55c1..bab824bde92c 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -1,6 +1,66 @@
#ifndef _BR_NETFILTER_H_
#define _BR_NETFILTER_H_
+#include "../../../net/bridge/br_private.h"
+
+static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
+{
+ skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
+
+ if (likely(skb->nf_bridge))
+ atomic_set(&(skb->nf_bridge->use), 1);
+
+ return skb->nf_bridge;
+}
+
+void nf_bridge_update_protocol(struct sk_buff *skb);
+
+static inline struct nf_bridge_info *
+nf_bridge_info_get(const struct sk_buff *skb)
+{
+ return skb->nf_bridge;
+}
+
+unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
+
+static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
+{
+ unsigned int len = nf_bridge_encap_header_len(skb);
+
+ skb_push(skb, len);
+ skb->network_header -= len;
+}
+
+int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb);
+
+static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
+{
+ struct net_bridge_port *port;
+
+ port = br_port_get_rcu(dev);
+ return port ? &port->br->fake_rtable : NULL;
+}
+
+struct net_device *setup_pre_routing(struct sk_buff *skb);
void br_netfilter_enable(void);
+#if IS_ENABLED(CONFIG_IPV6)
+int br_validate_ipv6(struct sk_buff *skb);
+unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
+#else
+static inline int br_validate_ipv6(struct sk_buff *skb)
+{
+ return -1;
+}
+
+static inline unsigned int
+br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ return NF_DROP;
+}
+#endif
+
#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 03e928a55229..77862c3645f0 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -5,18 +5,14 @@
#include <net/ip.h>
#include <net/icmp.h>
-static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
-{
- icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
-}
-
+void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
void nf_send_reset(struct sk_buff *oldskb, int hook);
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook);
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
- __be16 protocol, int ttl);
+ __u8 protocol, int ttl);
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth);
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 23216d48abf9..0ea4fa37db16 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -3,15 +3,8 @@
#include <linux/icmpv6.h>
-static inline void
-nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
- unsigned int hooknum)
-{
- if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
- skb_in->dev = net->loopback_dev;
-
- icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
-}
+void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
+ unsigned int hooknum);
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
@@ -20,7 +13,7 @@ const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
unsigned int *otcplen, int hook);
struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
- __be16 protocol, int hoplimit);
+ __u8 protocol, int hoplimit);
void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 74f271a172dd..095433b8a8b0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -95,9 +95,8 @@ struct nf_conn {
/* Timer function; drops refcnt when it goes off. */
struct timer_list timeout;
-#ifdef CONFIG_NET_NS
- struct net *ct_net;
-#endif
+ possible_net_t ct_net;
+
/* all members below initialized via memset */
u8 __nfct_init_offset[0];
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index 340c013795a4..a3127325f624 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -44,40 +44,32 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
unsigned int hooknum);
unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
@@ -85,40 +77,32 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
unsigned int hooknum, unsigned int hdrlen);
unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 84a53d780306..e8635854a55b 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -12,12 +12,8 @@ struct nf_queue_entry {
unsigned int id;
struct nf_hook_ops *elem;
- u_int8_t pf;
+ struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
- unsigned int hook;
- struct net_device *indev;
- struct net_device *outdev;
- int (*okfn)(struct sk_buff *);
/* extra space to store route keys */
};
@@ -28,6 +24,8 @@ struct nf_queue_entry {
struct nf_queue_handler {
int (*outfn)(struct nf_queue_entry *entry,
unsigned int queuenum);
+ void (*nf_hook_drop)(struct net *net,
+ struct nf_hook_ops *ops);
};
void nf_register_queue_handler(const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index decb9a095ae7..2a246680a6c3 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1,6 +1,7 @@
#ifndef _NET_NF_TABLES_H
#define _NET_NF_TABLES_H
+#include <linux/module.h>
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
@@ -26,40 +27,53 @@ struct nft_pktinfo {
static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
pkt->skb = skb;
- pkt->in = pkt->xt.in = in;
- pkt->out = pkt->xt.out = out;
+ pkt->in = pkt->xt.in = state->in;
+ pkt->out = pkt->xt.out = state->out;
pkt->ops = ops;
pkt->xt.hooknum = ops->hooknum;
pkt->xt.family = ops->pf;
}
+/**
+ * struct nft_verdict - nf_tables verdict
+ *
+ * @code: nf_tables/netfilter verdict code
+ * @chain: destination chain for NFT_JUMP/NFT_GOTO
+ */
+struct nft_verdict {
+ u32 code;
+ struct nft_chain *chain;
+};
+
struct nft_data {
union {
- u32 data[4];
- struct {
- u32 verdict;
- struct nft_chain *chain;
- };
+ u32 data[4];
+ struct nft_verdict verdict;
};
} __attribute__((aligned(__alignof__(u64))));
-static inline int nft_data_cmp(const struct nft_data *d1,
- const struct nft_data *d2,
- unsigned int len)
-{
- return memcmp(d1->data, d2->data, len);
-}
+/**
+ * struct nft_regs - nf_tables register set
+ *
+ * @data: data registers
+ * @verdict: verdict register
+ *
+ * The first four data registers alias to the verdict register.
+ */
+struct nft_regs {
+ union {
+ u32 data[20];
+ struct nft_verdict verdict;
+ };
+};
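
A minimal sketch of how an expression's eval callback uses the new register set; the destination register index would normally come from the expression's parsed private data (priv->dreg), which is assumed here rather than shown.

/* Illustrative eval: store the packet mark in a destination register. */
static void example_expr_eval(const struct nft_expr *expr,
			      struct nft_regs *regs,
			      const struct nft_pktinfo *pkt)
{
	u32 *dest = &regs->data[0];	/* normally &regs->data[priv->dreg] */

	*dest = pkt->skb->mark;
}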
-static inline void nft_data_copy(struct nft_data *dst,
- const struct nft_data *src)
+static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
+ unsigned int len)
{
- BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
- *(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
- *(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+ memcpy(dst, src, len);
}
static inline void nft_data_debug(const struct nft_data *data)
@@ -97,7 +111,8 @@ struct nft_data_desc {
unsigned int len;
};
-int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+int nft_data_init(const struct nft_ctx *ctx,
+ struct nft_data *data, unsigned int size,
struct nft_data_desc *desc, const struct nlattr *nla);
void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
@@ -113,12 +128,14 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
}
-int nft_validate_input_register(enum nft_registers reg);
-int nft_validate_output_register(enum nft_registers reg);
-int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
- const struct nft_data *data,
- enum nft_data_types type);
+unsigned int nft_parse_register(const struct nlattr *attr);
+int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
+int nft_validate_register_load(enum nft_registers reg, unsigned int len);
+int nft_validate_register_store(const struct nft_ctx *ctx,
+ enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
/**
* struct nft_userdata - user defined data associated with an object
@@ -138,19 +155,15 @@ struct nft_userdata {
/**
* struct nft_set_elem - generic representation of set elements
*
- * @cookie: implementation specific element cookie
* @key: element key
- * @data: element data (maps only)
- * @flags: element flags (end of interval)
- *
- * The cookie can be used to store a handle to the element for subsequent
- * removal.
+ * @priv: element private data and extensions
*/
struct nft_set_elem {
- void *cookie;
- struct nft_data key;
- struct nft_data data;
- u32 flags;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } key;
+ void *priv;
};
struct nft_set;
@@ -202,11 +215,16 @@ struct nft_set_estimate {
enum nft_set_class class;
};
+struct nft_set_ext;
+struct nft_expr;
+
/**
* struct nft_set_ops - nf_tables set operations
*
* @lookup: look up an element within the set
* @insert: insert new element into set
+ * @activate: activate new element in the next generation
+ * @deactivate: deactivate element in the next generation
* @remove: remove element from set
 * @walk: iterate over all set elements
* @privsize: function to return size of set private data
@@ -214,16 +232,28 @@ struct nft_set_estimate {
* @destroy: destroy private data of set instance
* @list: nf_tables_set_ops list node
* @owner: module reference
+ * @elemsize: element private size
* @features: features supported by the implementation
*/
struct nft_set_ops {
bool (*lookup)(const struct nft_set *set,
- const struct nft_data *key,
- struct nft_data *data);
- int (*get)(const struct nft_set *set,
- struct nft_set_elem *elem);
+ const u32 *key,
+ const struct nft_set_ext **ext);
+ bool (*update)(struct nft_set *set,
+ const u32 *key,
+ void *(*new)(struct nft_set *,
+ const struct nft_expr *,
+ struct nft_regs *),
+ const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_set_ext **ext);
+
int (*insert)(const struct nft_set *set,
const struct nft_set_elem *elem);
+ void (*activate)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void * (*deactivate)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
void (*remove)(const struct nft_set *set,
const struct nft_set_elem *elem);
void (*walk)(const struct nft_ctx *ctx,
@@ -241,6 +271,7 @@ struct nft_set_ops {
struct list_head list;
struct module *owner;
+ unsigned int elemsize;
u32 features;
};
@@ -257,8 +288,12 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @dtype: data type (verdict or numeric type defined by userspace)
* @size: maximum set size
* @nelems: number of elements
+ * @ndeact: number of deactivated elements queued for removal
+ * @timeout: default timeout value in msecs
+ * @gc_int: garbage collection interval in msecs
* @policy: set parameterization (see enum nft_set_policies)
* @ops: set ops
+ * @pnet: network namespace
* @flags: set flags
* @klen: key length
* @dlen: data length
@@ -271,10 +306,14 @@ struct nft_set {
u32 ktype;
u32 dtype;
u32 size;
- u32 nelems;
+ atomic_t nelems;
+ u32 ndeact;
+ u64 timeout;
+ u32 gc_int;
u16 policy;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
+ possible_net_t pnet;
u16 flags;
u8 klen;
u8 dlen;
@@ -287,16 +326,27 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline struct nft_set *nft_set_container_of(const void *priv)
+{
+ return (void *)priv - offsetof(struct nft_set, data);
+}
+
struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
const struct nlattr *nla);
struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
const struct nlattr *nla);
+static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
+{
+ return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ;
+}
+
/**
* struct nft_set_binding - nf_tables set binding
*
* @list: set bindings list node
* @chain: chain containing the rule bound to the set
+ * @flags: set action flags
*
* A set binding contains all information necessary for validation
* of new elements added to a bound set.
@@ -304,6 +354,7 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
struct nft_set_binding {
struct list_head list;
const struct nft_chain *chain;
+ u32 flags;
};
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@ -311,6 +362,215 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
+/**
+ * enum nft_set_extensions - set extension type IDs
+ *
+ * @NFT_SET_EXT_KEY: element key
+ * @NFT_SET_EXT_DATA: mapping data
+ * @NFT_SET_EXT_FLAGS: element flags
+ * @NFT_SET_EXT_TIMEOUT: element timeout
+ * @NFT_SET_EXT_EXPIRATION: element expiration time
+ * @NFT_SET_EXT_USERDATA: user data associated with the element
+ * @NFT_SET_EXT_EXPR: expression associated with the element
+ * @NFT_SET_EXT_NUM: number of extension types
+ */
+enum nft_set_extensions {
+ NFT_SET_EXT_KEY,
+ NFT_SET_EXT_DATA,
+ NFT_SET_EXT_FLAGS,
+ NFT_SET_EXT_TIMEOUT,
+ NFT_SET_EXT_EXPIRATION,
+ NFT_SET_EXT_USERDATA,
+ NFT_SET_EXT_EXPR,
+ NFT_SET_EXT_NUM
+};
+
+/**
+ * struct nft_set_ext_type - set extension type
+ *
+ * @len: fixed part length of the extension
+ * @align: alignment requirements of the extension
+ */
+struct nft_set_ext_type {
+ u8 len;
+ u8 align;
+};
+
+extern const struct nft_set_ext_type nft_set_ext_types[];
+
+/**
+ * struct nft_set_ext_tmpl - set extension template
+ *
+ * @len: length of extension area
+ * @offset: offsets of individual extension types
+ */
+struct nft_set_ext_tmpl {
+ u16 len;
+ u8 offset[NFT_SET_EXT_NUM];
+};
+
+/**
+ * struct nft_set_ext - set extensions
+ *
+ * @genmask: generation mask
+ * @offset: offsets of individual extension types
+ * @data: beginning of extension data
+ */
+struct nft_set_ext {
+ u8 genmask;
+ u8 offset[NFT_SET_EXT_NUM];
+ char data[0];
+};
+
+static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
+{
+ memset(tmpl, 0, sizeof(*tmpl));
+ tmpl->len = sizeof(struct nft_set_ext);
+}
+
+static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+ unsigned int len)
+{
+ tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align);
+ BUG_ON(tmpl->len > U8_MAX);
+ tmpl->offset[id] = tmpl->len;
+ tmpl->len += nft_set_ext_types[id].len + len;
+}
+
+static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+{
+ nft_set_ext_add_length(tmpl, id, 0);
+}
+
+static inline void nft_set_ext_init(struct nft_set_ext *ext,
+ const struct nft_set_ext_tmpl *tmpl)
+{
+ memcpy(ext->offset, tmpl->offset, sizeof(ext->offset));
+}
+
+static inline bool __nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+ return !!ext->offset[id];
+}
+
+static inline bool nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+ return ext && __nft_set_ext_exists(ext, id);
+}
+
+static inline void *nft_set_ext(const struct nft_set_ext *ext, u8 id)
+{
+ return (void *)ext + ext->offset[id];
+}
+
+static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_KEY);
+}
+
+static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_DATA);
+}
+
+static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
+}
+
+static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
+}
+
+static inline unsigned long *nft_set_ext_expiration(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
+}
+
+static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_USERDATA);
+}
+
+static inline struct nft_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_EXPR);
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+ return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
+ time_is_before_eq_jiffies(*nft_set_ext_expiration(ext));
+}
+
+static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
+ void *elem)
+{
+ return elem + set->ops->elemsize;
+}
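
A rough sketch of how a backend assembles an element from a template, adding the key and (for maps) data extensions. The real allocation path is nft_set_elem_init(); this only illustrates how the template, offsets and extension area fit together.

#include <linux/slab.h>

static void *example_elem_alloc(const struct nft_set *set, gfp_t gfp)
{
	struct nft_set_ext_tmpl tmpl;
	struct nft_set_ext *ext;
	void *elem;

	nft_set_ext_prepare(&tmpl);
	nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
	if (set->flags & NFT_SET_MAP)
		nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, set->dlen);

	/* backend-private part first, extension area behind it */
	elem = kzalloc(set->ops->elemsize + tmpl.len, gfp);
	if (!elem)
		return NULL;

	ext = nft_set_elem_ext(set, elem);
	nft_set_ext_init(ext, &tmpl);
	return elem;
}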
+
+void *nft_set_elem_init(const struct nft_set *set,
+ const struct nft_set_ext_tmpl *tmpl,
+ const u32 *key, const u32 *data,
+ u64 timeout, gfp_t gfp);
+void nft_set_elem_destroy(const struct nft_set *set, void *elem);
+
+/**
+ * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
+ *
+ * @rcu: rcu head
+ * @set: set the elements belong to
+ * @cnt: count of elements
+ */
+struct nft_set_gc_batch_head {
+ struct rcu_head rcu;
+ const struct nft_set *set;
+ unsigned int cnt;
+};
+
+#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \
+ sizeof(struct nft_set_gc_batch_head)) / \
+ sizeof(void *))
+
+/**
+ * struct nft_set_gc_batch - nf_tables set garbage collection batch
+ *
+ * @head: GC batch head
+ * @elems: garbage collection elements
+ */
+struct nft_set_gc_batch {
+ struct nft_set_gc_batch_head head;
+ void *elems[NFT_SET_GC_BATCH_SIZE];
+};
+
+struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
+ gfp_t gfp);
+void nft_set_gc_batch_release(struct rcu_head *rcu);
+
+static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
+{
+ if (gcb != NULL)
+ call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
+}
+
+static inline struct nft_set_gc_batch *
+nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
+ gfp_t gfp)
+{
+ if (gcb != NULL) {
+ if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
+ return gcb;
+ nft_set_gc_batch_complete(gcb);
+ }
+ return nft_set_gc_batch_alloc(set, gfp);
+}
+
+static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
+ void *elem)
+{
+ gcb->elems[gcb->head.cnt++] = elem;
+}
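
For illustration, a backend's GC walk would collect expired elements into batches roughly as below; the surrounding iteration, busy-bit handling and deactivation are backend-specific and omitted. The caller finishes a pass with nft_set_gc_batch_complete(gcb), which frees the batch via RCU.

/* Returns the (possibly new) batch; gcb may start out as NULL. */
static struct nft_set_gc_batch *
example_gc_elem(const struct nft_set *set, struct nft_set_gc_batch *gcb,
		struct nft_set_ext *ext, void *elem)
{
	if (!nft_set_elem_expired(ext))
		return gcb;

	gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
	if (gcb)
		nft_set_gc_batch_add(gcb, elem);
	return gcb;
}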
/**
* struct nft_expr_type - nf_tables expression type
@@ -323,6 +583,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
* @policy: netlink attribute policy
* @maxattr: highest netlink attribute number
* @family: address family for AF-specific types
+ * @flags: expression type flags
*/
struct nft_expr_type {
const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
@@ -334,8 +595,11 @@ struct nft_expr_type {
const struct nla_policy *policy;
unsigned int maxattr;
u8 family;
+ u8 flags;
};
+#define NFT_EXPR_STATEFUL 0x1
+
/**
* struct nft_expr_ops - nf_tables expression operations
*
@@ -351,7 +615,7 @@ struct nft_expr_type {
struct nft_expr;
struct nft_expr_ops {
void (*eval)(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
unsigned int size;
@@ -389,6 +653,18 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
return (void *)expr->data;
}
+struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
+ const struct nlattr *nla);
+void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
+int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
+ const struct nft_expr *expr);
+
+static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+{
+ __module_get(src->ops->type->owner);
+ memcpy(dst, src, src->ops->size);
+}
+
/**
* struct nft_rule - nf_tables rule
*
@@ -409,74 +685,6 @@ struct nft_rule {
__attribute__((aligned(__alignof__(struct nft_expr))));
};
-/**
- * struct nft_trans - nf_tables object update in transaction
- *
- * @list: used internally
- * @msg_type: message type
- * @ctx: transaction context
- * @data: internal information related to the transaction
- */
-struct nft_trans {
- struct list_head list;
- int msg_type;
- struct nft_ctx ctx;
- char data[0];
-};
-
-struct nft_trans_rule {
- struct nft_rule *rule;
-};
-
-#define nft_trans_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->rule)
-
-struct nft_trans_set {
- struct nft_set *set;
- u32 set_id;
-};
-
-#define nft_trans_set(trans) \
- (((struct nft_trans_set *)trans->data)->set)
-#define nft_trans_set_id(trans) \
- (((struct nft_trans_set *)trans->data)->set_id)
-
-struct nft_trans_chain {
- bool update;
- char name[NFT_CHAIN_MAXNAMELEN];
- struct nft_stats __percpu *stats;
- u8 policy;
-};
-
-#define nft_trans_chain_update(trans) \
- (((struct nft_trans_chain *)trans->data)->update)
-#define nft_trans_chain_name(trans) \
- (((struct nft_trans_chain *)trans->data)->name)
-#define nft_trans_chain_stats(trans) \
- (((struct nft_trans_chain *)trans->data)->stats)
-#define nft_trans_chain_policy(trans) \
- (((struct nft_trans_chain *)trans->data)->policy)
-
-struct nft_trans_table {
- bool update;
- bool enable;
-};
-
-#define nft_trans_table_update(trans) \
- (((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_enable(trans) \
- (((struct nft_trans_table *)trans->data)->enable)
-
-struct nft_trans_elem {
- struct nft_set *set;
- struct nft_set_elem elem;
-};
-
-#define nft_trans_elem_set(trans) \
- (((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem(trans) \
- (((struct nft_trans_elem *)trans->data)->elem)
-
static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
{
return (struct nft_expr *)&rule->data[0];
@@ -517,7 +725,6 @@ enum nft_chain_flags {
*
* @rules: list of rules in the chain
* @list: used internally
- * @net: net namespace that this chain belongs to
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
@@ -528,7 +735,6 @@ enum nft_chain_flags {
struct nft_chain {
struct list_head rules;
struct list_head list;
- struct net *net;
struct nft_table *table;
u64 handle;
u32 use;
@@ -544,6 +750,25 @@ enum nft_chain_type {
NFT_CHAIN_T_MAX
};
+/**
+ * struct nf_chain_type - nf_tables chain type info
+ *
+ * @name: name of the type
+ * @type: numeric identifier
+ * @family: address family
+ * @owner: module owner
+ * @hook_mask: mask of valid hooks
+ * @hooks: hookfn overrides
+ */
+struct nf_chain_type {
+ const char *name;
+ enum nft_chain_type type;
+ int family;
+ struct module *owner;
+ unsigned int hook_mask;
+ nf_hookfn *hooks[NF_MAX_HOOKS];
+};
+
int nft_chain_validate_dependency(const struct nft_chain *chain,
enum nft_chain_type type);
int nft_chain_validate_hooks(const struct nft_chain *chain,
@@ -556,22 +781,28 @@ struct nft_stats {
};
#define NFT_HOOK_OPS_MAX 2
+#define NFT_BASECHAIN_DISABLED (1 << 0)
/**
* struct nft_base_chain - nf_tables base chain
*
* @ops: netfilter hook ops
+ * @pnet: net namespace that this chain belongs to
* @type: chain type
* @policy: default policy
* @stats: per-cpu chain stats
* @chain: the chain
+ * @dev_name: device name that this base chain is attached to (if any)
*/
struct nft_base_chain {
struct nf_hook_ops ops[NFT_HOOK_OPS_MAX];
+ possible_net_t pnet;
const struct nf_chain_type *type;
u8 policy;
+ u8 flags;
struct nft_stats __percpu *stats;
struct nft_chain chain;
+ char dev_name[IFNAMSIZ];
};
static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
@@ -579,6 +810,11 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai
return container_of(chain, struct nft_base_chain, chain);
}
+int nft_register_basechain(struct nft_base_chain *basechain,
+ unsigned int hook_nops);
+void nft_unregister_basechain(struct nft_base_chain *basechain,
+ unsigned int hook_nops);
+
unsigned int nft_do_chain(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops);
@@ -600,7 +836,11 @@ struct nft_table {
u64 hgenerator;
u32 use;
u16 flags;
- char name[];
+ char name[NFT_TABLE_MAXNAMELEN];
+};
+
+enum nft_af_flags {
+ NFT_AF_NEEDS_DEV = (1 << 0),
};
/**
@@ -611,6 +851,7 @@ struct nft_table {
* @nhooks: number of hooks in this family
* @owner: module owner
* @tables: used internally
+ * @flags: family flags
* @nops: number of hook ops in this family
* @hook_ops_init: initialization function for chain hook ops
* @hooks: hookfn overrides for packet validation
@@ -621,6 +862,7 @@ struct nft_af_info {
unsigned int nhooks;
struct module *owner;
struct list_head tables;
+ u32 flags;
unsigned int nops;
void (*hook_ops_init)(struct nf_hook_ops *,
unsigned int);
@@ -630,25 +872,6 @@ struct nft_af_info {
int nft_register_afinfo(struct net *, struct nft_af_info *);
void nft_unregister_afinfo(struct nft_af_info *);
-/**
- * struct nf_chain_type - nf_tables chain type info
- *
- * @name: name of the type
- * @type: numeric identifier
- * @family: address family
- * @owner: module owner
- * @hook_mask: mask of valid hooks
- * @hooks: hookfn overrides
- */
-struct nf_chain_type {
- const char *name;
- enum nft_chain_type type;
- int family;
- struct module *owner;
- unsigned int hook_mask;
- nf_hookfn *hooks[NF_MAX_HOOKS];
-};
-
int nft_register_chain_type(const struct nf_chain_type *);
void nft_unregister_chain_type(const struct nf_chain_type *);
@@ -673,4 +896,153 @@ void nft_unregister_expr(struct nft_expr_type *);
#define MODULE_ALIAS_NFT_SET() \
MODULE_ALIAS("nft-set")
+/*
+ * The gencursor defines two generations, the currently active and the
+ * next one. Objects contain a bitmask of 2 bits specifying the generations
+ * they're active in. A set bit means they're inactive in the generation
+ * represented by that bit.
+ *
+ * New objects start out as inactive in the current and active in the
+ * next generation. When committing the ruleset the bitmask is cleared,
+ * meaning they're active in all generations. When removing an object,
+ * it is set inactive in the next generation. After committing the ruleset,
+ * the objects are removed.
+ */
+static inline unsigned int nft_gencursor_next(const struct net *net)
+{
+ return net->nft.gencursor + 1 == 1 ? 1 : 0;
+}
+
+static inline u8 nft_genmask_next(const struct net *net)
+{
+ return 1 << nft_gencursor_next(net);
+}
+
+static inline u8 nft_genmask_cur(const struct net *net)
+{
+ /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */
+ return 1 << ACCESS_ONCE(net->nft.gencursor);
+}
+
+#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1))
+
+/*
+ * Set element transaction helpers
+ */
+
+static inline bool nft_set_elem_active(const struct nft_set_ext *ext,
+ u8 genmask)
+{
+ return !(ext->genmask & genmask);
+}
+
+static inline void nft_set_elem_change_active(const struct nft_set *set,
+ struct nft_set_ext *ext)
+{
+ ext->genmask ^= nft_genmask_next(read_pnet(&set->pnet));
+}
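
As a small usage sketch, a lookup-side backend combines the generation and expiration helpers like this to decide whether an element should be reported in the current generation:

static bool example_elem_visible(const struct net *net,
				 const struct nft_set_ext *ext)
{
	u8 genmask = nft_genmask_cur(net);

	return nft_set_elem_active(ext, genmask) &&
	       !nft_set_elem_expired(ext);
}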
+
+/*
+ * We use a free bit in the genmask field to indicate the element
+ * is busy, meaning it is currently being processed either by
+ * the netlink API or GC.
+ *
+ * Even though the genmask is only a single byte wide, this works
+ * because the extension structure is fully constant once initialized,
+ * so there are no non-atomic write accesses unless it is already
+ * marked busy.
+ */
+#define NFT_SET_ELEM_BUSY_MASK (1 << 2)
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_BUSY_BIT 2
+#elif defined(__BIG_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
+#else
+#error
+#endif
+
+static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
+{
+ unsigned long *word = (unsigned long *)ext;
+
+ BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+ return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
+}
+
+static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+{
+ unsigned long *word = (unsigned long *)ext;
+
+ clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
+}
+
+/**
+ * struct nft_trans - nf_tables object update in transaction
+ *
+ * @list: used internally
+ * @msg_type: message type
+ * @ctx: transaction context
+ * @data: internal information related to the transaction
+ */
+struct nft_trans {
+ struct list_head list;
+ int msg_type;
+ struct nft_ctx ctx;
+ char data[0];
+};
+
+struct nft_trans_rule {
+ struct nft_rule *rule;
+};
+
+#define nft_trans_rule(trans) \
+ (((struct nft_trans_rule *)trans->data)->rule)
+
+struct nft_trans_set {
+ struct nft_set *set;
+ u32 set_id;
+};
+
+#define nft_trans_set(trans) \
+ (((struct nft_trans_set *)trans->data)->set)
+#define nft_trans_set_id(trans) \
+ (((struct nft_trans_set *)trans->data)->set_id)
+
+struct nft_trans_chain {
+ bool update;
+ char name[NFT_CHAIN_MAXNAMELEN];
+ struct nft_stats __percpu *stats;
+ u8 policy;
+};
+
+#define nft_trans_chain_update(trans) \
+ (((struct nft_trans_chain *)trans->data)->update)
+#define nft_trans_chain_name(trans) \
+ (((struct nft_trans_chain *)trans->data)->name)
+#define nft_trans_chain_stats(trans) \
+ (((struct nft_trans_chain *)trans->data)->stats)
+#define nft_trans_chain_policy(trans) \
+ (((struct nft_trans_chain *)trans->data)->policy)
+
+struct nft_trans_table {
+ bool update;
+ bool enable;
+};
+
+#define nft_trans_table_update(trans) \
+ (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_table_enable(trans) \
+ (((struct nft_trans_table *)trans->data)->enable)
+
+struct nft_trans_elem {
+ struct nft_set *set;
+ struct nft_set_elem elem;
+};
+
+#define nft_trans_elem_set(trans) \
+ (((struct nft_trans_elem *)trans->data)->set)
+#define nft_trans_elem(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem)
+
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a75fc8e27cd6..c6f400cfaac8 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -31,6 +31,9 @@ void nft_cmp_module_exit(void);
int nft_lookup_module_init(void);
void nft_lookup_module_exit(void);
+int nft_dynset_module_init(void);
+void nft_dynset_module_exit(void);
+
int nft_bitwise_module_init(void);
void nft_bitwise_module_exit(void);
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index cba143fbd2e4..2df7f96902ee 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -8,12 +8,11 @@ static inline void
nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
struct iphdr *ip;
- nft_set_pktinfo(pkt, ops, skb, in, out);
+ nft_set_pktinfo(pkt, ops, skb, state);
ip = ip_hdr(pkt->skb);
pkt->tprot = ip->protocol;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 74d976137658..97db2e3a5e65 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -8,13 +8,12 @@ static inline int
nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
int protohdr, thoff = 0;
unsigned short frag_off;
- nft_set_pktinfo(pkt, ops, skb, in, out);
+ nft_set_pktinfo(pkt, ops, skb, state);
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
/* If malformed, drop it */
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index 0ee47c3e2e31..711887a09e91 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -26,11 +26,11 @@ int nft_meta_set_dump(struct sk_buff *skb,
const struct nft_expr *expr);
void nft_meta_get_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
void nft_meta_set_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e010ee8da41d..2a5dbcc90d1c 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
+#include <linux/in6.h>
/* ========================================================================
* Netlink Messages and Attributes Interface (As Seen On TV)
@@ -105,6 +106,8 @@
* nla_put_string(skb, type, str) add string attribute to skb
* nla_put_flag(skb, type) add flag attribute to skb
* nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
+ * nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb
+ * nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
*
* Nested Attributes Construction:
* nla_nest_start(skb, type) start a nested attribute
@@ -957,6 +960,32 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
}
/**
+ * nla_put_in_addr - Add an IPv4 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv4 address
+ */
+static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
+ __be32 addr)
+{
+ return nla_put_be32(skb, attrtype, addr);
+}
+
+/**
+ * nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv6 address
+ */
+static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
+ const struct in6_addr *addr)
+{
+ return nla_put(skb, attrtype, sizeof(*addr), addr);
+}
+
+/**
* nla_get_u32 - return payload of u32 attribute
* @nla: u32 netlink attribute
*/
@@ -1099,6 +1128,27 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla)
}
/**
+ * nla_get_in_addr - return payload of IPv4 address attribute
+ * @nla: IPv4 address netlink attribute
+ */
+static inline __be32 nla_get_in_addr(const struct nlattr *nla)
+{
+ return *(__be32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_in6_addr - return payload of IPv6 address attribute
+ * @nla: IPv6 address netlink attribute
+ */
+static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
+{
+ struct in6_addr tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+ return tmp;
+}
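
For illustration, a fill/parse pair using the new address helpers; the attribute numbers are hypothetical:

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_SADDR4,	/* __be32 */
	EXAMPLE_ATTR_SADDR6,	/* 16 bytes */
	__EXAMPLE_ATTR_MAX
};

static int example_fill(struct sk_buff *skb, __be32 v4,
			const struct in6_addr *v6)
{
	if (nla_put_in_addr(skb, EXAMPLE_ATTR_SADDR4, v4) ||
	    nla_put_in6_addr(skb, EXAMPLE_ATTR_SADDR6, v6))
		return -EMSGSIZE;
	return 0;
}

static void example_parse(struct nlattr **tb, __be32 *v4, struct in6_addr *v6)
{
	if (tb[EXAMPLE_ATTR_SADDR4])
		*v4 = nla_get_in_addr(tb[EXAMPLE_ATTR_SADDR4]);
	if (tb[EXAMPLE_ATTR_SADDR6])
		*v6 = nla_get_in6_addr(tb[EXAMPLE_ATTR_SADDR6]);
}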
+
+/**
* nla_nest_start - Start a new level of nested attributes
* @skb: socket buffer to add attributes to
* @attrtype: attribute type of container
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 0931618c0f7f..70e158551704 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -38,11 +38,9 @@ static inline void *net_generic(const struct net *net, int id)
rcu_read_lock();
ng = rcu_dereference(net->gen);
- BUG_ON(id == 0 || id > ng->len);
ptr = ng->ptr[id - 1];
rcu_read_unlock();
- BUG_ON(!ptr);
return ptr;
}
#endif
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index c06ac58ca107..69a6715d9f3f 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -5,7 +5,7 @@
struct net;
-static inline unsigned int net_hash_mix(struct net *net)
+static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
/*
@@ -13,7 +13,7 @@ static inline unsigned int net_hash_mix(struct net *net)
* always zeroed
*/
- return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT);
+ return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
#else
return 0;
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index dbe225478adb..c68926b4899c 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -7,6 +7,7 @@
#include <linux/uidgid.h>
#include <net/inet_frag.h>
+#include <linux/rcupdate.h>
struct tcpm_hash_bucket;
struct ctl_table_header;
@@ -18,6 +19,7 @@ struct sock;
struct local_ports {
seqlock_t lock;
int range[2];
+ bool warned;
};
struct ping_group_range {
@@ -38,21 +40,21 @@ struct netns_ipv4 {
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
bool fib_has_custom_rules;
- struct fib_table *fib_local;
- struct fib_table *fib_main;
- struct fib_table *fib_default;
+ struct fib_table __rcu *fib_local;
+ struct fib_table __rcu *fib_main;
+ struct fib_table __rcu *fib_default;
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
int fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
+ bool fib_offload_disabled;
struct sock *fibnl;
struct sock * __percpu *icmp_sk;
+ struct sock *mc_autojoin_sk;
struct inet_peer_base *peers;
- struct tcpm_hash_bucket *tcp_metrics_hash;
- unsigned int tcp_metrics_hash_log;
struct sock * __percpu *tcp_sk;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
@@ -76,6 +78,8 @@ struct netns_ipv4 {
struct local_ports ip_local_ports;
int sysctl_tcp_ecn;
+ int sysctl_tcp_ecn_fallback;
+
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
int sysctl_ip_nonlocal_bind;
@@ -84,6 +88,8 @@ struct netns_ipv4 {
int sysctl_tcp_fwmark_accept;
int sysctl_tcp_mtu_probing;
int sysctl_tcp_base_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
struct ping_group_range ping_group_range;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 69ae41f2098c..8d93544a2d2b 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -32,6 +32,9 @@ struct netns_sysctl_ipv6 {
int icmpv6_time;
int anycast_src_echo_reply;
int fwmark_reflect;
+ int idgen_retries;
+ int idgen_delay;
+ int flowlabel_state_ranges;
};
struct netns_ipv6 {
@@ -67,6 +70,7 @@ struct netns_ipv6 {
struct sock *ndisc_sk;
struct sock *tcp_sk;
struct sock *igmp_sk;
+ struct sock *mc_autojoin_sk;
#ifdef CONFIG_IPV6_MROUTE
#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
struct mr6_table *mrt6;
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
new file mode 100644
index 000000000000..d29203651c01
--- /dev/null
+++ b/include/net/netns/mpls.h
@@ -0,0 +1,17 @@
+/*
+ * mpls in net namespaces
+ */
+
+#ifndef __NETNS_MPLS_H__
+#define __NETNS_MPLS_H__
+
+struct mpls_route;
+struct ctl_table_header;
+
+struct netns_mpls {
+ size_t platform_labels;
+ struct mpls_route __rcu * __rcu *platform_label;
+ struct ctl_table_header *ctl;
+};
+
+#endif /* __NETNS_MPLS_H__ */
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 88740024ccf3..532e4ba64f49 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -1,9 +1,9 @@
#ifndef __NETNS_NETFILTER_H
#define __NETNS_NETFILTER_H
-#include <linux/proc_fs.h>
-#include <linux/netfilter.h>
+#include <linux/netfilter_defs.h>
+struct proc_dir_entry;
struct nf_logger;
struct netns_nf {
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index eee608b12cc9..c80781146019 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,6 +13,7 @@ struct netns_nftables {
struct nft_af_info *inet;
struct nft_af_info *arp;
struct nft_af_info *bridge;
+ struct nft_af_info *netdev;
unsigned int base_seq;
u8 gencursor;
};
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 3573a81815ad..8ba379f9e467 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -31,6 +31,7 @@ struct netns_sctp {
struct list_head addr_waitq;
struct timer_list addr_wq_timer;
struct list_head auto_asconf_splist;
+ /* Lock that protects both addr_waitq and auto_asconf_splist */
spinlock_t addr_wq_lock;
/* Lock that protects the local_addr_list writers */
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
index c24060ee411e..c8a7681efa6a 100644
--- a/include/net/netns/x_tables.h
+++ b/include/net/netns/x_tables.h
@@ -2,13 +2,14 @@
#define __NETNS_X_TABLES_H
#include <linux/list.h>
-#include <linux/netfilter.h>
+#include <linux/netfilter_defs.h>
struct ebt_table;
struct netns_xt {
struct list_head tables[NFPROTO_NUMPROTO];
bool notrack_deprecated_warning;
+ bool clusterip_deprecated_warning;
#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
struct ebt_table *broute_table;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index ab672b537dd4..316694dafa5b 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -83,6 +83,10 @@ struct nfc_hci_pipe {
};
#define NFC_HCI_MAX_CUSTOM_GATES 50
+/*
+ * According to ETSI TS 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
#define NFC_HCI_MAX_PIPES 127
struct nfc_hci_init_data {
u8 gate_count;
@@ -175,6 +179,13 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev);
void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
+static inline int nfc_hci_set_vendor_cmds(struct nfc_hci_dev *hdev,
+ struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ return nfc_set_vendor_cmds(hdev->ndev, cmds, n_cmds);
+}
+
void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
int nfc_hci_result_to_errno(u8 result);
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index a2f2f3d3196d..75d2e1880059 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -35,6 +35,7 @@
#define NCI_MAX_NUM_RF_CONFIGS 10
#define NCI_MAX_NUM_CONN 10
#define NCI_MAX_PARAM_LEN 251
+#define NCI_MAX_PACKET_SIZE 258
/* NCI Status Codes */
#define NCI_STATUS_OK 0x00
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index ff87f8611fa3..01fc8c531115 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/skbuff.h>
+#include <linux/tty.h>
#include <net/nfc/nfc.h>
#include <net/nfc/nci.h>
@@ -66,11 +67,19 @@ enum nci_state {
struct nci_dev;
+struct nci_prop_ops {
+ __u16 opcode;
+ int (*rsp)(struct nci_dev *dev, struct sk_buff *skb);
+ int (*ntf)(struct nci_dev *dev, struct sk_buff *skb);
+};
+
struct nci_ops {
+ int (*init)(struct nci_dev *ndev);
int (*open)(struct nci_dev *ndev);
int (*close)(struct nci_dev *ndev);
int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
int (*setup)(struct nci_dev *ndev);
+ int (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
__u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
int (*discover_se)(struct nci_dev *ndev);
int (*disable_se)(struct nci_dev *ndev, u32 se_idx);
@@ -83,12 +92,16 @@ struct nci_ops {
struct sk_buff *skb);
void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
struct sk_buff *skb);
+
+ struct nci_prop_ops *prop_ops;
+ size_t n_prop_ops;
};
#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
#define NCI_MAX_DISCOVERED_TARGETS 10
#define NCI_MAX_NUM_NFCEE 255
#define NCI_MAX_CONN_ID 7
+#define NCI_MAX_PROPRIETARY_CMD 64
struct nci_conn_info {
struct list_head list;
@@ -137,6 +150,10 @@ struct nci_conn_info {
#define NCI_HCI_INVALID_HOST 0x80
#define NCI_HCI_MAX_CUSTOM_GATES 50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
#define NCI_HCI_MAX_PIPES 127
struct nci_hci_gate {
@@ -259,6 +276,8 @@ int nci_request(struct nci_dev *ndev,
void (*req)(struct nci_dev *ndev,
unsigned long opt),
unsigned long opt, __u32 timeout);
+int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
@@ -313,8 +332,19 @@ static inline void *nci_get_drvdata(struct nci_dev *ndev)
return ndev->driver_data;
}
+static inline int nci_set_vendor_cmds(struct nci_dev *ndev,
+ struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ return nfc_set_vendor_cmds(ndev->nfc_dev, cmds, n_cmds);
+}
+
void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb);
void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
+ struct sk_buff *skb);
+int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+ struct sk_buff *skb);
void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
@@ -362,4 +392,50 @@ int nci_spi_send(struct nci_spi *nspi,
struct sk_buff *skb);
struct sk_buff *nci_spi_read(struct nci_spi *nspi);
+/* ----- NCI UART ---- */
+
+/* Ioctl */
+#define NCIUARTSETDRIVER _IOW('U', 0, char *)
+
+enum nci_uart_driver {
+ NCI_UART_DRIVER_MARVELL = 0,
+ NCI_UART_DRIVER_MAX
+};
+
+struct nci_uart;
+
+struct nci_uart_ops {
+ int (*open)(struct nci_uart *nci_uart);
+ void (*close)(struct nci_uart *nci_uart);
+ int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb);
+ int (*recv_buf)(struct nci_uart *nci_uart, const u8 *data, char *flags,
+ int count);
+ int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb);
+ void (*tx_start)(struct nci_uart *nci_uart);
+ void (*tx_done)(struct nci_uart *nci_uart);
+};
+
+struct nci_uart {
+ struct module *owner;
+ struct nci_uart_ops ops;
+ const char *name;
+ enum nci_uart_driver driver;
+
+ /* Dynamic data */
+ struct nci_dev *ndev;
+ spinlock_t rx_lock;
+ struct work_struct write_work;
+ struct tty_struct *tty;
+ unsigned long tx_state;
+ struct sk_buff_head tx_q;
+ struct sk_buff *tx_skb;
+ struct sk_buff *rx_skb;
+ int rx_packet_len;
+ void *drv_data;
+};
+
+int nci_uart_register(struct nci_uart *nu);
+void nci_uart_unregister(struct nci_uart *nu);
+void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl);
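
A bare-bones sketch of an NCI UART driver hooking into this interface; the driver name and the reuse of the NCI_UART_DRIVER_MARVELL slot are illustrative only, and a real driver would add its own enum entry.

#include <linux/module.h>

static int example_nci_uart_open(struct nci_uart *nu)
{
	return 0;	/* allocate per-link state here */
}

static void example_nci_uart_close(struct nci_uart *nu)
{
}

static int example_nci_uart_recv(struct nci_uart *nu, struct sk_buff *skb)
{
	return nci_recv_frame(nu->ndev, skb);
}

static struct nci_uart example_nci_uart = {
	.owner	= THIS_MODULE,
	.name	= "example_uart",
	.driver	= NCI_UART_DRIVER_MARVELL,	/* illustrative slot */
	.ops	= {
		.open	= example_nci_uart_open,
		.close	= example_nci_uart_close,
		.recv	= example_nci_uart_recv,
	},
};

/* module init/exit would call nci_uart_register()/nci_uart_unregister(). */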
+
#endif /* __NCI_CORE_H */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 73190e65d5c1..f9e58ae45f9c 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -157,7 +157,7 @@ struct nfc_evt_transaction {
u32 aid_len;
u8 aid[NFC_MAX_AID_LENGTH];
u8 params_len;
- u8 params[NFC_MAX_PARAMS_LENGTH];
+ u8 params[0];
} __packed;
struct nfc_genl_data {
@@ -165,6 +165,12 @@ struct nfc_genl_data {
struct mutex genl_data_mutex;
};
+struct nfc_vendor_cmd {
+ __u32 vendor_id;
+ __u32 subcmd;
+ int (*doit)(struct nfc_dev *dev, void *data, size_t data_len);
+};
+
struct nfc_dev {
int idx;
u32 target_next_idx;
@@ -193,6 +199,9 @@ struct nfc_dev {
struct rfkill *rfkill;
+ struct nfc_vendor_cmd *vendor_cmds;
+ int n_vendor_cmds;
+
struct nfc_ops *ops;
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -296,4 +305,17 @@ struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
u8 payload_type, u8 direction);
+static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
+ struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ if (dev->vendor_cmds || dev->n_vendor_cmds)
+ return -EINVAL;
+
+ dev->vendor_cmds = cmds;
+ dev->n_vendor_cmds = n_cmds;
+
+ return 0;
+}
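
As a usage sketch, an NCI-based driver registers its vendor command table once during setup; the vendor ID, sub-command and handler below are made up:

static int example_vendor_doit(struct nfc_dev *dev, void *data,
			       size_t data_len)
{
	/* decode and act on the vendor payload here */
	return 0;
}

static struct nfc_vendor_cmd example_vendor_cmds[] = {
	{
		.vendor_id	= 0x1234,	/* hypothetical vendor ID */
		.subcmd		= 0x01,
		.doit		= example_vendor_doit,
	},
};

static int example_setup(struct nci_dev *ndev)
{
	return nci_set_vendor_cmds(ndev, example_vendor_cmds,
				   ARRAY_SIZE(example_vendor_cmds));
}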
+
#endif /* __NET_NFC_H */
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index f8b5bc997959..b0ab530d28cd 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -100,6 +100,10 @@ enum nl802154_attrs {
NL802154_ATTR_EXTENDED_ADDR,
+ NL802154_ATTR_WPAN_PHY_CAPS,
+
+ NL802154_ATTR_SUPPORTED_COMMANDS,
+
/* add attributes here, update the policy in nl802154.c */
__NL802154_ATTR_AFTER_LAST,
@@ -120,6 +124,61 @@ enum nl802154_iftype {
};
/**
+ * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes
+ *
+ * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved
+ * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
+ * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
+ * nl802154_wpan_phy_tx_power
+ * @NL802154_CAP_ATTR_CCA_ED_LEVELS: a nested attribute for the supported
+ * cca_ed_level values
+ * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
+ * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
+ * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
+ * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
+ * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
+ * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value
+ * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
+ * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
+ * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
+ * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value
+ * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags
+ * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags
+ * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined
+ * @__NL802154_CAP_ATTR_AFTER_LAST: internal use
+ */
+enum nl802154_wpan_phy_capability_attr {
+ __NL802154_CAP_ATTR_INVALID,
+
+ NL802154_CAP_ATTR_IFTYPES,
+
+ NL802154_CAP_ATTR_CHANNELS,
+ NL802154_CAP_ATTR_TX_POWERS,
+
+ NL802154_CAP_ATTR_CCA_ED_LEVELS,
+ NL802154_CAP_ATTR_CCA_MODES,
+ NL802154_CAP_ATTR_CCA_OPTS,
+
+ NL802154_CAP_ATTR_MIN_MINBE,
+ NL802154_CAP_ATTR_MAX_MINBE,
+
+ NL802154_CAP_ATTR_MIN_MAXBE,
+ NL802154_CAP_ATTR_MAX_MAXBE,
+
+ NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+ NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+
+ NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+ NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+
+ NL802154_CAP_ATTR_LBT,
+
+ /* keep last */
+ __NL802154_CAP_ATTR_AFTER_LAST,
+ NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1
+};
+
+/**
* enum nl802154_cca_modes - cca modes
*
* @__NL802154_CCA_INVALID: cca mode number 0 is reserved
@@ -128,7 +187,7 @@ enum nl802154_iftype {
* @NL802154_CCA_ENERGY_CARRIER: Carrier sense with energy above threshold
* @NL802154_CCA_ALOHA: CCA shall always report an idle medium
* @NL802154_CCA_UWB_SHR: UWB preamble sense based on the SHR of a frame
- * @NL802154_CCA_UWB_MULTIPEXED: UWB preamble sense based on the packet with
+ * @NL802154_CCA_UWB_MULTIPLEXED: UWB preamble sense based on the packet with
* the multiplexed preamble
* @__NL802154_CCA_ATTR_AFTER_LAST: Internal
* @NL802154_CCA_ATTR_MAX: Maximum CCA attribute number
@@ -140,7 +199,7 @@ enum nl802154_cca_modes {
NL802154_CCA_ENERGY_CARRIER,
NL802154_CCA_ALOHA,
NL802154_CCA_UWB_SHR,
- NL802154_CCA_UWB_MULTIPEXED,
+ NL802154_CCA_UWB_MULTIPLEXED,
/* keep last */
__NL802154_CCA_ATTR_AFTER_LAST,
@@ -162,4 +221,26 @@ enum nl802154_cca_opts {
NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
};
+/**
+ * enum nl802154_supported_bool_states - bool states for bool capability entry
+ *
+ * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false
+ * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true
+ * @__NL802154_SUPPORTED_BOOL_INVALD: reserved
+ * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false
+ * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal
+ * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states
+ */
+enum nl802154_supported_bool_states {
+ NL802154_SUPPORTED_BOOL_FALSE,
+ NL802154_SUPPORTED_BOOL_TRUE,
+ /* to handle them in a mask */
+ __NL802154_SUPPORTED_BOOL_INVALD,
+ NL802154_SUPPORTED_BOOL_BOTH,
+
+ /* keep last */
+ __NL802154_SUPPORTED_BOOL_AFTER_LAST,
+ NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
+};
+
#endif /* __NL802154_H */
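As an illustration of how the new NL802154_ATTR_WPAN_PHY_CAPS attribute is meant to be populated, here is a hedged sketch of a dump helper nesting a couple of the capability attributes; the enclosing message setup and the value sources are assumed, only the attribute names come from this header.

/* Sketch: nest two capability attributes into a wpan-phy dump message. */
#include <net/netlink.h>
#include <net/nl802154.h>

static int fill_wpan_phy_caps(struct sk_buff *msg, u8 min_be, u8 max_be)
{
	struct nlattr *nl_caps;

	nl_caps = nla_nest_start(msg, NL802154_ATTR_WPAN_PHY_CAPS);
	if (!nl_caps)
		return -ENOBUFS;

	if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, min_be) ||
	    nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, max_be)) {
		nla_nest_cancel(msg, nl_caps);
		return -ENOBUFS;
	}

	nla_nest_end(msg, nl_caps);
	return 0;
}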
diff --git a/include/net/ping.h b/include/net/ping.h
index cc16d413f681..ac80cb45e630 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -75,12 +75,11 @@ void ping_err(struct sk_buff *skb, int offset, u32 info);
int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
struct sk_buff *);
-int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len);
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
-int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len);
+int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
bool ping_rcv(struct sk_buff *skb);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 7f830ff67f08..87935cad2f7b 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -39,8 +39,7 @@ struct request_sock_ops {
void (*send_reset)(struct sock *sk,
struct sk_buff *skb);
void (*destructor)(struct request_sock *req);
- void (*syn_ack_timeout)(struct sock *sk,
- struct request_sock *req);
+ void (*syn_ack_timeout)(const struct request_sock *req);
};
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
@@ -49,7 +48,11 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
*/
struct request_sock {
struct sock_common __req_common;
+#define rsk_refcnt __req_common.skc_refcnt
+#define rsk_hash __req_common.skc_hash
+
struct request_sock *dl_next;
+ struct sock *rsk_listener;
u16 mss;
u8 num_retrans; /* number of retransmits */
u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
@@ -58,32 +61,58 @@ struct request_sock {
u32 window_clamp; /* window clamp at creation time */
u32 rcv_wnd; /* rcv_wnd offered first time */
u32 ts_recent;
- unsigned long expires;
+ struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
+ u32 *saved_syn;
u32 secid;
u32 peer_secid;
};
-static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
{
struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
- if (req != NULL)
+ if (req) {
req->rsk_ops = ops;
-
+ sock_hold(sk_listener);
+ req->rsk_listener = sk_listener;
+ req->saved_syn = NULL;
+ /* Following is temporary. It is coupled with debugging
+ * helpers in reqsk_put() & reqsk_free()
+ */
+ atomic_set(&req->rsk_refcnt, 0);
+ }
return req;
}
-static inline void __reqsk_free(struct request_sock *req)
+static inline struct request_sock *inet_reqsk(struct sock *sk)
{
- kmem_cache_free(req->rsk_ops->slab, req);
+ return (struct request_sock *)sk;
+}
+
+static inline struct sock *req_to_sk(struct request_sock *req)
+{
+ return (struct sock *)req;
}
static inline void reqsk_free(struct request_sock *req)
{
+ /* temporary debugging */
+ WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
+
req->rsk_ops->destructor(req);
- __reqsk_free(req);
+ if (req->rsk_listener)
+ sock_put(req->rsk_listener);
+ kfree(req->saved_syn);
+ kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_put(struct request_sock *req)
+{
+ if (atomic_dec_and_test(&req->rsk_refcnt))
+ reqsk_free(req);
}
extern int sysctl_max_syn_backlog;
@@ -93,12 +122,16 @@ extern int sysctl_max_syn_backlog;
* @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
*/
struct listen_sock {
- u8 max_qlen_log;
+ int qlen_inc; /* protected by listener lock */
+ int young_inc; /* protected by listener lock */
+
+ /* following fields can be updated by timer */
+ atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
+ atomic_t young_dec;
+
+ u8 max_qlen_log ____cacheline_aligned_in_smp;
u8 synflood_warned;
/* 2 bytes hole, try to use */
- int qlen;
- int qlen_young;
- int clock_hand;
u32 hash_rnd;
u32 nr_table_entries;
struct request_sock *syn_table[0];
@@ -142,18 +175,11 @@ struct fastopen_queue {
* %syn_wait_lock is necessary only to avoid proc interface having to grab the main
* lock sock while browsing the listening hash (otherwise it's deadlock prone).
*
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
*/
struct request_sock_queue {
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
- rwlock_t syn_wait_lock;
u8 rskq_defer_accept;
- /* 3 bytes hole, try to pack */
struct listen_sock *listen_opt;
struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
* enabled on this listener. Check
@@ -161,6 +187,9 @@ struct request_sock_queue {
* to determine if TFO is enabled
* right at this moment.
*/
+
+ /* temporary alignment, our goal is to get rid of this lock */
+ spinlock_t syn_wait_lock ____cacheline_aligned_in_smp;
};
int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -185,15 +214,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue)
return queue->rskq_accept_head == NULL;
}
-static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
- struct request_sock *req,
- struct request_sock **prev_req)
-{
- write_lock(&queue->syn_wait_lock);
- *prev_req = req->dl_next;
- write_unlock(&queue->syn_wait_lock);
-}
-
static inline void reqsk_queue_add(struct request_sock_queue *queue,
struct request_sock *req,
struct sock *parent,
@@ -224,57 +244,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
return req;
}
-static inline int reqsk_queue_removed(struct request_sock_queue *queue,
- struct request_sock *req)
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+ const struct request_sock *req)
{
struct listen_sock *lopt = queue->listen_opt;
if (req->num_timeout == 0)
- --lopt->qlen_young;
-
- return --lopt->qlen;
+ atomic_inc(&lopt->young_dec);
+ atomic_inc(&lopt->qlen_dec);
}
-static inline int reqsk_queue_added(struct request_sock_queue *queue)
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
struct listen_sock *lopt = queue->listen_opt;
- const int prev_qlen = lopt->qlen;
- lopt->qlen_young++;
- lopt->qlen++;
- return prev_qlen;
+ lopt->young_inc++;
+ lopt->qlen_inc++;
}
-static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+static inline int listen_sock_qlen(const struct listen_sock *lopt)
{
- return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+ return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
}
-static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+static inline int listen_sock_young(const struct listen_sock *lopt)
{
- return queue->listen_opt->qlen_young;
+ return lopt->young_inc - atomic_read(&lopt->young_dec);
}
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
- return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+ const struct listen_sock *lopt = queue->listen_opt;
+
+ return lopt ? listen_sock_qlen(lopt) : 0;
}
-static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
- u32 hash, struct request_sock *req,
- unsigned long timeout)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
- struct listen_sock *lopt = queue->listen_opt;
-
- req->expires = jiffies + timeout;
- req->num_retrans = 0;
- req->num_timeout = 0;
- req->sk = NULL;
- req->dl_next = lopt->syn_table[hash];
+ return listen_sock_young(queue->listen_opt);
+}
- write_lock(&queue->syn_wait_lock);
- lopt->syn_table[hash] = req;
- write_unlock(&queue->syn_wait_lock);
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+ return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
}
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ u32 hash, struct request_sock *req,
+ unsigned long timeout);
+
#endif /* _REQUEST_SOCK_H */
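The queue length is now derived from split increment/decrement counters so the SYN-ACK timer can run without the listener lock. A small sketch of reading them through the new helpers; the listener socket is assumed to come from elsewhere and to be in the listening state.

/* Sketch: report SYN-queue occupancy via the new accessors. */
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

static void dump_syn_queue_depth(struct sock *sk_listener)
{
	struct request_sock_queue *queue =
		&inet_csk(sk_listener)->icsk_accept_queue;

	/* qlen = qlen_inc - qlen_dec, young = young_inc - young_dec;
	 * only meaningful while listen_opt is populated (socket listening)
	 */
	pr_debug("syn queue: %d total, %d young\n",
		 reqsk_queue_len(queue), reqsk_queue_len_young(queue));
}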
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 6c6d5393fc34..343d922d15c2 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -137,7 +137,7 @@ void rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-struct net_device *rtnl_create_link(struct net *net, char *ifname,
+struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
struct nlattr *tb[]);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c605d305c577..2738f6f87908 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -213,7 +213,7 @@ struct tcf_proto_ops {
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- void (*destroy)(struct tcf_proto*);
+ bool (*destroy)(struct tcf_proto*, bool);
unsigned long (*get)(struct tcf_proto*, u32 handle);
int (*change)(struct net *net, struct sk_buff *,
@@ -399,7 +399,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
-void tcf_destroy(struct tcf_proto *tp);
+bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
/* Reset all TX qdiscs greater then index of a device. */
@@ -501,12 +501,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return sch->enqueue(skb, sch);
}
-static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
-{
- qdisc_skb_cb(skb)->pkt_len = skb->len;
- return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
-}
-
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
return q->flags & TCQ_F_CPUSTATS;
@@ -745,23 +739,6 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
return rtab->data[slot];
}
-#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
- int action)
-{
- struct sk_buff *n;
-
- n = skb_clone(skb, gfp_mask);
-
- if (n) {
- n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
- n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
- n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
- }
- return n;
-}
-#endif
-
struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 856f01cb51dd..ce13cf20f625 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -166,6 +166,9 @@ void sctp_remaddr_proc_exit(struct net *net);
*/
extern struct kmem_cache *sctp_chunk_cachep __read_mostly;
extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
+extern long sysctl_sctp_mem[3];
+extern int sysctl_sctp_rmem[3];
+extern int sysctl_sctp_wmem[3];
/*
* Section: Macros, externs, and inlines
@@ -571,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
/* Map v4 address to v4-mapped v6 address */
static inline void sctp_v4_map_v6(union sctp_addr *addr)
{
+ __be16 port;
+
+ port = addr->v4.sin_port;
+ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ addr->v6.sin6_port = port;
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_scope_id = 0;
- addr->v6.sin6_port = addr->v4.sin_port;
- addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
addr->v6.sin6_addr.s6_addr32[0] = 0;
addr->v6.sin6_addr.s6_addr32[1] = 0;
addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
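The reordering in sctp_v4_map_v6() matters because union sctp_addr overlays the IPv4 and IPv6 sockaddrs: sin6_flowinfo shares storage with sin_addr (and sin6_port with sin_port), so zeroing the v6 fields first destroyed the v4 values being converted. A hedged compile-time illustration of that overlap, not kernel code:

/* Illustrative only: show the field aliasing that the reorder avoids. */
#include <linux/bug.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/stddef.h>

static inline void sctp_addr_overlap_check(void)
{
	BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
		     offsetof(struct sockaddr_in6, sin6_port));
	BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_addr) !=
		     offsetof(struct sockaddr_in6, sin6_flowinfo));
}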
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2bb2fcf5b11f..495c87e367b3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -223,6 +223,10 @@ struct sctp_sock {
atomic_t pd_mode;
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
+
+ /* These must be the last fields, as they will be skipped on copies,
+ * like on accept and peeloff operations
+ */
struct list_head auto_asconf_list;
int do_auto_asconf;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index e4079c28e6b8..14d539c040d7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -57,7 +57,6 @@
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
-#include <linux/aio.h>
#include <linux/sched.h>
#include <linux/filter.h>
@@ -67,6 +66,7 @@
#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
+#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
struct cgroup;
@@ -184,21 +184,22 @@ struct sock_common {
unsigned char skc_reuse:4;
unsigned char skc_reuseport:1;
unsigned char skc_ipv6only:1;
+ unsigned char skc_net_refcnt:1;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
struct hlist_nulls_node skc_portaddr_node;
};
struct proto *skc_prot;
-#ifdef CONFIG_NET_NS
- struct net *skc_net;
-#endif
+ possible_net_t skc_net;
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr skc_v6_daddr;
struct in6_addr skc_v6_rcv_saddr;
#endif
+ atomic64_t skc_cookie;
+
/*
* fields between dontcopy_begin/dontcopy_end
* are not copied in sock_copy()
@@ -323,12 +324,14 @@ struct sock {
#define sk_reuse __sk_common.skc_reuse
#define sk_reuseport __sk_common.skc_reuseport
#define sk_ipv6only __sk_common.skc_ipv6only
+#define sk_net_refcnt __sk_common.skc_net_refcnt
#define sk_bound_dev_if __sk_common.skc_bound_dev_if
#define sk_bind_node __sk_common.skc_bind_node
#define sk_prot __sk_common.skc_prot
#define sk_net __sk_common.skc_net
#define sk_v6_daddr __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+#define sk_cookie __sk_common.skc_cookie
socket_lock_t sk_lock;
struct sk_buff_head sk_receive_queue;
@@ -403,8 +406,8 @@ struct sock {
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
- unsigned short sk_ack_backlog;
- unsigned short sk_max_ack_backlog;
+ u32 sk_ack_backlog;
+ u32 sk_max_ack_backlog;
__u32 sk_priority;
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
__u32 sk_cgrp_prioidx;
@@ -923,7 +926,6 @@ static inline void sk_prot_clear_nulls(struct sock *sk, int size)
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
- * transport -> network interface is defined by struct inet_proto
*/
struct proto {
void (*close)(struct sock *sk,
@@ -958,10 +960,9 @@ struct proto {
int (*compat_ioctl)(struct sock *sk,
unsigned int cmd, unsigned long arg);
#endif
- int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len);
- int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg,
+ int (*sendmsg)(struct sock *sk, struct msghdr *msg,
+ size_t len);
+ int (*recvmsg)(struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags,
int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
@@ -1366,7 +1367,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
* Functions for memory accounting
*/
int __sk_mem_schedule(struct sock *sk, int size, int kind);
-void __sk_mem_reclaim(struct sock *sk);
+void __sk_mem_reclaim(struct sock *sk, int amount);
#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1407,7 +1408,7 @@ static inline void sk_mem_reclaim(struct sock *sk)
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk);
+ __sk_mem_reclaim(sk, sk->sk_forward_alloc);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
@@ -1415,7 +1416,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk)
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk);
+ __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1514,9 +1515,9 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
+ struct proto *prot, int kern);
void sk_free(struct sock *sk);
-void sk_release_kernel(struct sock *sk);
+void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
@@ -1562,9 +1563,8 @@ int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
-int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
-int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
- int);
+int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
@@ -1576,8 +1576,8 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
*/
int sock_common_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
-int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags);
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
@@ -1626,7 +1626,7 @@ static inline void sock_put(struct sock *sk)
sk_free(sk);
}
/* Generic version of sock_put(), dealing with all sockets
- * (TCP_TIMEWAIT, ESTABLISHED...)
+ * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
*/
void sock_gen_put(struct sock *sk);
@@ -2025,7 +2025,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
}
}
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule);
/**
* sk_page_frag - return an appropriate page_frag
@@ -2080,6 +2081,29 @@ static inline int sock_intr_errno(long timeo)
return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
+struct sock_skb_cb {
+ u32 dropcount;
+};
+
+/* Store sock_skb_cb at the end of skb->cb[] so protocol families
+ * using skb->cb[] would keep using it directly and utilize its
+ * alignment guarantee.
+ */
+#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
+ sizeof(struct sock_skb_cb)))
+
+#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
+ SOCK_SKB_CB_OFFSET))
+
+#define sock_skb_cb_check_size(size) \
+ BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
+
+static inline void
+sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
+{
+ SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2170,22 +2194,6 @@ void sock_net_set(struct sock *sk, struct net *net)
write_pnet(&sk->sk_net, net);
}
-/*
- * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
- * They should not hold a reference to a namespace in order to allow
- * to stop it.
- * Sockets after sk_change_net should be released using sk_release_kernel
- */
-static inline void sk_change_net(struct sock *sk, struct net *net)
-{
- struct net *current_net = sock_net(sk);
-
- if (!net_eq(current_net, net)) {
- put_net(current_net);
- sock_net_set(sk, hold_net(net));
- }
-}
-
static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
if (skb->sk) {
@@ -2198,6 +2206,14 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
return NULL;
}
+/* This helper checks if a socket is a full socket,
+ * ie _not_ a timewait or request socket.
+ */
+static inline bool sk_fullsock(const struct sock *sk)
+{
+ return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
+}
+
void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
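The new sk_fullsock() helper exists because request (TCP_NEW_SYN_RECV) and timewait minisockets can now show up where only full sockets used to. A hedged sketch of the kind of guard it enables; the error handler itself is hypothetical.

/* Sketch: only touch full-socket fields when sk is not a minisock. */
#include <net/sock.h>

static void example_err_handler(struct sock *sk, int err)
{
	if (!sk_fullsock(sk)) {
		/* request/timewait socks have no sk_err or wakeup callback */
		return;
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
}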
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index cfcdac2e5d25..d5671f118bfc 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -1,6 +1,7 @@
/*
* include/net/switchdev.h - Switch device API
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,102 +14,263 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
-enum netdev_switch_notifier_type {
- NETDEV_SWITCH_FDB_ADD = 1,
- NETDEV_SWITCH_FDB_DEL,
+#define SWITCHDEV_F_NO_RECURSE BIT(0)
+
+enum switchdev_trans {
+ SWITCHDEV_TRANS_NONE,
+ SWITCHDEV_TRANS_PREPARE,
+ SWITCHDEV_TRANS_ABORT,
+ SWITCHDEV_TRANS_COMMIT,
+};
+
+enum switchdev_attr_id {
+ SWITCHDEV_ATTR_UNDEFINED,
+ SWITCHDEV_ATTR_PORT_PARENT_ID,
+ SWITCHDEV_ATTR_PORT_STP_STATE,
+ SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+};
+
+struct switchdev_attr {
+ enum switchdev_attr_id id;
+ enum switchdev_trans trans;
+ u32 flags;
+ union {
+ struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
+ u8 stp_state; /* PORT_STP_STATE */
+ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
+ } u;
+};
+
+struct fib_info;
+
+enum switchdev_obj_id {
+ SWITCHDEV_OBJ_UNDEFINED,
+ SWITCHDEV_OBJ_PORT_VLAN,
+ SWITCHDEV_OBJ_IPV4_FIB,
+ SWITCHDEV_OBJ_PORT_FDB,
+};
+
+struct switchdev_obj {
+ enum switchdev_obj_id id;
+ enum switchdev_trans trans;
+ int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
+ union {
+ struct switchdev_obj_vlan { /* PORT_VLAN */
+ u16 flags;
+ u16 vid_begin;
+ u16 vid_end;
+ } vlan;
+ struct switchdev_obj_ipv4_fib { /* IPV4_FIB */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 nlflags;
+ u32 tb_id;
+ } ipv4_fib;
+ struct switchdev_obj_fdb { /* PORT_FDB */
+ const unsigned char *addr;
+ u16 vid;
+ } fdb;
+ } u;
+};
+
+/**
+ * struct switchdev_ops - switchdev operations
+ *
+ * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
+ *
+ * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
+ *
+ * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
+ *
+ * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
+ *
+ * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
+ */
+struct switchdev_ops {
+ int (*switchdev_port_attr_get)(struct net_device *dev,
+ struct switchdev_attr *attr);
+ int (*switchdev_port_attr_set)(struct net_device *dev,
+ struct switchdev_attr *attr);
+ int (*switchdev_port_obj_add)(struct net_device *dev,
+ struct switchdev_obj *obj);
+ int (*switchdev_port_obj_del)(struct net_device *dev,
+ struct switchdev_obj *obj);
+ int (*switchdev_port_obj_dump)(struct net_device *dev,
+ struct switchdev_obj *obj);
+};
+
+enum switchdev_notifier_type {
+ SWITCHDEV_FDB_ADD = 1,
+ SWITCHDEV_FDB_DEL,
};
-struct netdev_switch_notifier_info {
+struct switchdev_notifier_info {
struct net_device *dev;
};
-struct netdev_switch_notifier_fdb_info {
- struct netdev_switch_notifier_info info; /* must be first */
+struct switchdev_notifier_fdb_info {
+ struct switchdev_notifier_info info; /* must be first */
const unsigned char *addr;
u16 vid;
};
static inline struct net_device *
-netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info)
+switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
return info->dev;
}
#ifdef CONFIG_NET_SWITCHDEV
-int netdev_switch_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid);
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
-int register_netdev_switch_notifier(struct notifier_block *nb);
-int unregister_netdev_switch_notifier(struct notifier_block *nb);
-int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
- struct netdev_switch_notifier_info *info);
-int netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
+int switchdev_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr);
+int switchdev_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr);
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
+int register_switchdev_notifier(struct notifier_block *nb);
+int unregister_switchdev_notifier(struct notifier_block *nb);
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info);
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags);
+int switchdev_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int switchdev_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 nlflags, u32 tb_id);
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id);
+void switchdev_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 nlm_flags);
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid);
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int idx);
+
#else
-static inline int netdev_switch_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+static inline int switchdev_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_del(struct net_device *dev,
+ struct switchdev_obj *obj)
{
return -EOPNOTSUPP;
}
-static inline int netdev_switch_port_stp_update(struct net_device *dev,
- u8 state)
+static inline int switchdev_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
{
return -EOPNOTSUPP;
}
-static inline int register_netdev_switch_notifier(struct notifier_block *nb)
+static inline int register_switchdev_notifier(struct notifier_block *nb)
{
return 0;
}
-static inline int unregister_netdev_switch_notifier(struct notifier_block *nb)
+static inline int unregister_switchdev_notifier(struct notifier_block *nb)
{
return 0;
}
-static inline int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
- struct netdev_switch_notifier_info *info)
+static inline int call_switchdev_notifiers(unsigned long val,
+ struct net_device *dev,
+ struct switchdev_notifier_info *info)
{
return NOTIFY_DONE;
}
-static inline int netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
+static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
+ u32 seq, struct net_device *dev,
+ u32 filter_mask, int nlflags)
{
return -EOPNOTSUPP;
}
-static inline int netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
+static inline int switchdev_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
{
return -EOPNOTSUPP;
}
-static inline int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
+static inline int switchdev_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type,
+ u32 nlflags, u32 tb_id)
{
return 0;
}
-static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
+static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id)
{
return 0;
}
+static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
+{
+}
+
+static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u16 nlm_flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev,
+ int idx)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
#endif /* _LINUX_SWITCHDEV_H_ */
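The object add/del entry points are driven through the prepare/commit transaction phases defined above. A hedged sketch of a driver-side obj_add handler; the foo_* hardware helpers and the VLAN-only support are assumptions, only the switchdev types come from this header.

/* Sketch: two-phase VLAN object add in a hypothetical switch driver. */
#include <linux/errno.h>
#include <net/switchdev.h>

static int foo_vlan_reserve(struct net_device *dev, u16 vid_begin, u16 vid_end)
{
	return 0;	/* placeholder: reserve table entries, may fail */
}

static int foo_vlan_program(struct net_device *dev, u16 vid_begin, u16 vid_end)
{
	return 0;	/* placeholder: push to hardware, expected to succeed */
}

static int foo_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
{
	struct switchdev_obj_vlan *vlan = &obj->u.vlan;

	if (obj->id != SWITCHDEV_OBJ_PORT_VLAN)
		return -EOPNOTSUPP;

	switch (obj->trans) {
	case SWITCHDEV_TRANS_PREPARE:
		return foo_vlan_reserve(dev, vlan->vid_begin, vlan->vid_end);
	case SWITCHDEV_TRANS_COMMIT:
		return foo_vlan_program(dev, vlan->vid_begin, vlan->vid_end);
	case SWITCHDEV_TRANS_ABORT:
		return 0;	/* drop whatever PREPARE reserved */
	default:
		return -EOPNOTSUPP;
	}
}

/* at probe time the driver would point its net_device switchdev_ops here */
static const struct switchdev_ops foo_switchdev_ops = {
	.switchdev_port_obj_add = foo_port_obj_add,
};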
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
index 86a070ffc930..a152e9858b2c 100644
--- a/include/net/tc_act/tc_bpf.h
+++ b/include/net/tc_act/tc_bpf.h
@@ -16,8 +16,12 @@
struct tcf_bpf {
struct tcf_common common;
struct bpf_prog *filter;
+ union {
+ u32 bpf_fd;
+ u16 bpf_num_ops;
+ };
struct sock_filter *bpf_ops;
- u16 bpf_num_ops;
+ const char *bpf_name;
};
#define to_bpf(a) \
container_of(a->priv, struct tcf_bpf, common)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8d6b983d5099..950cfecaad3c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -65,7 +65,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_MIN_MSS 88U
/* The least MTU to use for probing */
-#define TCP_BASE_MSS 512
+#define TCP_BASE_MSS 1024
+
+/* Probing interval, defaults to 10 minutes as per RFC 4821 */
+#define TCP_PROBE_INTERVAL 600
+
+/* MSS search interval (in bytes) below which MTU probing stops */
+#define TCP_PROBE_THRESHOLD 8
/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3
@@ -173,6 +179,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
+#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
* experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -188,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
+#define TCPOLEN_FASTOPEN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
/* But this is what stacks really send out. */
@@ -278,6 +286,14 @@ extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return !!sk->sk_cgrp->memory_pressure;
+
+ return tcp_memory_pressure;
+}
/*
* The next routines deal with comparing 32 bit unsigned ints
* and worry about wraparound (automatic with unsigned arithmetic).
@@ -303,6 +319,8 @@ static inline bool tcp_out_of_memory(struct sock *sk)
return false;
}
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -318,18 +336,6 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
bool tcp_check_oom(struct sock *sk, int shift);
-/* syncookies: remember time of last synqueue overflow */
-static inline void tcp_synq_overflow(struct sock *sk)
-{
- tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
-}
-
-/* syncookies: no recent synqueue overflow on this listening socket? */
-static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
-{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
-}
extern struct proto tcp_prot;
@@ -349,8 +355,7 @@ void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size);
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
void tcp_release_cb(struct sock *sk);
@@ -401,8 +406,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb,
const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req, struct request_sock **prev,
- bool fastopen);
+ struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
@@ -429,9 +433,9 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
+void tcp_syn_ack_timeout(const struct request_sock *req);
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
@@ -443,6 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
+void tcp_req_err(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(struct sock *sk,
struct request_sock *req,
@@ -464,6 +469,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -476,13 +484,35 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
* i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
* the counter advances immediately after a cookie is generated).
*/
-#define MAX_SYNCOOKIE_AGE 2
+#define MAX_SYNCOOKIE_AGE 2
+#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
+#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
+
+/* syncookies: remember time of last synqueue overflow
+ * But do not dirty this field too often (once per second is enough)
+ */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+ unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ unsigned long now = jiffies;
+
+ if (time_after(now, last_overflow + HZ))
+ tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+ unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+
+ return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+}
static inline u32 tcp_cookie_time(void)
{
u64 val = get_jiffies_64();
- do_div(val, 60 * HZ);
+ do_div(val, TCP_SYNCOOKIE_PERIOD);
return val;
}
@@ -520,12 +550,10 @@ int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
-int tcp_write_wakeup(struct sock *);
+int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
-bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
- const char *proto);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
@@ -571,7 +599,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}
/* tcp.c */
-void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -687,6 +715,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80
+#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+
/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission code.
* We also store the host-order sequence numbers in here too.
@@ -700,11 +730,14 @@ struct tcp_skb_cb {
/* Note : tcp_tw_isn is used in input path only
* (isn chosen by tcp_timewait_state_process())
*
- * tcp_gso_segs is used in write queue only,
- * cf tcp_skb_pcount()
+ * tcp_gso_segs/size are used in write queue only,
+ * cf tcp_skb_pcount()/tcp_skb_mss()
*/
__u32 tcp_tw_isn;
- __u32 tcp_gso_segs;
+ struct {
+ u16 tcp_gso_segs;
+ u16 tcp_gso_size;
+ };
};
__u8 tcp_flags; /* TCP header flags. (tcp[13]) */
@@ -760,10 +793,10 @@ static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}
-/* This is valid iff tcp_skb_pcount() > 1. */
+/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
- return skb_shinfo(skb)->gso_size;
+ return TCP_SKB_CB(skb)->tcp_gso_size;
}
/* Events passed to congestion control interface */
@@ -799,6 +832,8 @@ enum tcp_ca_ack_event_flags {
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2
+union tcp_cc_info;
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -824,7 +859,8 @@ struct tcp_congestion_ops {
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
/* get info for inet_diag (optional) */
- void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
+ size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info);
char name[TCP_CA_NAME_MAX];
struct module *owner;
@@ -1035,14 +1071,31 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
return tp->is_cwnd_limited;
}
-static inline void tcp_check_probe_timer(struct sock *sk)
+/* Something is really bad, we could not queue an additional packet,
+ * because qdisc is full or receiver sent a 0 window.
+ * We do not want to add fuel to the fire, or abort too early,
+ * so make sure the timer we arm now is at least 200ms in the future,
+ * regardless of current icsk_rto value (as it could be ~2ms)
+ */
+static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- const struct inet_connection_sock *icsk = inet_csk(sk);
+ return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
+}
+
+/* Variant of inet_csk_rto_backoff() used for zero window probes */
+static inline unsigned long tcp_probe0_when(const struct sock *sk,
+ unsigned long max_when)
+{
+ u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
- if (!tp->packets_out && !icsk->icsk_pending)
+ return (unsigned long)min_t(u64, when, max_when);
+}
+
+static inline void tcp_check_probe_timer(struct sock *sk)
+{
+ if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- icsk->icsk_rto, TCP_RTO_MAX);
+ tcp_probe0_base(sk), TCP_RTO_MAX);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -1132,31 +1185,6 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk->sk_rcvbuf);
}
-static inline void tcp_openreq_init(struct request_sock *req,
- struct tcp_options_received *rx_opt,
- struct sk_buff *skb, struct sock *sk)
-{
- struct inet_request_sock *ireq = inet_rsk(req);
-
- req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
- req->cookie_ts = 0;
- tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
- tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- tcp_rsk(req)->snt_synack = tcp_time_stamp;
- tcp_rsk(req)->last_oow_ack_time = 0;
- req->mss = rx_opt->mss_clamp;
- req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
- ireq->tstamp_ok = rx_opt->tstamp_ok;
- ireq->sack_ok = rx_opt->sack_ok;
- ireq->snd_wscale = rx_opt->snd_wscale;
- ireq->wscale_ok = rx_opt->wscale_ok;
- ireq->acked = 0;
- ireq->ecn_ok = 0;
- ireq->ir_rmt_port = tcp_hdr(skb)->source;
- ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
- ireq->ir_mark = inet_request_mark(sk, skb);
-}
-
extern void tcp_openreq_init_rwin(struct request_sock *req,
struct sock *sk, struct dst_entry *dst);
@@ -1236,36 +1264,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
return true;
}
-/* Return true if we're currently rate-limiting out-of-window ACKs and
- * thus shouldn't send a dupack right now. We rate-limit dupacks in
- * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
- * attacks that send repeated SYNs or ACKs for the same connection. To
- * do this, we do not send a duplicate SYNACK or ACK if the remote
- * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
- */
-static inline bool tcp_oow_rate_limited(struct net *net,
- const struct sk_buff *skb,
- int mib_idx, u32 *last_oow_ack_time)
-{
- /* Data packets without SYNs are not likely part of an ACK loop. */
- if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
- !tcp_hdr(skb)->syn)
- goto not_rate_limited;
-
- if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
- if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
- NET_INC_STATS_BH(net, mib_idx);
- return true; /* rate-limited: don't send yet! */
- }
- }
-
- *last_oow_ack_time = tcp_time_stamp;
-
-not_rate_limited:
- return false; /* not rate-limited: go ahead, send dupack now! */
-}
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+ int mib_idx, u32 *last_oow_ack_time);
static inline void tcp_mib_init(struct net *net)
{
@@ -1344,15 +1344,14 @@ struct tcp_md5sig_pool {
};
/* - functions */
-int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- const struct sock *sk, const struct request_sock *req,
- const struct sk_buff *skb);
+int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+ const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
int family);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
- struct sock *addr_sk);
+ const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1388,7 +1387,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie, int *syn_loss,
unsigned long *last_syn_loss);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
- struct tcp_fastopen_cookie *cookie, bool syn_lost);
+ struct tcp_fastopen_cookie *cookie, bool syn_lost,
+ u16 try_exp);
struct tcp_fastopen_request {
/* Fast Open cookie. Size 0 means a cookie request */
struct tcp_fastopen_cookie cookie;
@@ -1663,28 +1663,26 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
- struct sock *addr_sk);
- int (*calc_md5_hash) (char *location,
- struct tcp_md5sig_key *md5,
- const struct sock *sk,
- const struct request_sock *req,
- const struct sk_buff *skb);
- int (*md5_parse) (struct sock *sk,
- char __user *optval,
- int optlen);
+ const struct sock *addr_sk);
+ int (*calc_md5_hash)(char *location,
+ const struct tcp_md5sig_key *md5,
+ const struct sock *sk,
+ const struct sk_buff *skb);
+ int (*md5_parse)(struct sock *sk,
+ char __user *optval,
+ int optlen);
#endif
};
struct tcp_request_sock_ops {
u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
- struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
- struct request_sock *req);
- int (*calc_md5_hash) (char *location,
- struct tcp_md5sig_key *md5,
- const struct sock *sk,
- const struct request_sock *req,
- const struct sk_buff *skb);
+ struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk,
+ const struct sock *addr_sk);
+ int (*calc_md5_hash) (char *location,
+ const struct tcp_md5sig_key *md5,
+ const struct sock *sk,
+ const struct sk_buff *skb);
#endif
void (*init_req)(struct request_sock *req, struct sock *sk,
struct sk_buff *skb);
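The two new MTU probing constants encode the RFC 4821 policy: re-probe at most every TCP_PROBE_INTERVAL seconds and consider the search converged once the MSS window is narrower than TCP_PROBE_THRESHOLD bytes. A hedged sketch of that policy follows; the state structure stands in for the real icsk_mtup fields.

/* Sketch: the probing policy behind TCP_PROBE_INTERVAL/THRESHOLD. */
#include <linux/jiffies.h>
#include <net/tcp.h>

struct mtu_probe_state {
	int search_low;			/* lowest MSS known to work */
	int search_high;		/* highest MSS worth trying */
	unsigned long probe_ts;		/* jiffies of the last probe round */
};

static bool mtu_probe_converged(const struct mtu_probe_state *st)
{
	return st->search_high - st->search_low < TCP_PROBE_THRESHOLD;
}

static bool mtu_reprobe_due(const struct mtu_probe_state *st,
			    unsigned long now)
{
	return time_after(now, st->probe_ts + TCP_PROBE_INTERVAL * HZ);
}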
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
index b0b645988bd8..50e78a74d0df 100644
--- a/include/net/tcp_states.h
+++ b/include/net/tcp_states.h
@@ -25,6 +25,7 @@ enum {
TCP_LAST_ACK,
TCP_LISTEN,
TCP_CLOSING, /* Now a valid state */
+ TCP_NEW_SYN_RECV,
TCP_MAX_STATES /* Leave at the end! */
};
@@ -44,7 +45,8 @@ enum {
TCPF_CLOSE_WAIT = (1 << 8),
TCPF_LAST_ACK = (1 << 9),
TCPF_LISTEN = (1 << 10),
- TCPF_CLOSING = (1 << 11)
+ TCPF_CLOSING = (1 << 11),
+ TCPF_NEW_SYN_RECV = (1 << 12),
};
#endif /* _LINUX_TCP_STATES_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 07f9b70962f6..6d4ed18e1427 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -194,6 +194,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
int (*)(const struct sock *, const struct sock *),
unsigned int hash2_nulladdr);
+u32 udp_flow_hashrnd(void);
+
static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
int min, int max, bool use_eth)
{
@@ -205,12 +207,19 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
}
hash = skb_get_hash(skb);
- if (unlikely(!hash) && use_eth) {
- /* Can't find a normal hash, caller has indicated an Ethernet
- * packet so use that to compute a hash.
- */
- hash = jhash(skb->data, 2 * ETH_ALEN,
- (__force u32) skb->protocol);
+ if (unlikely(!hash)) {
+ if (use_eth) {
+ /* Can't find a normal hash, caller has indicated an
+ * Ethernet packet so use that to compute a hash.
+ */
+ hash = jhash(skb->data, 2 * ETH_ALEN,
+ (__force u32) skb->protocol);
+ } else {
+ /* Can't derive any sort of hash for the packet, set
+ * to some consistent random value.
+ */
+ hash = udp_flow_hashrnd();
+ }
}
/* Since this is being sent on the wire obfuscate hash a bit
@@ -229,8 +238,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
void udp_err(struct sk_buff *, u32);
-int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len);
+int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
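For context, a hedged sketch of how an encapsulating driver calls udp_flow_src_port(), which now falls back to udp_flow_hashrnd() when no flow hash can be derived; the helper name is illustrative, and passing 0,0 for the range relies on the function substituting the stack's default port range.

/* Sketch: pick an entropy-bearing UDP source port for a tunnel packet. */
#include <net/udp.h>

static __be16 example_tunnel_src_port(struct net *net, struct sk_buff *skb)
{
	/* min == max == 0 selects the default local port range */
	return udp_flow_src_port(net, skb, 0, 0, true);
}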
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 1a20d33d56bc..c491c1221606 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -77,13 +77,14 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
struct udp_tunnel_sock_cfg *sock_cfg);
/* Transmit the skb using UDP encapsulation. */
-int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
bool xnet, bool nocheck);
#if IS_ENABLED(CONFIG_IPV6)
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be16 src_port,
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index c73e7abbbaa5..0082b5d33d7d 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -131,7 +131,7 @@ struct vxlan_sock {
#define VXLAN_F_GBP 0x800
#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
-/* Flags that are used in the receive patch. These flags must match in
+/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
*/
#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
@@ -145,7 +145,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
void vxlan_sock_release(struct vxlan_sock *vs);
-int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
bool xnet, u32 vxflags);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index dc4865e90fe4..f0ee97eec24d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -126,9 +126,7 @@ struct xfrm_state_walk {
/* Full description of state of transformer. */
struct xfrm_state {
-#ifdef CONFIG_NET_NS
- struct net *xs_net;
-#endif
+ possible_net_t xs_net;
union {
struct hlist_node gclist;
struct hlist_node bydst;
@@ -170,6 +168,7 @@ struct xfrm_state {
struct xfrm_algo *ealg;
struct xfrm_algo *calg;
struct xfrm_algo_aead *aead;
+ const char *geniv;
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
@@ -334,7 +333,7 @@ struct xfrm_state_afinfo {
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
int (*output)(struct sock *sk, struct sk_buff *skb);
- int (*output_finish)(struct sk_buff *skb);
+ int (*output_finish)(struct sock *sk, struct sk_buff *skb);
int (*extract_input)(struct xfrm_state *x,
struct sk_buff *skb);
int (*extract_output)(struct xfrm_state *x,
@@ -522,9 +521,7 @@ struct xfrm_policy_queue {
};
struct xfrm_policy {
-#ifdef CONFIG_NET_NS
- struct net *xp_net;
-#endif
+ possible_net_t xp_net;
struct hlist_node bydst;
struct hlist_node byidx;
@@ -1029,7 +1026,7 @@ xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
case AF_INET:
return addr->a4 == 0;
case AF_INET6:
- return ipv6_addr_any((struct in6_addr *)&addr->a6);
+ return ipv6_addr_any(&addr->in6);
}
return 0;
}
@@ -1242,8 +1239,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl,
memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
break;
case AF_INET6:
- *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
- *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
+ saddr->in6 = fl->u.ip6.saddr;
+ daddr->in6 = fl->u.ip6.daddr;
break;
}
}
@@ -1318,6 +1315,7 @@ static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
* xfrm algorithm information
*/
struct xfrm_algo_aead_info {
+ char *geniv;
u16 icv_truncbits;
};
@@ -1327,6 +1325,7 @@ struct xfrm_algo_auth_info {
};
struct xfrm_algo_encr_info {
+ char *geniv;
u16 blockbits;
u16 defkeybits;
};
@@ -1507,7 +1506,7 @@ int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
int xfrm_output_resume(struct sk_buff *skb, int err);
-int xfrm_output(struct sk_buff *skb);
+int xfrm_output(struct sock *sk, struct sk_buff *skb);
int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
void xfrm_local_error(struct sk_buff *skb, int mtu);
int xfrm4_extract_header(struct sk_buff *skb);
@@ -1528,7 +1527,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_output(struct sock *sk, struct sk_buff *skb);
-int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1553,7 +1552,7 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_output(struct sock *sk, struct sk_buff *skb);
-int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr);
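The xs_net/xp_net conversion above leans on the possible_net_t accessors from <net/net_namespace.h>: reads go through read_pnet() and initialisation through write_pnet(), so the field compiles away cleanly when CONFIG_NET_NS is off. A minimal sketch (the helper names are illustrative, not part of the patch):

    #include <net/net_namespace.h>
    #include <net/xfrm.h>

    /* Read the owning namespace; works with or without CONFIG_NET_NS. */
    static inline struct net *example_xs_net(const struct xfrm_state *x)
    {
            return read_pnet(&x->xs_net);
    }

    /* Record the namespace when the state is allocated. */
    static inline void example_xs_net_set(struct xfrm_state *x, struct net *net)
    {
            write_pnet(&x->xs_net, net);
    }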
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 79abb9c71772..1443d79e4fe6 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/cper.h>
+#include <linux/mm.h>
/*
* MCE Extended Error Log trace event
@@ -232,6 +233,90 @@ TRACE_EVENT(aer_event,
__print_flags(__entry->status, "|", aer_uncorrectable_errors))
);
+/*
+ * memory-failure recovery action result event
+ *
+ * unsigned long pfn - Page Frame Number of the corrupted page
+ * int type - Page types of the corrupted page
+ * int result - Result of recovery action
+ */
+
+#ifdef CONFIG_MEMORY_FAILURE
+#define MF_ACTION_RESULT \
+ EM ( MF_IGNORED, "Ignored" ) \
+ EM ( MF_FAILED, "Failed" ) \
+ EM ( MF_DELAYED, "Delayed" ) \
+ EMe ( MF_RECOVERED, "Recovered" )
+
+#define MF_PAGE_TYPE \
+ EM ( MF_MSG_KERNEL, "reserved kernel page" ) \
+ EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
+ EM ( MF_MSG_SLAB, "kernel slab page" ) \
+ EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \
+ EM ( MF_MSG_POISONED_HUGE, "huge page already hardware poisoned" ) \
+ EM ( MF_MSG_HUGE, "huge page" ) \
+ EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
+ EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
+ EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \
+ EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \
+ EM ( MF_MSG_DIRTY_MLOCKED_LRU, "dirty mlocked LRU page" ) \
+ EM ( MF_MSG_CLEAN_MLOCKED_LRU, "clean mlocked LRU page" ) \
+ EM ( MF_MSG_DIRTY_UNEVICTABLE_LRU, "dirty unevictable LRU page" ) \
+ EM ( MF_MSG_CLEAN_UNEVICTABLE_LRU, "clean unevictable LRU page" ) \
+ EM ( MF_MSG_DIRTY_LRU, "dirty LRU page" ) \
+ EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
+ EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
+ EM ( MF_MSG_BUDDY, "free buddy page" ) \
+ EM ( MF_MSG_BUDDY_2ND, "free buddy page (2nd try)" ) \
+ EMe ( MF_MSG_UNKNOWN, "unknown page" )
+
+/*
+ * First define the enums in MM_ACTION_RESULT to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+MF_ACTION_RESULT
+MF_PAGE_TYPE
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(memory_failure_event,
+ TP_PROTO(unsigned long pfn,
+ int type,
+ int result),
+
+ TP_ARGS(pfn, type, result),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(int, type)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->type = type;
+ __entry->result = result;
+ ),
+
+ TP_printk("pfn %#lx: recovery action for %s: %s",
+ __entry->pfn,
+ __print_symbolic(__entry->type, MF_PAGE_TYPE),
+ __print_symbolic(__entry->result, MF_ACTION_RESULT)
+ )
+);
+#endif /* CONFIG_MEMORY_FAILURE */
#endif /* _TRACE_HW_EVENT_MC_H */
/* This part must be outside protection */
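The EM()/EMe() pattern above expands a single list twice: once into TRACE_DEFINE_ENUM() statements so the enum values become visible to userspace tooling, and once into the value/string pairs consumed by __print_symbolic(). A standalone, userspace-style illustration of the same X-macro technique (all names invented for the example):

    #include <stdio.h>

    #define COLORS             \
            EM(RED,   "red")   \
            EM(GREEN, "green") \
            EMe(BLUE, "blue")

    /* Pass 1: emit the enumerators themselves. */
    #define EM(a, b)  a,
    #define EMe(a, b) a
    enum color { COLORS };
    #undef EM
    #undef EMe

    /* Pass 2: reuse the same list as a value -> string table. */
    #define EM(a, b)  { a, b },
    #define EMe(a, b) { a, b }
    static const struct { int val; const char *name; } color_names[] = { COLORS };

    int main(void)
    {
            printf("%s\n", color_names[GREEN].name);  /* prints "green" */
            return 0;
    }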
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ce55906b54a0..fde33ac6b58a 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -111,8 +111,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
int rdma_addr_size(struct sockaddr *addr);
int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
-int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac,
- u16 *vlan_id);
+int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
+ u8 *smac, u16 *vlan_id);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
{
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
}
/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
-static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
{
if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
struct sockaddr_in *out_in = (struct sockaddr_in *)out;
@@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
out_in->sin6_family = AF_INET6;
memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
}
- return 0;
}
static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index ad9a3c280944..bd92130f4ac5 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -64,10 +64,10 @@ int ib_get_cached_gid(struct ib_device *device,
* ib_find_cached_gid() searches for the specified GID value in
* the local software cache.
*/
-int ib_find_cached_gid(struct ib_device *device,
- union ib_gid *gid,
- u8 *port_num,
- u16 *index);
+int ib_find_cached_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ u8 *port_num,
+ u16 *index);
/**
* ib_get_cached_pkey - Returns a cached PKey table entry
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0e3ff30647d5..39ed2d2fbd51 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -105,7 +105,8 @@ enum ib_cm_data_size {
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
IB_CM_SIDR_REP_INFO_LENGTH = 72,
- IB_CM_COMPARE_SIZE = 64
+ /* compare done u32 at a time */
+ IB_CM_COMPARE_SIZE = (64 / sizeof(u32))
};
struct ib_cm_id;
@@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
struct ib_cm_compare_data {
- u8 data[IB_CM_COMPARE_SIZE];
- u8 mask[IB_CM_COMPARE_SIZE];
+ u32 data[IB_CM_COMPARE_SIZE];
+ u32 mask[IB_CM_COMPARE_SIZE];
};
/**
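With data[] and mask[] switched from bytes to u32 words, a masked private-data match naturally walks the arrays one word at a time. A hypothetical consumer, not taken from the patch, might look like:

    #include <rdma/ib_cm.h>

    static bool example_cm_compare(const struct ib_cm_compare_data *cmp,
                                   const u32 *priv_data)
    {
            int i;

            for (i = 0; i < IB_CM_COMPARE_SIZE; i++) {
                    /* Only the bits selected by mask[] take part in the match. */
                    if ((priv_data[i] & cmp->mask[i]) !=
                        (cmp->data[i] & cmp->mask[i]))
                            return false;
            }
            return true;
    }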
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 9bb99e983f58..c8422d5a5a91 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -42,8 +42,11 @@
#include <rdma/ib_verbs.h>
#include <uapi/rdma/ib_user_mad.h>
-/* Management base version */
+/* Management base versions */
#define IB_MGMT_BASE_VERSION 1
+#define OPA_MGMT_BASE_VERSION 0x80
+
+#define OPA_SMP_CLASS_VERSION 0x80
/* Management classes */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
@@ -135,6 +138,10 @@ enum {
IB_MGMT_SA_DATA = 200,
IB_MGMT_DEVICE_HDR = 64,
IB_MGMT_DEVICE_DATA = 192,
+ IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA,
+ OPA_MGMT_MAD_DATA = 2024,
+ OPA_MGMT_RMPP_DATA = 2012,
+ OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA,
};
struct ib_mad_hdr {
@@ -181,12 +188,23 @@ struct ib_mad {
u8 data[IB_MGMT_MAD_DATA];
};
+struct opa_mad {
+ struct ib_mad_hdr mad_hdr;
+ u8 data[OPA_MGMT_MAD_DATA];
+};
+
struct ib_rmpp_mad {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
u8 data[IB_MGMT_RMPP_DATA];
};
+struct opa_rmpp_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ u8 data[OPA_MGMT_RMPP_DATA];
+};
+
struct ib_sa_mad {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
@@ -235,7 +253,10 @@ struct ib_class_port_info {
* includes the common MAD, RMPP, and class specific headers.
* @data_len: Indicates the total size of user-transferred data.
* @seg_count: The number of RMPP segments allocated for this send.
- * @seg_size: Size of each RMPP segment.
+ * @seg_size: Size of the data in each RMPP segment. This does not include
+ * class specific headers.
+ * @seg_rmpp_size: Size of each RMPP segment including the class specific
+ * headers.
* @timeout_ms: Time to wait for a response.
* @retries: Number of times to retry a request for a response. For MADs
* using RMPP, this applies per window. On completion, returns the number
@@ -255,6 +276,7 @@ struct ib_mad_send_buf {
int data_len;
int seg_count;
int seg_size;
+ int seg_rmpp_size;
int timeout_ms;
int retries;
};
@@ -263,7 +285,7 @@ struct ib_mad_send_buf {
* ib_response_mad - Returns if the specified MAD has been generated in
* response to a sent request or trap.
*/
-int ib_response_mad(struct ib_mad *mad);
+int ib_response_mad(const struct ib_mad_hdr *hdr);
/**
* ib_get_rmpp_resptime - Returns the RMPP response time.
@@ -401,7 +423,10 @@ struct ib_mad_send_wc {
struct ib_mad_recv_buf {
struct list_head list;
struct ib_grh *grh;
- struct ib_mad *mad;
+ union {
+ struct ib_mad *mad;
+ struct opa_mad *opa_mad;
+ };
};
/**
@@ -410,6 +435,7 @@ struct ib_mad_recv_buf {
* @recv_buf: Specifies the location of the received data buffer(s).
* @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
* @mad_len: The length of the received MAD, without duplicated headers.
+ * @mad_seg_size: The size of individual MAD segments
*
* For received response, the wr_id contains a pointer to the ib_mad_send_buf
* for the corresponding send request.
@@ -419,6 +445,7 @@ struct ib_mad_recv_wc {
struct ib_mad_recv_buf recv_buf;
struct list_head rmpp_list;
int mad_len;
+ size_t mad_seg_size;
};
/**
@@ -618,6 +645,7 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
* automatically adjust the allocated buffer size to account for any
* additional padding that may be necessary.
* @gfp_mask: GFP mask used for the memory allocation.
+ * @base_version: Base Version of this MAD
*
* This routine allocates a MAD for sending. The returned MAD send buffer
* will reference a data buffer usable for sending a MAD, along
@@ -633,7 +661,8 @@ struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
u32 remote_qpn, u16 pkey_index,
int rmpp_active,
int hdr_len, int data_len,
- gfp_t gfp_mask);
+ gfp_t gfp_mask,
+ u8 base_version);
/**
* ib_is_mad_class_rmpp - returns whether given management class
@@ -675,6 +704,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
* @agent: the agent in question
* @return: true if agent is performing rmpp, false otherwise.
*/
-int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent);
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);
#endif /* IB_MAD_H */
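Callers of ib_create_send_mad() now state the base version explicitly. A hedged sketch of an updated call for a plain, non-RMPP InfiniBand MAD; agent, remote_qpn and pkey_index are assumed to be in scope:

    struct ib_mad_send_buf *msg;

    msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
                             0,                     /* rmpp_active: no RMPP */
                             IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                             GFP_KERNEL,
                             IB_MGMT_BASE_VERSION); /* OPA_MGMT_BASE_VERSION for 2K MADs */
    if (IS_ERR(msg))
            return PTR_ERR(msg);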
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 65994a19e840..986fddb08579 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -81,6 +81,13 @@ enum rdma_transport_type {
RDMA_TRANSPORT_USNIC_UDP
};
+enum rdma_protocol_type {
+ RDMA_PROTOCOL_IB,
+ RDMA_PROTOCOL_IBOE,
+ RDMA_PROTOCOL_IWARP,
+ RDMA_PROTOCOL_USNIC_UDP
+};
+
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);
@@ -166,6 +173,16 @@ struct ib_odp_caps {
} per_transport_caps;
};
+enum ib_cq_creation_flags {
+ IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+};
+
+struct ib_cq_init_attr {
+ unsigned int cqe;
+ int comp_vector;
+ u32 flags;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -210,6 +227,8 @@ struct ib_device_attr {
int sig_prot_cap;
int sig_guard_cap;
struct ib_odp_caps odp_caps;
+ uint64_t timestamp_mask;
+ uint64_t hca_core_clock; /* in KHZ */
};
enum ib_mtu {
@@ -346,6 +365,42 @@ union rdma_protocol_stats {
struct iw_protocol_stats iw;
};
+/* Define bits for the various functionality this port needs to be supported by
+ * the core.
+ */
+/* Management 0x00000FFF */
+#define RDMA_CORE_CAP_IB_MAD 0x00000001
+#define RDMA_CORE_CAP_IB_SMI 0x00000002
+#define RDMA_CORE_CAP_IB_CM 0x00000004
+#define RDMA_CORE_CAP_IW_CM 0x00000008
+#define RDMA_CORE_CAP_IB_SA 0x00000010
+#define RDMA_CORE_CAP_OPA_MAD 0x00000020
+
+/* Address format 0x000FF000 */
+#define RDMA_CORE_CAP_AF_IB 0x00001000
+#define RDMA_CORE_CAP_ETH_AH 0x00002000
+
+/* Protocol 0xFFF00000 */
+#define RDMA_CORE_CAP_PROT_IB 0x00100000
+#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
+#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
+
+#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
+ | RDMA_CORE_CAP_IB_MAD \
+ | RDMA_CORE_CAP_IB_SMI \
+ | RDMA_CORE_CAP_IB_CM \
+ | RDMA_CORE_CAP_IB_SA \
+ | RDMA_CORE_CAP_AF_IB)
+#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
+ | RDMA_CORE_CAP_IB_MAD \
+ | RDMA_CORE_CAP_IB_CM \
+ | RDMA_CORE_CAP_AF_IB \
+ | RDMA_CORE_CAP_ETH_AH)
+#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
+ | RDMA_CORE_CAP_IW_CM)
+#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
+ | RDMA_CORE_CAP_OPA_MAD)
+
struct ib_port_attr {
enum ib_port_state state;
enum ib_mtu max_mtu;
@@ -412,6 +467,8 @@ enum ib_event_type {
IB_EVENT_GID_CHANGE,
};
+__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
+
struct ib_event {
struct ib_device *device;
union {
@@ -663,6 +720,8 @@ enum ib_wc_status {
IB_WC_GENERAL_ERR
};
+__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
+
enum ib_wc_opcode {
IB_WC_SEND,
IB_WC_RDMA_WRITE,
@@ -1407,7 +1466,7 @@ struct ib_flow {
struct ib_uobject *uobject;
};
-struct ib_mad;
+struct ib_mad_hdr;
struct ib_grh;
enum ib_process_mad_flags {
@@ -1474,6 +1533,13 @@ struct ib_dma_mapping_ops {
struct iw_cm_verbs;
+struct ib_port_immutable {
+ int pkey_tbl_len;
+ int gid_tbl_len;
+ u32 core_cap_flags;
+ u32 max_mad_size;
+};
+
struct ib_device {
struct device *dma_device;
@@ -1487,8 +1553,10 @@ struct ib_device {
struct list_head client_data_list;
struct ib_cache cache;
- int *pkey_tbl_len;
- int *gid_tbl_len;
+ /**
+ * port_immutable is indexed by port number
+ */
+ struct ib_port_immutable *port_immutable;
int num_comp_vectors;
@@ -1497,7 +1565,8 @@ struct ib_device {
int (*get_protocol_stats)(struct ib_device *device,
union rdma_protocol_stats *stats);
int (*query_device)(struct ib_device *device,
- struct ib_device_attr *device_attr);
+ struct ib_device_attr *device_attr,
+ struct ib_udata *udata);
int (*query_port)(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr);
@@ -1561,8 +1630,8 @@ struct ib_device {
int (*post_recv)(struct ib_qp *qp,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
- struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
- int comp_vector,
+ struct ib_cq * (*create_cq)(struct ib_device *device,
+ const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
@@ -1637,10 +1706,13 @@ struct ib_device {
int (*process_mad)(struct ib_device *device,
int process_mad_flags,
u8 port_num,
- struct ib_wc *in_wc,
- struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad);
+ const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
struct ib_ucontext *ucontext,
struct ib_udata *udata);
@@ -1675,6 +1747,14 @@ struct ib_device {
u32 local_dma_lkey;
u8 node_type;
u8 phys_port_cnt;
+
+ /**
+ * The following mandatory functions are used only at device
+ * registration. Keep functions such as these at the end of this
+ * structure to avoid cache line misses when accessing struct ib_device
+ * in fast paths.
+ */
+ int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
};
struct ib_client {
@@ -1743,6 +1823,284 @@ int ib_query_port(struct ib_device *device,
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
u8 port_num);
+/**
+ * rdma_start_port - Return the first valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return start port number
+ */
+static inline u8 rdma_start_port(const struct ib_device *device)
+{
+ return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+}
+
+/**
+ * rdma_end_port - Return the last valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return last port number
+ */
+static inline u8 rdma_end_port(const struct ib_device *device)
+{
+ return (device->node_type == RDMA_NODE_IB_SWITCH) ?
+ 0 : device->phys_port_cnt;
+}
+
+static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
+}
+
+static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
+}
+
+static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
+}
+
+static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags &
+ (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
+}
+
+/**
+ * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
+ * Management Datagrams.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Management Datagrams (MAD) are a required part of the InfiniBand
+ * specification and are supported on all InfiniBand devices. A slightly
+ * extended version is also supported on OPA interfaces.
+ *
+ * Return: true if the port supports sending/receiving of MAD packets.
+ */
+static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
+}
+
+/**
+ * rdma_cap_opa_mad - Check if the port of device provides support for OPA
+ * Management Datagrams.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Intel OmniPath devices extend and/or replace the InfiniBand Management
+ * datagrams with their own versions. These OPA MADs share many but not all of
+ * the characteristics of InfiniBand MADs.
+ *
+ * OPA MADs differ in the following ways:
+ *
+ * 1) MADs are variable size up to 2K
+ * IBTA defined MADs remain fixed at 256 bytes
+ * 2) OPA SMPs must carry valid PKeys
+ * 3) OPA SMP packets are a different format
+ *
+ * Return: true if the port supports OPA MAD packet formats.
+ */
+static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
+{
+ return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
+ == RDMA_CORE_CAP_OPA_MAD;
+}
+
+/**
+ * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
+ * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Each InfiniBand node is required to provide a Subnet Management Agent
+ * that the subnet manager can access. Prior to the fabric being fully
+ * configured by the subnet manager, the SMA is accessed via a well known
+ * interface called the Subnet Management Interface (SMI). This interface
+ * uses directed route packets to communicate with the SM to get around the
+ * chicken and egg problem of the SM needing to know what's on the fabric
+ * in order to configure the fabric, and needing to configure the fabric in
+ * order to send packets to the devices on the fabric. These directed
+ * route packets do not need the fabric fully configured in order to reach
+ * their destination. The SMI is the only method allowed to send
+ * directed route packets on an InfiniBand fabric.
+ *
+ * Return: true if the port provides an SMI.
+ */
+static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
+}
+
+/**
+ * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * The InfiniBand Communication Manager is one of many pre-defined General
+ * Service Agents (GSA) that are accessed via the General Service
+ * Interface (GSI). Its role is to facilitate establishment of connections
+ * between nodes as well as other management related tasks for established
+ * connections.
+ *
+ * Return: true if the port supports an IB CM (this does not guarantee that
+ * a CM is actually running however).
+ */
+static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
+}
+
+/**
+ * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Similar to above, but specific to iWARP connections which have a different
+ * management protocol than InfiniBand.
+ *
+ * Return: true if the port supports an iWARP CM (this does not guarantee that
+ * a CM is actually running however).
+ */
+static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
+}
+
+/**
+ * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
+ * Subnet Administration.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * An InfiniBand Subnet Administration (SA) service is a pre-defined General
+ * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
+ * fabrics, devices should resolve routes to other hosts by contacting the
+ * SA to query the proper route.
+ *
+ * Return: true if the port should act as a client to the fabric Subnet
+ * Administration interface. This does not imply that the SA service is
+ * running locally.
+ */
+static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
+}
+
+/**
+ * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
+ * Multicast.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand multicast registration is more complex than normal IPv4 or
+ * IPv6 multicast registration. Each Host Channel Adapter must register
+ * with the Subnet Manager when it wishes to join a multicast group. It
+ * should do so only once regardless of how many queue pairs it subscribes
+ * to this group. And it should leave the group only after all queue pairs
+ * attached to the group have been detached.
+ *
+ * Return: true if the port must undertake the additional administrative
+ * overhead of registering/unregistering with the SM and tracking of the
+ * total number of queue pairs attached to the multicast group.
+ */
+static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
+{
+ return rdma_cap_ib_sa(device, port_num);
+}
+
+/**
+ * rdma_cap_af_ib - Check if the port of device has the capability
+ * Native Infiniband Address.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
+ * GID. RoCE uses a different mechanism, but still generates a GID via
+ * a prescribed mechanism and port specific data.
+ *
+ * Return: true if the port uses a GID address to identify devices on the
+ * network.
+ */
+static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
+}
+
+/**
+ * rdma_cap_eth_ah - Check if the port of device has the capability
+ * Ethernet Address Handle.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
+ * to fabricate GIDs over Ethernet/IP specific addresses native to the
+ * port. Normally, packet headers are generated by the sending host
+ * adapter, but when sending connectionless datagrams, we must manually
+ * inject the proper headers for the fabric we are communicating over.
+ *
+ * Return: true if we are running as a RoCE port and must force the
+ * addition of a Global Route Header built from our Ethernet Address
+ * Handle into our header list for connectionless packets.
+ */
+static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
+}
+
+/**
+ * rdma_cap_read_multi_sge - Check if the port of device has the capability
+ * RDMA Read Multiple Scatter-Gather Entries.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * iWARP has a restriction that RDMA READ requests may only have a single
+ * Scatter/Gather Entry (SGE) in the work request.
+ *
+ * NOTE: although the linux kernel currently assumes all devices are either
+ * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and
+ * WRITEs, according to Tom Talpey, this is not accurate. There are some
+ * devices out there that support more than a single SGE on RDMA READ
+ * requests, but do not support the same number of SGEs as they do on
+ * RDMA WRITE requests. The Linux kernel would need rearchitecting to
+ * support such imbalanced READ/WRITE SGE devices. So, for now, assume
+ * either that a device supports the same number of READ and WRITE SGEs,
+ * or that it only gets a single READ SGE.
+ *
+ * Return: true for any device that allows more than one SGE in RDMA READ
+ * requests.
+ */
+static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
+ u8 port_num)
+{
+ return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
+}
+
+/**
+ * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
+ *
+ * @device: Device
+ * @port_num: Port number
+ *
+ * This MAD size includes the MAD headers and MAD payload. No other headers
+ * are included.
+ *
+ * Return the max MAD size required by the Port. Will return 0 if the port
+ * does not support MADs
+ */
+static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].max_mad_size;
+}
+
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid);
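All of the rdma_cap_*() and rdma_protocol_*() helpers above key off the per-port core_cap_flags, which each provider fills in once at registration through its get_port_immutable() hook. A sketch of what such a hook might look like for a plain InfiniBand port (the driver name is invented; real providers may derive the table lengths differently):

    static int exampledrv_port_immutable(struct ib_device *ibdev, u8 port_num,
                                         struct ib_port_immutable *immutable)
    {
            struct ib_port_attr attr;
            int err;

            err = ib_query_port(ibdev, port_num, &attr);
            if (err)
                    return err;

            immutable->pkey_tbl_len   = attr.pkey_tbl_len;
            immutable->gid_tbl_len    = attr.gid_tbl_len;
            immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
            immutable->max_mad_size   = IB_MGMT_MAD_SIZE;

            return 0;
    }

Core code can then iterate from rdma_start_port() to rdma_end_port() and gate MAD, CM or SA setup on rdma_cap_ib_mad(), rdma_cap_ib_cm() and friends instead of hard-coding link-layer checks.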
@@ -1799,8 +2157,9 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
- struct ib_grh *grh, struct ib_ah_attr *ah_attr);
+int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct ib_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -1814,8 +2173,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
* The address handle is used to reference a local or global destination
* in all UD QP post sends.
*/
-struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
- struct ib_grh *grh, u8 port_num);
+struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
+ const struct ib_grh *grh, u8 port_num);
/**
* ib_modify_ah - Modifies the address vector associated with an address
@@ -2011,16 +2370,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
* asynchronous event not associated with a completion occurs on the CQ.
* @cq_context: Context associated with the CQ returned to the user via
* the associated completion and event handlers.
- * @cqe: The minimum size of the CQ.
- * @comp_vector - Completion vector used to signal completion events.
- * Must be >= 0 and < context->num_comp_vectors.
+ * @cq_attr: The attributes the CQ should be created upon.
*
* Users can examine the cq structure to determine the actual CQ size.
*/
struct ib_cq *ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler,
void (*event_handler)(struct ib_event *, void *),
- void *cq_context, int cqe, int comp_vector);
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr);
/**
* ib_resize_cq - Modifies the capacity of the CQ.
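CQ creation attributes are likewise bundled into struct ib_cq_init_attr instead of being passed as separate cqe/comp_vector arguments. A hypothetical caller (handler and context names invented) would now read:

    struct ib_cq_init_attr cq_attr = {
            .cqe         = 128,    /* minimum number of CQ entries */
            .comp_vector = 0,
            .flags       = 0,
    };
    struct ib_cq *cq;

    cq = ib_create_cq(device, example_comp_handler, example_event_handler,
                      cq_context, &cq_attr);
    if (IS_ERR(cq))
            return PTR_ERR(cq);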
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 1017e0bdf8ba..036bd2772662 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -91,6 +91,7 @@ struct iw_cm_id {
/* Used by provider to add and remove refs on IW cm_id */
void (*add_ref)(struct iw_cm_id *);
void (*rem_ref)(struct iw_cm_id *);
+ u8 tos;
};
struct iw_cm_conn_param {
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index 928b2775e992..fda31673a562 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -148,6 +148,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *);
int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *);
/**
+ * iwpm_remote_info_cb - Process remote connecting peer address info, which
+ * the port mapper has received from the connecting peer
+ *
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Stores the IPv4/IPv6 address info in a hash table
+ */
+int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *);
+
+/**
* iwpm_mapping_error_cb - Process port mapper notification for error
*
* @skb:
@@ -175,6 +185,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
/**
+ * iwpm_get_remote_info - Get the remote connecting peer address info
+ *
+ * @mapped_loc_addr: Mapped local address of the listening peer
+ * @mapped_rem_addr: Mapped remote address of the connecting peer
+ * @remote_addr: To store the remote address of the connecting peer
+ * @nl_client: The index of the netlink client
+ *
+ * The remote address info is retrieved and provided to the client in
+ * the remote_addr. After that it is removed from the hash table
+ */
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr, u8 nl_client);
+
+/**
* iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
* info in a hash table
* @local_addr: Local ip/tcp address
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
new file mode 100644
index 000000000000..29063e84c253
--- /dev/null
+++ b/include/rdma/opa_smi.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(OPA_SMI_H)
+#define OPA_SMI_H
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+
+#define OPA_SMP_LID_DATA_SIZE 2016
+#define OPA_SMP_DR_DATA_SIZE 1872
+#define OPA_SMP_MAX_PATH_HOPS 64
+
+#define OPA_SMI_CLASS_VERSION 0x80
+
+#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
+
+struct opa_smp {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ u8 hop_ptr;
+ u8 hop_cnt;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 mkey;
+ union {
+ struct {
+ uint8_t data[OPA_SMP_LID_DATA_SIZE];
+ } lid;
+ struct {
+ __be32 dr_slid;
+ __be32 dr_dlid;
+ u8 initial_path[OPA_SMP_MAX_PATH_HOPS];
+ u8 return_path[OPA_SMP_MAX_PATH_HOPS];
+ u8 reserved[8];
+ u8 data[OPA_SMP_DR_DATA_SIZE];
+ } dr;
+ } route;
+} __packed;
+
+
+static inline u8
+opa_get_smp_direction(struct opa_smp *smp)
+{
+ return ib_get_smp_direction((struct ib_smp *)smp);
+}
+
+static inline u8 *opa_get_smp_data(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return smp->route.dr.data;
+
+ return smp->route.lid.data;
+}
+
+static inline size_t opa_get_smp_data_size(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return sizeof(smp->route.dr.data);
+
+ return sizeof(smp->route.lid.data);
+}
+
+static inline size_t opa_get_smp_header_size(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return sizeof(*smp) - sizeof(smp->route.dr.data);
+
+ return sizeof(*smp) - sizeof(smp->route.lid.data);
+}
+
+#endif /* OPA_SMI_H */
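The accessors keep SMP handling independent of whether a packet is LID routed or directed route. For instance, a hypothetical helper that clears the payload could be written as:

    static void example_clear_smp_payload(struct opa_smp *smp)
    {
            /* Picks route.dr.data or route.lid.data based on the mgmt class. */
            memset(opa_get_smp_data(smp), 0, opa_get_smp_data_size(smp));
    }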
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 1ed2088dc9f5..c92522c192d2 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -62,6 +62,8 @@ enum rdma_cm_event_type {
RDMA_CM_EVENT_TIMEWAIT_EXIT
};
+__attribute_const__ const char *rdma_event_msg(enum rdma_cm_event_type event);
+
enum rdma_port_space {
RDMA_PS_SDP = 0x0001,
RDMA_PS_IPOIB = 0x0002,
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index f2902ef7ab75..4dce116bfd80 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -47,7 +47,8 @@ struct rxrpc_header {
#define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */
#define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection security response (CLNT->SRVR) */
#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */
-#define RXRPC_N_PACKET_TYPES 9 /* number of packet types (incl type 0) */
+#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */
+#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */
uint8_t flags; /* packet flags */
#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index d0a66aa1868d..e0a3398b1547 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -1,9 +1,6 @@
/*
* This header file contains public constants and structures used by
- * the scsi code for linux.
- *
- * For documentation on the OPCODES, MESSAGES, and SENSE values,
- * please consult the SCSI standard.
+ * the SCSI initiator code.
*/
#ifndef _SCSI_SCSI_H
#define _SCSI_SCSI_H
@@ -11,6 +8,8 @@
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
struct scsi_cmnd;
@@ -49,187 +48,6 @@ enum scsi_timeouts {
*/
#define SCAN_WILD_CARD ~0
-/*
- * SCSI opcodes
- */
-
-#define TEST_UNIT_READY 0x00
-#define REZERO_UNIT 0x01
-#define REQUEST_SENSE 0x03
-#define FORMAT_UNIT 0x04
-#define READ_BLOCK_LIMITS 0x05
-#define REASSIGN_BLOCKS 0x07
-#define INITIALIZE_ELEMENT_STATUS 0x07
-#define READ_6 0x08
-#define WRITE_6 0x0a
-#define SEEK_6 0x0b
-#define READ_REVERSE 0x0f
-#define WRITE_FILEMARKS 0x10
-#define SPACE 0x11
-#define INQUIRY 0x12
-#define RECOVER_BUFFERED_DATA 0x14
-#define MODE_SELECT 0x15
-#define RESERVE 0x16
-#define RELEASE 0x17
-#define COPY 0x18
-#define ERASE 0x19
-#define MODE_SENSE 0x1a
-#define START_STOP 0x1b
-#define RECEIVE_DIAGNOSTIC 0x1c
-#define SEND_DIAGNOSTIC 0x1d
-#define ALLOW_MEDIUM_REMOVAL 0x1e
-
-#define READ_FORMAT_CAPACITIES 0x23
-#define SET_WINDOW 0x24
-#define READ_CAPACITY 0x25
-#define READ_10 0x28
-#define WRITE_10 0x2a
-#define SEEK_10 0x2b
-#define POSITION_TO_ELEMENT 0x2b
-#define WRITE_VERIFY 0x2e
-#define VERIFY 0x2f
-#define SEARCH_HIGH 0x30
-#define SEARCH_EQUAL 0x31
-#define SEARCH_LOW 0x32
-#define SET_LIMITS 0x33
-#define PRE_FETCH 0x34
-#define READ_POSITION 0x34
-#define SYNCHRONIZE_CACHE 0x35
-#define LOCK_UNLOCK_CACHE 0x36
-#define READ_DEFECT_DATA 0x37
-#define MEDIUM_SCAN 0x38
-#define COMPARE 0x39
-#define COPY_VERIFY 0x3a
-#define WRITE_BUFFER 0x3b
-#define READ_BUFFER 0x3c
-#define UPDATE_BLOCK 0x3d
-#define READ_LONG 0x3e
-#define WRITE_LONG 0x3f
-#define CHANGE_DEFINITION 0x40
-#define WRITE_SAME 0x41
-#define UNMAP 0x42
-#define READ_TOC 0x43
-#define READ_HEADER 0x44
-#define GET_EVENT_STATUS_NOTIFICATION 0x4a
-#define LOG_SELECT 0x4c
-#define LOG_SENSE 0x4d
-#define XDWRITEREAD_10 0x53
-#define MODE_SELECT_10 0x55
-#define RESERVE_10 0x56
-#define RELEASE_10 0x57
-#define MODE_SENSE_10 0x5a
-#define PERSISTENT_RESERVE_IN 0x5e
-#define PERSISTENT_RESERVE_OUT 0x5f
-#define VARIABLE_LENGTH_CMD 0x7f
-#define REPORT_LUNS 0xa0
-#define SECURITY_PROTOCOL_IN 0xa2
-#define MAINTENANCE_IN 0xa3
-#define MAINTENANCE_OUT 0xa4
-#define MOVE_MEDIUM 0xa5
-#define EXCHANGE_MEDIUM 0xa6
-#define READ_12 0xa8
-#define SERVICE_ACTION_OUT_12 0xa9
-#define WRITE_12 0xaa
-#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
-#define SERVICE_ACTION_IN_12 0xab
-#define WRITE_VERIFY_12 0xae
-#define VERIFY_12 0xaf
-#define SEARCH_HIGH_12 0xb0
-#define SEARCH_EQUAL_12 0xb1
-#define SEARCH_LOW_12 0xb2
-#define SECURITY_PROTOCOL_OUT 0xb5
-#define READ_ELEMENT_STATUS 0xb8
-#define SEND_VOLUME_TAG 0xb6
-#define WRITE_LONG_2 0xea
-#define EXTENDED_COPY 0x83
-#define RECEIVE_COPY_RESULTS 0x84
-#define ACCESS_CONTROL_IN 0x86
-#define ACCESS_CONTROL_OUT 0x87
-#define READ_16 0x88
-#define COMPARE_AND_WRITE 0x89
-#define WRITE_16 0x8a
-#define READ_ATTRIBUTE 0x8c
-#define WRITE_ATTRIBUTE 0x8d
-#define VERIFY_16 0x8f
-#define SYNCHRONIZE_CACHE_16 0x91
-#define WRITE_SAME_16 0x93
-#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
-#define SERVICE_ACTION_IN_16 0x9e
-#define SERVICE_ACTION_OUT_16 0x9f
-/* values for service action in */
-#define SAI_READ_CAPACITY_16 0x10
-#define SAI_GET_LBA_STATUS 0x12
-#define SAI_REPORT_REFERRALS 0x13
-/* values for VARIABLE_LENGTH_CMD service action codes
- * see spc4r17 Section D.3.5, table D.7 and D.8 */
-#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
-/* values for maintenance in */
-#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
-#define MI_REPORT_TARGET_PGS 0x0a
-#define MI_REPORT_ALIASES 0x0b
-#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
-#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
-#define MI_REPORT_PRIORITY 0x0e
-#define MI_REPORT_TIMESTAMP 0x0f
-#define MI_MANAGEMENT_PROTOCOL_IN 0x10
-/* value for MI_REPORT_TARGET_PGS ext header */
-#define MI_EXT_HDR_PARAM_FMT 0x20
-/* values for maintenance out */
-#define MO_SET_IDENTIFYING_INFORMATION 0x06
-#define MO_SET_TARGET_PGS 0x0a
-#define MO_CHANGE_ALIASES 0x0b
-#define MO_SET_PRIORITY 0x0e
-#define MO_SET_TIMESTAMP 0x0f
-#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
-/* values for variable length command */
-#define XDREAD_32 0x03
-#define XDWRITE_32 0x04
-#define XPWRITE_32 0x06
-#define XDWRITEREAD_32 0x07
-#define READ_32 0x09
-#define VERIFY_32 0x0a
-#define WRITE_32 0x0b
-#define WRITE_SAME_32 0x0d
-
-/* Values for T10/04-262r7 */
-#define ATA_16 0x85 /* 16-byte pass-thru */
-#define ATA_12 0xa1 /* 12-byte pass-thru */
-
-/* Vendor specific CDBs start here */
-#define VENDOR_SPECIFIC_CDB 0xc0
-
-/*
- * SCSI command lengths
- */
-
-#define SCSI_MAX_VARLEN_CDB_SIZE 260
-
-/* defined in T10 SCSI Primary Commands-2 (SPC2) */
-struct scsi_varlen_cdb_hdr {
- __u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
- __u8 control;
- __u8 misc[5];
- __u8 additional_cdb_length; /* total cdb length - 8 */
- __be16 service_action;
- /* service specific data follows */
-};
-
-static inline unsigned
-scsi_varlen_cdb_length(const void *hdr)
-{
- return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
-}
-
-extern const unsigned char scsi_command_size_tbl[8];
-#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
-
-static inline unsigned
-scsi_command_size(const unsigned char *cmnd)
-{
- return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
- scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
-}
-
#ifdef CONFIG_ACPI
struct acpi_bus_type;
@@ -240,22 +58,6 @@ extern void
scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus);
#endif
-/*
- * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
- * T10/1561-D Revision 4 Draft dated 7th November 2002.
- */
-#define SAM_STAT_GOOD 0x00
-#define SAM_STAT_CHECK_CONDITION 0x02
-#define SAM_STAT_CONDITION_MET 0x04
-#define SAM_STAT_BUSY 0x08
-#define SAM_STAT_INTERMEDIATE 0x10
-#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
-#define SAM_STAT_TASK_SET_FULL 0x28
-#define SAM_STAT_ACA_ACTIVE 0x30
-#define SAM_STAT_TASK_ABORTED 0x40
-
/** scsi_status_is_good - check the status return.
*
* @status: the status passed up from the driver (including host and
@@ -279,86 +81,6 @@ static inline int scsi_status_is_good(int status)
(status == SAM_STAT_COMMAND_TERMINATED));
}
-/*
- * Status codes. These are deprecated as they are shifted 1 bit right
- * from those found in the SCSI standards. This causes confusion for
- * applications that are ported to several OSes. Prefer SAM Status codes
- * above.
- */
-
-#define GOOD 0x00
-#define CHECK_CONDITION 0x01
-#define CONDITION_GOOD 0x02
-#define BUSY 0x04
-#define INTERMEDIATE_GOOD 0x08
-#define INTERMEDIATE_C_GOOD 0x0a
-#define RESERVATION_CONFLICT 0x0c
-#define COMMAND_TERMINATED 0x11
-#define QUEUE_FULL 0x14
-#define ACA_ACTIVE 0x18
-#define TASK_ABORTED 0x20
-
-#define STATUS_MASK 0xfe
-
-/*
- * SENSE KEYS
- */
-
-#define NO_SENSE 0x00
-#define RECOVERED_ERROR 0x01
-#define NOT_READY 0x02
-#define MEDIUM_ERROR 0x03
-#define HARDWARE_ERROR 0x04
-#define ILLEGAL_REQUEST 0x05
-#define UNIT_ATTENTION 0x06
-#define DATA_PROTECT 0x07
-#define BLANK_CHECK 0x08
-#define COPY_ABORTED 0x0a
-#define ABORTED_COMMAND 0x0b
-#define VOLUME_OVERFLOW 0x0d
-#define MISCOMPARE 0x0e
-
-
-/*
- * DEVICE TYPES
- * Please keep them in 0x%02x format for $MODALIAS to work
- */
-
-#define TYPE_DISK 0x00
-#define TYPE_TAPE 0x01
-#define TYPE_PRINTER 0x02
-#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
-#define TYPE_WORM 0x04 /* Treated as ROM by our system */
-#define TYPE_ROM 0x05
-#define TYPE_SCANNER 0x06
-#define TYPE_MOD 0x07 /* Magneto-optical disk -
- * - treated as TYPE_DISK */
-#define TYPE_MEDIUM_CHANGER 0x08
-#define TYPE_COMM 0x09 /* Communications device */
-#define TYPE_RAID 0x0c
-#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
-#define TYPE_RBC 0x0e
-#define TYPE_OSD 0x11
-#define TYPE_ZBC 0x14
-#define TYPE_WLUN 0x1e /* well-known logical unit */
-#define TYPE_NO_LUN 0x7f
-
-/* SCSI protocols; these are taken from SPC-3 section 7.5 */
-enum scsi_protocol {
- SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
- SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
- SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
- SCSI_PROTOCOL_SBP = 3, /* firewire */
- SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
- SCSI_PROTOCOL_ISCSI = 5,
- SCSI_PROTOCOL_SAS = 6,
- SCSI_PROTOCOL_ADT = 7, /* Media Changers */
- SCSI_PROTOCOL_ATA = 8,
- SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
-};
-
-/* Returns a human-readable name for the device */
-extern const char * scsi_device_type(unsigned type);
/*
* standard mode-select header prepended to all mode-select commands
@@ -380,13 +102,6 @@ struct ccs_modesel_head {
};
/*
- * ScsiLun: 8 byte LUN.
- */
-struct scsi_lun {
- __u8 scsi_lun[8];
-};
-
-/*
* The Well Known LUNS (SAM-3) in our int representation of a LUN
*/
#define SCSI_W_LUN_BASE 0xc100
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
new file mode 100644
index 000000000000..676b03b78e57
--- /dev/null
+++ b/include/scsi/scsi_common.h
@@ -0,0 +1,64 @@
+/*
+ * Functions used by both the SCSI initiator code and the SCSI target code.
+ */
+
+#ifndef _SCSI_COMMON_H_
+#define _SCSI_COMMON_H_
+
+#include <linux/types.h>
+#include <scsi/scsi_proto.h>
+
+static inline unsigned
+scsi_varlen_cdb_length(const void *hdr)
+{
+ return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
+}
+
+extern const unsigned char scsi_command_size_tbl[8];
+#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
+
+static inline unsigned
+scsi_command_size(const unsigned char *cmnd)
+{
+ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
+ scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
+}
+
+/* Returns a human-readable name for the device */
+extern const char *scsi_device_type(unsigned type);
+
+extern void int_to_scsilun(u64, struct scsi_lun *);
+extern u64 scsilun_to_int(struct scsi_lun *);
+
+/*
+ * This is a slightly modified SCSI sense "descriptor" format header.
+ * The addition is to allow the 0x70 and 0x71 response codes. The idea
+ * is to place the salient data from either "fixed" or "descriptor" sense
+ * format into one structure to ease application processing.
+ *
+ * The original sense buffer should be kept around for those cases
+ * in which more information is required (e.g. the LBA of a MEDIUM ERROR).
+ */
+struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
+ u8 response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
+ u8 sense_key;
+ u8 asc;
+ u8 ascq;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ u8 additional_length; /* always 0 for fixed sense format */
+};
+
+static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
+{
+ if (!sshdr)
+ return false;
+
+ return (sshdr->response_code & 0x70) == 0x70;
+}
+
+extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
+ struct scsi_sense_hdr *sshdr);
+
+#endif /* _SCSI_COMMON_H_ */
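As a quick illustration of the helpers gathered here (the sense buffer and its length are assumed to be in scope): COMMAND_SIZE() derives the CDB length from the opcode's group bits, and scsi_normalize_sense() pairs with scsi_sense_valid() when decoding sense data.

    unsigned char cdb[16] = { READ_10 };            /* opcode 0x28, group 1 */
    unsigned int cdb_len = scsi_command_size(cdb);  /* 10 bytes */

    struct scsi_sense_hdr sshdr;

    if (scsi_normalize_sense(sense_buffer, sense_len, &sshdr) &&
        scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION) {
            /* e.g. retry or rescan after a unit attention condition */
    }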
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index a4c9336811d1..ae84b2214d40 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -413,8 +413,6 @@ extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *, enum scsi_device_state);
extern void scsi_remove_target(struct device *);
-extern void int_to_scsilun(u64, struct scsi_lun *);
-extern u64 scsilun_to_int(struct scsi_lun *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 183eaab7c380..96e3f56519e7 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -36,5 +36,6 @@
for sequential scan */
#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
+#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
#endif
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 5a4bb5bb66b3..4942710ef720 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -7,43 +7,12 @@
struct scsi_device;
struct Scsi_Host;
-/*
- * This is a slightly modified SCSI sense "descriptor" format header.
- * The addition is to allow the 0x70 and 0x71 response codes. The idea
- * is to place the salient data from either "fixed" or "descriptor" sense
- * format into one structure to ease application processing.
- *
- * The original sense buffer should be kept around for those cases
- * in which more information is required (e.g. the LBA of a MEDIUM ERROR).
- */
-struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
- u8 response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
- u8 sense_key;
- u8 asc;
- u8 ascq;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- u8 additional_length; /* always 0 for fixed sense format */
-};
-
-static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
-{
- if (!sshdr)
- return false;
-
- return (sshdr->response_code & 0x70) == 0x70;
-}
-
-
extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
struct list_head *done_q);
extern void scsi_eh_flush_done_q(struct list_head *done_q);
extern void scsi_report_bus_reset(struct Scsi_Host *, int);
extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);
-extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
- struct scsi_sense_hdr *sshdr);
extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr);
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
new file mode 100644
index 000000000000..a9fbf1b38e71
--- /dev/null
+++ b/include/scsi/scsi_proto.h
@@ -0,0 +1,281 @@
+/*
+ * This header file contains public constants and structures used by
+ * both the SCSI initiator and the SCSI target code.
+ *
+ * For documentation on the OPCODES, MESSAGES, and SENSE values,
+ * please consult the SCSI standard.
+ */
+
+#ifndef _SCSI_PROTO_H_
+#define _SCSI_PROTO_H_
+
+#include <linux/types.h>
+
+/*
+ * SCSI opcodes
+ */
+
+#define TEST_UNIT_READY 0x00
+#define REZERO_UNIT 0x01
+#define REQUEST_SENSE 0x03
+#define FORMAT_UNIT 0x04
+#define READ_BLOCK_LIMITS 0x05
+#define REASSIGN_BLOCKS 0x07
+#define INITIALIZE_ELEMENT_STATUS 0x07
+#define READ_6 0x08
+#define WRITE_6 0x0a
+#define SEEK_6 0x0b
+#define READ_REVERSE 0x0f
+#define WRITE_FILEMARKS 0x10
+#define SPACE 0x11
+#define INQUIRY 0x12
+#define RECOVER_BUFFERED_DATA 0x14
+#define MODE_SELECT 0x15
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define COPY 0x18
+#define ERASE 0x19
+#define MODE_SENSE 0x1a
+#define START_STOP 0x1b
+#define RECEIVE_DIAGNOSTIC 0x1c
+#define SEND_DIAGNOSTIC 0x1d
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+
+#define READ_FORMAT_CAPACITIES 0x23
+#define SET_WINDOW 0x24
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define WRITE_10 0x2a
+#define SEEK_10 0x2b
+#define POSITION_TO_ELEMENT 0x2b
+#define WRITE_VERIFY 0x2e
+#define VERIFY 0x2f
+#define SEARCH_HIGH 0x30
+#define SEARCH_EQUAL 0x31
+#define SEARCH_LOW 0x32
+#define SET_LIMITS 0x33
+#define PRE_FETCH 0x34
+#define READ_POSITION 0x34
+#define SYNCHRONIZE_CACHE 0x35
+#define LOCK_UNLOCK_CACHE 0x36
+#define READ_DEFECT_DATA 0x37
+#define MEDIUM_SCAN 0x38
+#define COMPARE 0x39
+#define COPY_VERIFY 0x3a
+#define WRITE_BUFFER 0x3b
+#define READ_BUFFER 0x3c
+#define UPDATE_BLOCK 0x3d
+#define READ_LONG 0x3e
+#define WRITE_LONG 0x3f
+#define CHANGE_DEFINITION 0x40
+#define WRITE_SAME 0x41
+#define UNMAP 0x42
+#define READ_TOC 0x43
+#define READ_HEADER 0x44
+#define GET_EVENT_STATUS_NOTIFICATION 0x4a
+#define LOG_SELECT 0x4c
+#define LOG_SENSE 0x4d
+#define XDWRITEREAD_10 0x53
+#define MODE_SELECT_10 0x55
+#define RESERVE_10 0x56
+#define RELEASE_10 0x57
+#define MODE_SENSE_10 0x5a
+#define PERSISTENT_RESERVE_IN 0x5e
+#define PERSISTENT_RESERVE_OUT 0x5f
+#define VARIABLE_LENGTH_CMD 0x7f
+#define REPORT_LUNS 0xa0
+#define SECURITY_PROTOCOL_IN 0xa2
+#define MAINTENANCE_IN 0xa3
+#define MAINTENANCE_OUT 0xa4
+#define MOVE_MEDIUM 0xa5
+#define EXCHANGE_MEDIUM 0xa6
+#define READ_12 0xa8
+#define SERVICE_ACTION_OUT_12 0xa9
+#define WRITE_12 0xaa
+#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
+#define SERVICE_ACTION_IN_12 0xab
+#define WRITE_VERIFY_12 0xae
+#define VERIFY_12 0xaf
+#define SEARCH_HIGH_12 0xb0
+#define SEARCH_EQUAL_12 0xb1
+#define SEARCH_LOW_12 0xb2
+#define SECURITY_PROTOCOL_OUT 0xb5
+#define READ_ELEMENT_STATUS 0xb8
+#define SEND_VOLUME_TAG 0xb6
+#define WRITE_LONG_2 0xea
+#define EXTENDED_COPY 0x83
+#define RECEIVE_COPY_RESULTS 0x84
+#define ACCESS_CONTROL_IN 0x86
+#define ACCESS_CONTROL_OUT 0x87
+#define READ_16 0x88
+#define COMPARE_AND_WRITE 0x89
+#define WRITE_16 0x8a
+#define READ_ATTRIBUTE 0x8c
+#define WRITE_ATTRIBUTE 0x8d
+#define VERIFY_16 0x8f
+#define SYNCHRONIZE_CACHE_16 0x91
+#define WRITE_SAME_16 0x93
+#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
+#define SERVICE_ACTION_IN_16 0x9e
+#define SERVICE_ACTION_OUT_16 0x9f
+/* values for service action in */
+#define SAI_READ_CAPACITY_16 0x10
+#define SAI_GET_LBA_STATUS 0x12
+#define SAI_REPORT_REFERRALS 0x13
+/* values for VARIABLE_LENGTH_CMD service action codes
+ * see spc4r17 Section D.3.5, table D.7 and D.8 */
+#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
+/* values for maintenance in */
+#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
+#define MI_REPORT_TARGET_PGS 0x0a
+#define MI_REPORT_ALIASES 0x0b
+#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
+#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
+#define MI_REPORT_PRIORITY 0x0e
+#define MI_REPORT_TIMESTAMP 0x0f
+#define MI_MANAGEMENT_PROTOCOL_IN 0x10
+/* value for MI_REPORT_TARGET_PGS ext header */
+#define MI_EXT_HDR_PARAM_FMT 0x20
+/* values for maintenance out */
+#define MO_SET_IDENTIFYING_INFORMATION 0x06
+#define MO_SET_TARGET_PGS 0x0a
+#define MO_CHANGE_ALIASES 0x0b
+#define MO_SET_PRIORITY 0x0e
+#define MO_SET_TIMESTAMP 0x0f
+#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
+/* values for variable length command */
+#define XDREAD_32 0x03
+#define XDWRITE_32 0x04
+#define XPWRITE_32 0x06
+#define XDWRITEREAD_32 0x07
+#define READ_32 0x09
+#define VERIFY_32 0x0a
+#define WRITE_32 0x0b
+#define WRITE_SAME_32 0x0d
+
+/* Values for T10/04-262r7 */
+#define ATA_16 0x85 /* 16-byte pass-thru */
+#define ATA_12 0xa1 /* 12-byte pass-thru */
+
+/* Vendor specific CDBs start here */
+#define VENDOR_SPECIFIC_CDB 0xc0
+
+/*
+ * SCSI command lengths
+ */
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+
+/* defined in T10 SCSI Primary Commands-2 (SPC2) */
+struct scsi_varlen_cdb_hdr {
+ __u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
+ __u8 control;
+ __u8 misc[5];
+ __u8 additional_cdb_length; /* total cdb length - 8 */
+ __be16 service_action;
+ /* service specific data follows */
+};
+
+/*
+ * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
+ * T10/1561-D Revision 4 Draft dated 7th November 2002.
+ */
+#define SAM_STAT_GOOD 0x00
+#define SAM_STAT_CHECK_CONDITION 0x02
+#define SAM_STAT_CONDITION_MET 0x04
+#define SAM_STAT_BUSY 0x08
+#define SAM_STAT_INTERMEDIATE 0x10
+#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
+#define SAM_STAT_RESERVATION_CONFLICT 0x18
+#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
+#define SAM_STAT_TASK_SET_FULL 0x28
+#define SAM_STAT_ACA_ACTIVE 0x30
+#define SAM_STAT_TASK_ABORTED 0x40
+
+/*
+ * Status codes. These are deprecated as they are shifted 1 bit right
+ * from those found in the SCSI standards. This causes confusion for
+ * applications that are ported to several OSes. Prefer SAM Status codes
+ * above.
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define COMMAND_TERMINATED 0x11
+#define QUEUE_FULL 0x14
+#define ACA_ACTIVE 0x18
+#define TASK_ABORTED 0x20
+
+#define STATUS_MASK 0xfe
+
+/*
+ * SENSE KEYS
+ */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define BLANK_CHECK 0x08
+#define COPY_ABORTED 0x0a
+#define ABORTED_COMMAND 0x0b
+#define VOLUME_OVERFLOW 0x0d
+#define MISCOMPARE 0x0e
+
+
+/*
+ * DEVICE TYPES
+ * Please keep them in 0x%02x format for $MODALIAS to work
+ */
+
+#define TYPE_DISK 0x00
+#define TYPE_TAPE 0x01
+#define TYPE_PRINTER 0x02
+#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
+#define TYPE_WORM 0x04 /* Treated as ROM by our system */
+#define TYPE_ROM 0x05
+#define TYPE_SCANNER 0x06
+#define TYPE_MOD 0x07 /* Magneto-optical disk -
+ * - treated as TYPE_DISK */
+#define TYPE_MEDIUM_CHANGER 0x08
+#define TYPE_COMM 0x09 /* Communications device */
+#define TYPE_RAID 0x0c
+#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
+#define TYPE_RBC 0x0e
+#define TYPE_OSD 0x11
+#define TYPE_ZBC 0x14
+#define TYPE_WLUN 0x1e /* well-known logical unit */
+#define TYPE_NO_LUN 0x7f
+
+/* SCSI protocols; these are taken from SPC-3 section 7.5 */
+enum scsi_protocol {
+ SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
+ SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
+ SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
+ SCSI_PROTOCOL_SBP = 3, /* firewire */
+ SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
+ SCSI_PROTOCOL_ISCSI = 5,
+ SCSI_PROTOCOL_SAS = 6,
+ SCSI_PROTOCOL_ADT = 7, /* Media Changers */
+ SCSI_PROTOCOL_ATA = 8,
+ SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
+};
+
+/*
+ * ScsiLun: 8 byte LUN.
+ */
+struct scsi_lun {
+ __u8 scsi_lun[8];
+};
+
+
+#endif /* _SCSI_PROTO_H_ */
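The deprecated status codes above are the SAM status codes shifted right by one bit, and additional_cdb_length in the variable-length CDB header is the total CDB length minus 8. A minimal userspace sketch of both relationships, reusing values defined in this header (nothing below is kernel API):

#include <stdio.h>

/* values copied from scsi_proto.h above */
#define SAM_STAT_CHECK_CONDITION 0x02
#define SAM_STAT_TASK_SET_FULL   0x28
#define CHECK_CONDITION          0x01
#define QUEUE_FULL               0x14

int main(void)
{
	/* deprecated code == SAM code >> 1 */
	printf("0x%02x >> 1 = 0x%02x (CHECK_CONDITION is 0x%02x)\n",
	       SAM_STAT_CHECK_CONDITION, SAM_STAT_CHECK_CONDITION >> 1,
	       CHECK_CONDITION);
	printf("0x%02x >> 1 = 0x%02x (QUEUE_FULL is 0x%02x)\n",
	       SAM_STAT_TASK_SET_FULL, SAM_STAT_TASK_SET_FULL >> 1,
	       QUEUE_FULL);

	/* variable-length CDB: additional_cdb_length = total length - 8 */
	printf("additional_cdb_length for a 32-byte CDB: %d\n", 32 - 8);
	return 0;
}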
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 007a0bc01b74..784bc2c0929f 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -135,6 +135,7 @@ enum fc_vport_state {
#define FC_PORTSPEED_40GBIT 0x100
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
+#define FC_PORTSPEED_25GBIT 0x800
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 1ae84db4c9fb..5be834de491a 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -42,6 +42,7 @@
*/
#include <linux/types.h>
+#include <scsi/scsi.h>
enum {
SRP_LOGIN_REQ = 0x00,
@@ -179,7 +180,7 @@ struct srp_tsk_mgmt {
u8 reserved1[6];
u64 tag;
u8 reserved2[4];
- __be64 lun __attribute__((packed));
+ struct scsi_lun lun;
u8 reserved3[2];
u8 tsk_mgmt_func;
u8 reserved4;
@@ -200,7 +201,7 @@ struct srp_cmd {
u8 data_in_desc_cnt;
u64 tag;
u8 reserved2[4];
- __be64 lun __attribute__((packed));
+ struct scsi_lun lun;
u8 reserved3;
u8 task_attr;
u8 reserved4;
@@ -265,7 +266,7 @@ struct srp_aer_req {
__be32 req_lim_delta;
u64 tag;
u32 reserved2;
- __be64 lun;
+ struct scsi_lun lun;
__be32 sense_data_len;
u32 reserved3;
u8 sense_data[0];
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index d315a08d6c6d..0e9d75b49bed 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -608,7 +608,9 @@ struct ac97_quirk {
int type; /* quirk type above */
};
-int snd_ac97_tune_hardware(struct snd_ac97 *ac97, struct ac97_quirk *quirk, const char *override);
+int snd_ac97_tune_hardware(struct snd_ac97 *ac97,
+ const struct ac97_quirk *quirk,
+ const char *override);
int snd_ac97_set_rate(struct snd_ac97 *ac97, int reg, unsigned int rate);
/*
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index f48089d364c5..fa1d05512c09 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -70,7 +70,7 @@ struct snd_compr_runtime {
* @device: device pointer
* @direction: stream direction, playback/recording
* @metadata_set: metadata set flag, true when set
- * @next_track: has userspace signall next track transistion, true when set
+ * @next_track: true when userspace has signaled the next track transition
* @private_data: pointer to DSP private data
*/
struct snd_compr_stream {
@@ -95,7 +95,7 @@ struct snd_compr_stream {
* and the stream properties
* @get_params: retrieve the codec parameters, mandatory
* @set_metadata: Set the metadata values for a stream
- * @get_metadata: retreives the requested metadata values from stream
+ * @get_metadata: retrieves the requested metadata values from stream
* @trigger: Trigger operations like start, pause, resume, drain, stop.
* This callback is mandatory
* @pointer: Retrieve current h/w pointer information. Mandatory
diff --git a/include/sound/control.h b/include/sound/control.h
index 75f3054023f7..95aad6d3fd1a 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -227,7 +227,7 @@ snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave)
* Add a virtual slave control to the given master.
* Unlike snd_ctl_add_slave(), the element added via this function
* is supposed to have volatile values, and get callback is called
- * at each time quried from the master.
+ * each time it is queried from the master.
*
* When the control peeks the hardware values directly and the value
* can be changed by other means than the put callback of the element,
diff --git a/include/sound/core.h b/include/sound/core.h
index da5748289968..b12931f513f4 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -278,7 +278,8 @@ int snd_device_new(struct snd_card *card, enum snd_device_type type,
void *device_data, struct snd_device_ops *ops);
int snd_device_register(struct snd_card *card, void *device_data);
int snd_device_register_all(struct snd_card *card);
-int snd_device_disconnect_all(struct snd_card *card);
+void snd_device_disconnect(struct snd_card *card, void *device_data);
+void snd_device_disconnect_all(struct snd_card *card);
void snd_device_free(struct snd_card *card, void *device_data);
void snd_device_free_all(struct snd_card *card);
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
index 26f406e0f673..3a8fca9409a7 100644
--- a/include/sound/designware_i2s.h
+++ b/include/sound/designware_i2s.h
@@ -1,5 +1,5 @@
/*
- * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+ * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 0de95ccb92cf..5bd134651f5e 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -41,7 +41,8 @@
#define EMUPAGESIZE 4096
#define MAXREQVOICES 8
-#define MAXPAGES 8192
+#define MAXPAGES0 4096 /* 32 bit mode */
+#define MAXPAGES1 8192 /* 31 bit mode */
#define RESERVED 0
#define NUM_MIDI 16
#define NUM_G 64 /* use all channels */
@@ -50,8 +51,7 @@
/* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
#define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
-#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
- /* See ALSA bug #1276 - rlrevell */
+#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
#define TMEMSIZE 256*1024
#define TMEMSIZEREG 4
@@ -466,8 +466,11 @@
#define MAPB 0x0d /* Cache map B */
-#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
-#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
+#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
+#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
+
+#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
+#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
/* 0x0e, 0x0f: Not used */
@@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
unsigned short model; /* subsystem id */
unsigned int card_type; /* EMU10K1_CARD_* */
unsigned int ecard_ctrl; /* ecard control bits */
+ unsigned int address_mode; /* address mode */
unsigned long dma_mask; /* PCI DMA mask */
unsigned int delay_pcm_irq; /* in samples */
int max_cache_pages; /* max memory size / PAGE_SIZE */
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
new file mode 100644
index 000000000000..df705908480a
--- /dev/null
+++ b/include/sound/hda_regmap.h
@@ -0,0 +1,219 @@
+/*
+ * HD-audio regmap helpers
+ */
+
+#ifndef __SOUND_HDA_REGMAP_H
+#define __SOUND_HDA_REGMAP_H
+
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/hdaudio.h>
+
+#define AC_AMP_FAKE_MUTE 0x10 /* fake mute bit set to amp verbs */
+
+int snd_hdac_regmap_init(struct hdac_device *codec);
+void snd_hdac_regmap_exit(struct hdac_device *codec);
+int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
+ unsigned int verb);
+int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int *val);
+int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int val);
+int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int mask, unsigned int val);
+
+/**
+ * snd_hdac_regmap_encode_verb - encode the verb to a pseudo register
+ * @nid: widget NID
+ * @verb: codec verb
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_verb(nid, verb) \
+ (((verb) << 8) | 0x80000 | ((unsigned int)(nid) << 20))
+
+/**
+ * snd_hdac_regmap_encode_amp - encode the AMP verb to a pseudo register
+ * @nid: widget NID
+ * @ch: channel (left = 0, right = 1)
+ * @dir: direction (#HDA_INPUT, #HDA_OUTPUT)
+ * @idx: input index value
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_amp(nid, ch, dir, idx) \
+ (snd_hdac_regmap_encode_verb(nid, AC_VERB_GET_AMP_GAIN_MUTE) | \
+ ((ch) ? AC_AMP_GET_RIGHT : AC_AMP_GET_LEFT) | \
+ ((dir) == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT) | \
+ (idx))
+
+/**
+ * snd_hdac_regmap_encode_amp_stereo - encode a pseudo register for stereo AMPs
+ * @nid: widget NID
+ * @dir: direction (#HDA_INPUT, #HDA_OUTPUT)
+ * @idx: input index value
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_amp_stereo(nid, dir, idx) \
+ (snd_hdac_regmap_encode_verb(nid, AC_VERB_GET_AMP_GAIN_MUTE) | \
+ AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT | /* both bits set! */ \
+ ((dir) == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT) | \
+ (idx))
+
+/**
+ * snd_hdac_regmap_write - Write a verb with caching
+ * @codec: the codec object
+ * @nid: codec NID
+ * @verb: verb to write
+ * @val: value to write
+ *
+ * For writing an amp value, use snd_hdac_regmap_update_amp().
+ */
+static inline int
+snd_hdac_regmap_write(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_write_raw(codec, cmd, val);
+}
+
+/**
+ * snd_hdac_regmap_update - Update a verb value with caching
+ * @codec: the codec object
+ * @nid: codec NID
+ * @verb: verb to update
+ * @mask: bit mask to update
+ * @val: value to update
+ *
+ * For updating an amp value, use snd_hdac_regmap_update_amp().
+ */
+static inline int
+snd_hdac_regmap_update(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int mask,
+ unsigned int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_read - Read a verb with caching
+ * @codec: the codec object
+ * @nid: codec NID
+ * @verb: verb to read
+ * @val: pointer to store the value
+ *
+ * For reading an amp value, use snd_hdac_regmap_get_amp().
+ */
+static inline int
+snd_hdac_regmap_read(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int *val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_read_raw(codec, cmd, val);
+}
+
+/**
+ * snd_hdac_regmap_get_amp - Read AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to read the AMP value
+ * @ch: channel (left=0 or right=1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ *
+ * Read an AMP value. The volume ranges from 0 to 0x7f; 0x80 is the mute bit.
+ * Returns the value or a negative error.
+ */
+static inline int
+snd_hdac_regmap_get_amp(struct hdac_device *codec, hda_nid_t nid,
+ int ch, int dir, int idx)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx);
+ int err, val;
+
+ err = snd_hdac_regmap_read_raw(codec, cmd, &val);
+ return err < 0 ? err : val;
+}
+
+/**
+ * snd_hdac_regmap_update_amp - update the AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to update the AMP value
+ * @ch: channel (left=0 or right=1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Update the AMP value with a bit mask.
+ * Returns 0 if the value is unchanged, 1 if changed, or a negative error.
+ */
+static inline int
+snd_hdac_regmap_update_amp(struct hdac_device *codec, hda_nid_t nid,
+ int ch, int dir, int idx, int mask, int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_get_amp_stereo - Read stereo AMP values
+ * @codec: HD-audio codec
+ * @nid: NID to read the AMP value
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ *
+ * Read stereo AMP values. The lower byte is left, the upper byte is right.
+ * Returns the value or a negative error.
+ */
+static inline int
+snd_hdac_regmap_get_amp_stereo(struct hdac_device *codec, hda_nid_t nid,
+ int dir, int idx)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp_stereo(nid, dir, idx);
+ int err, val;
+
+ err = snd_hdac_regmap_read_raw(codec, cmd, &val);
+ return err < 0 ? err : val;
+}
+
+/**
+ * snd_hdac_regmap_update_amp_stereo - update the stereo AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to update the AMP value
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Update the stereo AMP value with a bit mask.
+ * The lower byte is left, the upper byte is right.
+ * Returns 0 if the value is unchanged, 1 if changed, or a negative error.
+ */
+static inline int
+snd_hdac_regmap_update_amp_stereo(struct hdac_device *codec, hda_nid_t nid,
+ int dir, int idx, int mask, int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp_stereo(nid, dir, idx);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_sync_node - sync the widget node attributes
+ * @codec: HD-audio codec
+ * @nid: NID to sync
+ */
+static inline void
+snd_hdac_regmap_sync_node(struct hdac_device *codec, hda_nid_t nid)
+{
+ regcache_mark_dirty(codec->regmap);
+ regcache_sync_region(codec->regmap, nid << 20, ((nid + 1) << 20) - 1);
+}
+
+#endif /* __SOUND_HDA_REGMAP_H */
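The encode macros above build a pseudo register by packing the NID into bits 20 and up, setting bit 19 as a verb marker, and placing the verb at bit 8. A standalone sketch of that arithmetic; the sample NID and verb values are arbitrary, not taken from the header:

#include <stdio.h>

/* mirrors snd_hdac_regmap_encode_verb() from hda_regmap.h above */
#define encode_verb(nid, verb) \
	(((verb) << 8) | 0x80000 | ((unsigned int)(nid) << 20))

int main(void)
{
	unsigned int nid = 0x02, verb = 0xb00;	/* arbitrary sample values */

	printf("nid=0x%02x verb=0x%03x -> pseudo register 0x%08x\n",
	       nid, verb, encode_verb(nid, verb));
	return 0;
}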
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
new file mode 100644
index 000000000000..2a8aa9dfb83d
--- /dev/null
+++ b/include/sound/hdaudio.h
@@ -0,0 +1,247 @@
+/*
+ * HD-audio core stuff
+ */
+
+#ifndef __SOUND_HDAUDIO_H
+#define __SOUND_HDAUDIO_H
+
+#include <linux/device.h>
+#include <sound/hda_verbs.h>
+
+/* codec node id */
+typedef u16 hda_nid_t;
+
+struct hdac_bus;
+struct hdac_device;
+struct hdac_driver;
+struct hdac_widget_tree;
+
+/*
+ * exported bus type
+ */
+extern struct bus_type snd_hda_bus_type;
+
+/*
+ * generic arrays
+ */
+struct snd_array {
+ unsigned int used;
+ unsigned int alloced;
+ unsigned int elem_size;
+ unsigned int alloc_align;
+ void *list;
+};
+
+/*
+ * HD-audio codec base device
+ */
+struct hdac_device {
+ struct device dev;
+ int type;
+ struct hdac_bus *bus;
+ unsigned int addr; /* codec address */
+ struct list_head list; /* list point for bus codec_list */
+
+ hda_nid_t afg; /* AFG node id */
+ hda_nid_t mfg; /* MFG node id */
+
+ /* ids */
+ unsigned int vendor_id;
+ unsigned int subsystem_id;
+ unsigned int revision_id;
+ unsigned int afg_function_id;
+ unsigned int mfg_function_id;
+ unsigned int afg_unsol:1;
+ unsigned int mfg_unsol:1;
+
+ unsigned int power_caps; /* FG power caps */
+
+ const char *vendor_name; /* codec vendor name */
+ const char *chip_name; /* codec chip name */
+
+ /* verb exec op override */
+ int (*exec_verb)(struct hdac_device *dev, unsigned int cmd,
+ unsigned int flags, unsigned int *res);
+
+ /* widgets */
+ unsigned int num_nodes;
+ hda_nid_t start_nid, end_nid;
+
+ /* misc flags */
+ atomic_t in_pm; /* suspend/resume being performed */
+
+ /* sysfs */
+ struct hdac_widget_tree *widgets;
+
+ /* regmap */
+ struct regmap *regmap;
+ struct snd_array vendor_verbs;
+ bool lazy_cache:1; /* don't wake up for writes */
+ bool caps_overwriting:1; /* caps overwrite being in process */
+ bool cache_coef:1; /* cache COEF read/write too */
+};
+
+/* device/driver type used for matching */
+enum {
+ HDA_DEV_CORE,
+ HDA_DEV_LEGACY,
+};
+
+/* direction */
+enum {
+ HDA_INPUT, HDA_OUTPUT
+};
+
+#define dev_to_hdac_dev(_dev) container_of(_dev, struct hdac_device, dev)
+
+int snd_hdac_device_init(struct hdac_device *dev, struct hdac_bus *bus,
+ const char *name, unsigned int addr);
+void snd_hdac_device_exit(struct hdac_device *dev);
+int snd_hdac_device_register(struct hdac_device *codec);
+void snd_hdac_device_unregister(struct hdac_device *codec);
+
+int snd_hdac_refresh_widgets(struct hdac_device *codec);
+
+unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int parm);
+int snd_hdac_exec_verb(struct hdac_device *codec, unsigned int cmd,
+ unsigned int flags, unsigned int *res);
+int snd_hdac_read(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int parm, unsigned int *res);
+int _snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid, int parm,
+ unsigned int *res);
+int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
+ int parm);
+int snd_hdac_override_parm(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int parm, unsigned int val);
+int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
+ hda_nid_t *conn_list, int max_conns);
+int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
+ hda_nid_t *start_id);
+
+/**
+ * snd_hdac_read_parm - read a codec parameter
+ * @codec: the codec object
+ * @nid: NID to read a parameter
+ * @parm: parameter to read
+ *
+ * Returns -1 for error. If you need to distinguish the error more
+ * strictly, use _snd_hdac_read_parm() directly.
+ */
+static inline int snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid,
+ int parm)
+{
+ unsigned int val;
+
+ return _snd_hdac_read_parm(codec, nid, parm, &val) < 0 ? -1 : val;
+}
+
+#ifdef CONFIG_PM
+void snd_hdac_power_up(struct hdac_device *codec);
+void snd_hdac_power_down(struct hdac_device *codec);
+void snd_hdac_power_up_pm(struct hdac_device *codec);
+void snd_hdac_power_down_pm(struct hdac_device *codec);
+#else
+static inline void snd_hdac_power_up(struct hdac_device *codec) {}
+static inline void snd_hdac_power_down(struct hdac_device *codec) {}
+static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
+static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
+#endif
+
+/*
+ * HD-audio codec base driver
+ */
+struct hdac_driver {
+ struct device_driver driver;
+ int type;
+ int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
+ void (*unsol_event)(struct hdac_device *dev, unsigned int event);
+};
+
+#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
+
+/*
+ * HD-audio bus base driver
+ */
+struct hdac_bus_ops {
+ /* send a single command */
+ int (*command)(struct hdac_bus *bus, unsigned int cmd);
+ /* get a response from the last command */
+ int (*get_response)(struct hdac_bus *bus, unsigned int addr,
+ unsigned int *res);
+};
+
+#define HDA_UNSOL_QUEUE_SIZE 64
+
+struct hdac_bus {
+ struct device *dev;
+ const struct hdac_bus_ops *ops;
+
+ /* codec linked list */
+ struct list_head codec_list;
+ unsigned int num_codecs;
+
+ /* link caddr -> codec */
+ struct hdac_device *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1];
+
+ /* unsolicited event queue */
+ u32 unsol_queue[HDA_UNSOL_QUEUE_SIZE * 2]; /* ring buffer */
+ unsigned int unsol_rp, unsol_wp;
+ struct work_struct unsol_work;
+
+ /* bit flags of powered codecs */
+ unsigned long codec_powered;
+
+ /* flags */
+ bool sync_write:1; /* sync after verb write */
+
+ /* locks */
+ struct mutex cmd_mutex;
+};
+
+int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
+ const struct hdac_bus_ops *ops);
+void snd_hdac_bus_exit(struct hdac_bus *bus);
+int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res);
+int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res);
+void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
+
+int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec);
+void snd_hdac_bus_remove_device(struct hdac_bus *bus,
+ struct hdac_device *codec);
+
+static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
+{
+ set_bit(codec->addr, &codec->bus->codec_powered);
+}
+
+static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
+{
+ clear_bit(codec->addr, &codec->bus->codec_powered);
+}
+
+/*
+ * generic array helpers
+ */
+void *snd_array_new(struct snd_array *array);
+void snd_array_free(struct snd_array *array);
+static inline void snd_array_init(struct snd_array *array, unsigned int size,
+ unsigned int align)
+{
+ array->elem_size = size;
+ array->alloc_align = align;
+}
+
+static inline void *snd_array_elem(struct snd_array *array, unsigned int idx)
+{
+ return array->list + idx * array->elem_size;
+}
+
+static inline unsigned int snd_array_index(struct snd_array *array, void *ptr)
+{
+ return (unsigned long)(ptr - array->list) / array->elem_size;
+}
+
+#endif /* __SOUND_HDAUDIO_H */
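The snd_array helpers above address elements purely through elem_size, so reads and writes go through snd_array_elem(). A userspace mock of that addressing; calloc() stands in for the kernel's snd_array_new() allocation, which is not shown in this header:

#include <stdio.h>
#include <stdlib.h>

/* userspace mock of the struct and the element-addressing helper above */
struct snd_array {
	unsigned int used, alloced, elem_size, alloc_align;
	void *list;
};

static void *snd_array_elem(struct snd_array *array, unsigned int idx)
{
	return (char *)array->list + idx * array->elem_size;
}

int main(void)
{
	struct snd_array a = { .elem_size = sizeof(int), .alloc_align = 16 };

	a.alloced = 8;
	a.list = calloc(a.alloced, a.elem_size);
	*(int *)snd_array_elem(&a, 3) = 42;	/* write element 3 */
	printf("%d\n", *(int *)snd_array_elem(&a, 3));
	free(a.list);
	return 0;
}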
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index c0ddb7e69c28..0cb7f3f5df7b 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -60,6 +60,9 @@ struct snd_pcm_hardware {
struct snd_pcm_substream;
+struct snd_pcm_audio_tstamp_config; /* definitions further down */
+struct snd_pcm_audio_tstamp_report;
+
struct snd_pcm_ops {
int (*open)(struct snd_pcm_substream *substream);
int (*close)(struct snd_pcm_substream *substream);
@@ -71,8 +74,10 @@ struct snd_pcm_ops {
int (*prepare)(struct snd_pcm_substream *substream);
int (*trigger)(struct snd_pcm_substream *substream, int cmd);
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream);
- int (*wall_clock)(struct snd_pcm_substream *substream,
- struct timespec *audio_ts);
+ int (*get_time_info)(struct snd_pcm_substream *substream,
+ struct timespec *system_ts, struct timespec *audio_ts,
+ struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+ struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*copy)(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos,
void __user *buf, snd_pcm_uframes_t count);
@@ -281,6 +286,58 @@ struct snd_pcm_hw_constraint_ranges {
struct snd_pcm_hwptr_log;
+/*
+ * userspace-provided audio timestamp config to the kernel;
+ * the structure is for internal use only and is filled by a dedicated unpack routine
+ */
+struct snd_pcm_audio_tstamp_config {
+ /* 5 of max 16 bits used */
+ u32 type_requested:4;
+ u32 report_delay:1; /* add total delay to A/D or D/A */
+};
+
+static inline void snd_pcm_unpack_audio_tstamp_config(__u32 data,
+ struct snd_pcm_audio_tstamp_config *config)
+{
+ config->type_requested = data & 0xF;
+ config->report_delay = (data >> 4) & 1;
+}
+
+/*
+ * kernel-provided audio timestamp report to user space;
+ * the structure is for internal use only and is read by a dedicated pack routine
+ */
+struct snd_pcm_audio_tstamp_report {
+ /* 6 of max 16 bits used for bit-fields */
+
+ /* for backwards compatibility */
+ u32 valid:1;
+
+ /* actual type if hardware could not support requested timestamp */
+ u32 actual_type:4;
+
+ /* accuracy represented in ns units */
+ u32 accuracy_report:1; /* 0 if accuracy unknown, 1 if accuracy field is valid */
+ u32 accuracy; /* up to 4.29s, will be packed in separate field */
+};
+
+static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy,
+ const struct snd_pcm_audio_tstamp_report *report)
+{
+ u32 tmp;
+
+ tmp = report->accuracy_report;
+ tmp <<= 4;
+ tmp |= report->actual_type;
+ tmp <<= 1;
+ tmp |= report->valid;
+
+ *data &= 0xffff; /* zero-clear MSBs */
+ *data |= (tmp << 16);
+ *accuracy = report->accuracy;
+}
+
+
struct snd_pcm_runtime {
/* -- Status -- */
struct snd_pcm_substream *trigger_master;
@@ -361,6 +418,11 @@ struct snd_pcm_runtime {
struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */
+ /* -- audio timestamp config -- */
+ struct snd_pcm_audio_tstamp_config audio_tstamp_config;
+ struct snd_pcm_audio_tstamp_report audio_tstamp_report;
+ struct timespec driver_tstamp;
+
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
/* -- OSS things -- */
struct snd_pcm_oss_runtime oss;
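The unpack/pack helpers above fix the bit layout of the audio timestamp words: the config word carries the requested type in bits 0-3 and the delay flag in bit 4, and the report is packed into the upper 16 bits of the status word. A small userspace sketch of the same arithmetic; only the bit operations are taken from the header, the sample values are made up:

#include <stdio.h>

int main(void)
{
	/* unpack a config word as snd_pcm_unpack_audio_tstamp_config() does */
	unsigned int data = 0x13;		/* arbitrary sample value */
	unsigned int type_requested = data & 0xF;
	unsigned int report_delay = (data >> 4) & 1;

	/* pack a report as snd_pcm_pack_audio_tstamp_report() does */
	unsigned int valid = 1, actual_type = 3, accuracy_report = 1;
	unsigned int tmp = accuracy_report;
	unsigned int status = 0xdeadbeef;	/* pretend existing status word */

	tmp <<= 4;
	tmp |= actual_type;
	tmp <<= 1;
	tmp |= valid;
	status &= 0xffff;			/* zero-clear the MSBs */
	status |= tmp << 16;

	printf("type=%u delay=%u packed=0x%08x\n",
	       type_requested, report_delay, status);
	return 0;
}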
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 3c45f3924ba7..c704357775fc 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -366,4 +366,11 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
return snd_pcm_format_physical_width(params_format(p));
}
+static inline void
+params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
+{
+ snd_mask_set(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT),
+ (__force int)fmt);
+}
+
#endif /* __SOUND_PCM_PARAMS_H */
diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h
index bd311197a3b5..b7d60510819b 100644
--- a/include/sound/rt5670.h
+++ b/include/sound/rt5670.h
@@ -14,6 +14,7 @@
struct rt5670_platform_data {
int jd_mode;
bool in2_diff;
+ bool dev_gpio;
bool dmic_en;
unsigned int dmic1_data_pin;
diff --git a/include/sound/seq_device.h b/include/sound/seq_device.h
index 2b5f24cc7548..ddc0d504cf39 100644
--- a/include/sound/seq_device.h
+++ b/include/sound/seq_device.h
@@ -25,29 +25,26 @@
* registered device information
*/
-#define ID_LEN 32
-
-/* status flag */
-#define SNDRV_SEQ_DEVICE_FREE 0
-#define SNDRV_SEQ_DEVICE_REGISTERED 1
-
struct snd_seq_device {
/* device info */
struct snd_card *card; /* sound card */
int device; /* device number */
- char id[ID_LEN]; /* driver id */
+ const char *id; /* driver id */
char name[80]; /* device name */
int argsize; /* size of the argument */
void *driver_data; /* private data for driver */
- int status; /* flag - read only */
void *private_data; /* private data for the caller */
void (*private_free)(struct snd_seq_device *device);
- struct list_head list; /* link to next device */
+ struct device dev;
};
+#define to_seq_dev(_dev) \
+ container_of(_dev, struct snd_seq_device, dev)
+
+/* sequencer driver */
/* driver operators
- * init_device:
+ * probe:
* Initialize the device with given parameters.
* Typically,
* 1. call snd_hwdep_new
@@ -55,25 +52,40 @@ struct snd_seq_device {
* 3. call snd_hwdep_register
* 4. store the instance to dev->driver_data pointer.
*
- * free_device:
+ * remove:
* Release the private data.
* Typically, call snd_device_free(dev->card, dev->driver_data)
*/
-struct snd_seq_dev_ops {
- int (*init_device)(struct snd_seq_device *dev);
- int (*free_device)(struct snd_seq_device *dev);
+struct snd_seq_driver {
+ struct device_driver driver;
+ char *id;
+ int argsize;
};
+#define to_seq_drv(_drv) \
+ container_of(_drv, struct snd_seq_driver, driver)
+
/*
* prototypes
*/
+#ifdef CONFIG_MODULES
void snd_seq_device_load_drivers(void);
-int snd_seq_device_new(struct snd_card *card, int device, char *id, int argsize, struct snd_seq_device **result);
-int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry, int argsize);
-int snd_seq_device_unregister_driver(char *id);
+#else
+#define snd_seq_device_load_drivers()
+#endif
+int snd_seq_device_new(struct snd_card *card, int device, const char *id,
+ int argsize, struct snd_seq_device **result);
#define SNDRV_SEQ_DEVICE_ARGPTR(dev) (void *)((char *)(dev) + sizeof(struct snd_seq_device))
+int __must_check __snd_seq_driver_register(struct snd_seq_driver *drv,
+ struct module *mod);
+#define snd_seq_driver_register(drv) \
+ __snd_seq_driver_register(drv, THIS_MODULE)
+void snd_seq_driver_unregister(struct snd_seq_driver *drv);
+
+#define module_snd_seq_driver(drv) \
+ module_driver(drv, snd_seq_driver_register, snd_seq_driver_unregister)
/*
* id strings for generic devices
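With snd_seq_dev_ops gone, a sequencer driver now binds through the driver core via snd_seq_driver and module_snd_seq_driver(). A hedged sketch of the glue a driver might use; every my_* name, the id string, and the argsize are hypothetical placeholders:

#include <linux/module.h>
#include <sound/seq_device.h>

static int my_seq_probe(struct device *dev)
{
	struct snd_seq_device *sdev = to_seq_dev(dev);

	sdev->driver_data = NULL;	/* a real driver creates its client here */
	return 0;
}

static int my_seq_remove(struct device *dev)
{
	/* a real driver releases whatever probe allocated */
	return 0;
}

static struct snd_seq_driver my_seq_driver = {
	.driver = {
		.name	= "my-seq-driver",
		.probe	= my_seq_probe,
		.remove	= my_seq_remove,
	},
	.id	= "my-seq-dev-id",	/* must match the id passed to snd_seq_device_new() */
	.argsize = sizeof(int),		/* placeholder argument size */
};

module_snd_seq_driver(my_seq_driver);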
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index 18a2ac58b88f..feb58d455560 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -99,13 +99,9 @@ int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp,
int snd_seq_event_port_detach(int client, int port);
#ifdef CONFIG_MODULES
-void snd_seq_autoload_lock(void);
-void snd_seq_autoload_unlock(void);
void snd_seq_autoload_init(void);
-#define snd_seq_autoload_exit() snd_seq_autoload_lock()
+void snd_seq_autoload_exit(void);
#else
-#define snd_seq_autoload_lock()
-#define snd_seq_autoload_unlock()
#define snd_seq_autoload_init()
#define snd_seq_autoload_exit()
#endif
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index 1255ddb1d3e2..b9b4f289fe6b 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -16,7 +16,6 @@
struct asoc_simple_dai {
const char *name;
- unsigned int fmt;
unsigned int sysclk;
int slots;
int slot_width;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 8d7416e46861..1065095c6973 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -287,7 +287,7 @@ struct device;
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.tlv.p = (tlv_array), \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
#define SOC_DAPM_ENUM(xname, xenum) \
@@ -378,6 +378,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
const struct snd_soc_pcm_stream *params,
+ unsigned int num_params,
struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
@@ -440,7 +441,6 @@ void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list);
-struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol);
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
@@ -531,6 +531,8 @@ struct snd_soc_dapm_widget {
void *priv; /* widget specific data */
struct regulator *regulator; /* attached regulator */
const struct snd_soc_pcm_stream *params; /* params for dai links */
+ unsigned int num_params; /* number of params for dai links */
+ unsigned int params_select; /* currently selected param for dai link */
/* dapm control */
int reg; /* negative reg = no direct dapm */
@@ -586,8 +588,6 @@ struct snd_soc_dapm_update {
/* DAPM context */
struct snd_soc_dapm_context {
enum snd_soc_bias_level bias_level;
- enum snd_soc_bias_level suspend_bias_level;
- struct delayed_work delayed_work;
unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
/* Go to BIAS_OFF in suspend if the DAPM context is idle */
unsigned int suspend_bias_off:1;
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 98f2ade0266e..806059052bfc 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -135,7 +135,7 @@ void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream,
/* internal use only */
int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute);
-int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
+void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
int soc_dpcm_runtime_update(struct snd_soc_card *);
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 0d1ade195628..f6226914acfe 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -387,8 +387,20 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
int snd_soc_register_card(struct snd_soc_card *card);
int snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
+#ifdef CONFIG_PM_SLEEP
int snd_soc_suspend(struct device *dev);
int snd_soc_resume(struct device *dev);
+#else
+static inline int snd_soc_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int snd_soc_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
int snd_soc_poweroff(struct device *dev);
int snd_soc_register_platform(struct device *dev,
const struct snd_soc_platform_driver *platform_drv);
@@ -450,8 +462,10 @@ int soc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai);
/* Jack reporting */
-int snd_soc_jack_new(struct snd_soc_codec *codec, const char *id, int type,
- struct snd_soc_jack *jack);
+int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
+ struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins,
+ unsigned int num_pins);
+
void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_pin *pins);
@@ -659,7 +673,7 @@ struct snd_soc_jack_gpio {
struct snd_soc_jack {
struct mutex mutex;
struct snd_jack *jack;
- struct snd_soc_codec *codec;
+ struct snd_soc_card *card;
struct list_head pins;
int status;
struct blocking_notifier_head notifier;
@@ -941,6 +955,7 @@ struct snd_soc_dai_link {
int be_id; /* optional ID for machine driver BE identification */
const struct snd_soc_pcm_stream *params;
+ unsigned int num_params;
unsigned int dai_fmt; /* format to set on init */
@@ -954,6 +969,9 @@ struct snd_soc_dai_link {
unsigned int symmetric_channels:1;
unsigned int symmetric_samplebits:1;
+ /* Mark this pcm with non atomic ops */
+ bool nonatomic;
+
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int no_pcm:1;
@@ -1071,11 +1089,16 @@ struct snd_soc_card {
/*
* Card-specific routes and widgets.
+ * Note: the of_dapm_xxx fields come from Device Tree; the rest are driver built-in.
*/
const struct snd_soc_dapm_widget *dapm_widgets;
int num_dapm_widgets;
const struct snd_soc_dapm_route *dapm_routes;
int num_dapm_routes;
+ const struct snd_soc_dapm_widget *of_dapm_widgets;
+ int num_of_dapm_widgets;
+ const struct snd_soc_dapm_route *of_dapm_routes;
+ int num_of_dapm_routes;
bool fully_routed;
struct work_struct deferred_resume_work;
@@ -1258,6 +1281,19 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
return component->dapm_ptr;
}
+/**
+ * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
+ * @kcontrol: The kcontrol
+ *
+ * This function must only be used on DAPM contexts that are known to be part of
+ * a CODEC (e.g. in a CODEC driver). Otherwise the behavior is undefined.
+ */
+static inline struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(
+ struct snd_kcontrol *kcontrol)
+{
+ return snd_soc_dapm_to_codec(snd_soc_dapm_kcontrol_dapm(kcontrol));
+}
+
/* codec IO */
unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg,
@@ -1469,7 +1505,7 @@ static inline struct snd_soc_codec *snd_soc_kcontrol_codec(
}
/**
- * snd_soc_kcontrol_platform() - Returns the platform that registerd the control
+ * snd_soc_kcontrol_platform() - Returns the platform that registered the control
* @kcontrol: The control for which to get the platform
*
* Note: This function will only work correctly if the control has been
diff --git a/include/sound/spear_dma.h b/include/sound/spear_dma.h
index 65aca51fe255..e290de4e7e82 100644
--- a/include/sound/spear_dma.h
+++ b/include/sound/spear_dma.h
@@ -1,7 +1,7 @@
/*
* linux/spear_dma.h
*
-* Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+* Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index d3583d3ee193..006983b296dd 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -5,7 +5,6 @@
#include <linux/configfs.h>
#include <net/sock.h>
#include <net/tcp.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
@@ -20,6 +19,8 @@
#define ISCSIT_MIN_TAGS 16
#define ISCSIT_EXTRA_TAGS 8
#define ISCSIT_TCP_BACKLOG 256
+#define ISCSI_RX_THREAD_NAME "iscsi_trx"
+#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -60,6 +61,7 @@
#define TA_CACHE_CORE_NPS 0
/* T10 protection information disabled by default */
#define TA_DEFAULT_T10_PI 0
+#define TA_DEFAULT_FABRIC_PROT_TYPE 0
#define ISCSI_IOV_DATA_BUFFER 5
@@ -600,8 +602,11 @@ struct iscsi_conn {
struct iscsi_tpg_np *tpg_np;
/* Pointer to parent session */
struct iscsi_session *sess;
- /* Pointer to thread_set in use for this conn's threads */
- struct iscsi_thread_set *thread_set;
+ int bitmap_id;
+ int rx_thread_active;
+ struct task_struct *rx_thread;
+ int tx_thread_active;
+ struct task_struct *tx_thread;
/* list_head for session connection list */
struct list_head conn_list;
} ____cacheline_aligned;
@@ -767,6 +772,7 @@ struct iscsi_tpg_attrib {
u32 demo_mode_discovery;
u32 default_erl;
u8 t10_pi;
+ u32 fabric_prot_type;
struct iscsi_portal_group *tpg;
};
@@ -871,10 +877,10 @@ struct iscsit_global {
/* Unique identifier used for the authentication daemon */
u32 auth_id;
u32 inactive_ts;
- /* Thread Set bitmap count */
- int ts_bitmap_count;
+#define ISCSIT_BITMAP_BITS 262144
/* Thread Set bitmap pointer */
unsigned long *ts_bitmap;
+ spinlock_t ts_bitmap_lock;
/* Used for iSCSI discovery session authentication */
struct iscsi_node_acl discovery_acl;
struct iscsi_portal_group *discovery_tpg;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index d61be7297b2c..5f1225706993 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -1,9 +1,7 @@
#ifndef TARGET_CORE_BACKEND_H
#define TARGET_CORE_BACKEND_H
-#define TRANSPORT_PLUGIN_PHBA_PDEV 1
-#define TRANSPORT_PLUGIN_VHBA_PDEV 2
-#define TRANSPORT_PLUGIN_VHBA_VDEV 3
+#define TRANSPORT_FLAG_PASSTHROUGH 1
struct target_backend_cits {
struct config_item_type tb_dev_cit;
@@ -22,7 +20,7 @@ struct se_subsystem_api {
char inquiry_rev[4];
struct module *owner;
- u8 transport_type;
+ u8 transport_flags;
int (*attach_hba)(struct se_hba *, u32);
void (*detach_hba)(struct se_hba *);
@@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
+sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
+ sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 672150b6aaf5..aec6f6a4477c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -6,7 +6,6 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/percpu_ida.h>
-#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -17,23 +16,15 @@
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
/*
- * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
- *
- * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
- * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
- * 16-byte CDBs by default and require an extra allocation for
- * 32-byte CDBs to because of legacy issues.
- *
- * Within TCM Core there are no such legacy limitiations, so we go ahead
- * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
- * within all TCM Core and subsystem plugin code.
+ * Maximum size of a CDB that can be stored in se_cmd without allocating
+ * memory dynamically for the CDB.
*/
#define TCM_MAX_COMMAND_SIZE 32
/*
* From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
* defined 96, but the real limit is 252 (or 260 including the header)
*/
-#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
+#define TRANSPORT_SENSE_BUFFER 96
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET 2
#define SPC_ADD_SENSE_LEN_OFFSET 7
@@ -165,10 +156,8 @@ enum se_cmd_flags_table {
SCF_SEND_DELAYED_TAS = 0x00004000,
SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
- SCF_ACK_KREF = 0x00040000,
SCF_COMPARE_AND_WRITE = 0x00080000,
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
- SCF_CMD_XCOPY_PASSTHROUGH = 0x00200000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -520,11 +509,11 @@ struct se_cmd {
struct list_head se_cmd_list;
struct completion cmd_wait_comp;
struct kref cmd_kref;
- struct target_core_fabric_ops *se_tfo;
+ const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
u32, enum dma_data_direction);
- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
+ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -591,6 +580,7 @@ struct se_node_acl {
bool acl_stop:1;
u32 queue_depth;
u32 acl_index;
+ enum target_prot_type saved_prot_type;
#define MAX_ACL_TAG_SIZE 64
char acl_tag[MAX_ACL_TAG_SIZE];
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
@@ -616,6 +606,7 @@ struct se_session {
unsigned sess_tearing_down:1;
u64 sess_bin_isid;
enum target_prot_op sup_prot_ops;
+ enum target_prot_type sess_prot_type;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
@@ -890,7 +881,7 @@ struct se_portal_group {
 /* List of TCM sessions associated with this TPG */
struct list_head tpg_sess_list;
/* Pointer to $FABRIC_MOD dependent code */
- struct target_core_fabric_ops *se_tpg_tfo;
+ const struct target_core_fabric_ops *se_tpg_tfo;
struct se_wwn *se_tpg_wwn;
struct config_group tpg_group;
struct config_group *tpg_default_groups[7];
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
index e0801386e4dc..b99c01170392 100644
--- a/include/target/target_core_configfs.h
+++ b/include/target/target_core_configfs.h
@@ -5,12 +5,6 @@
#define TARGET_CORE_NAME_MAX_LEN 64
#define TARGET_FABRIC_NAME_SIZE 32
-extern struct target_fabric_configfs *target_fabric_configfs_init(
- struct module *, const char *);
-extern void target_fabric_configfs_free(struct target_fabric_configfs *);
-extern int target_fabric_configfs_register(struct target_fabric_configfs *);
-extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
-
struct target_fabric_configfs_template {
struct config_item_type tfc_discovery_cit;
struct config_item_type tfc_wwn_cit;
@@ -46,8 +40,6 @@ struct target_fabric_configfs {
struct config_item *tf_fabric;
/* Passed from fabric modules */
struct config_item_type *tf_fabric_cit;
- /* Pointer to target core subsystem */
- struct configfs_subsystem *tf_subsys;
/* Pointer to fabric's struct module */
struct module *tf_module;
struct target_core_fabric_ops tf_ops;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 22a4e98eec80..0f4dc3768587 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -2,7 +2,8 @@
#define TARGET_CORE_FABRIC_H
struct target_core_fabric_ops {
- struct configfs_subsystem *tf_subsys;
+ struct module *module;
+ const char *name;
char *(*get_fabric_name)(void);
u8 (*get_fabric_proto_ident)(struct se_portal_group *);
char *(*tpg_get_wwn)(struct se_portal_group *);
@@ -27,6 +28,14 @@ struct target_core_fabric_ops {
* inquiry response
*/
int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
+ /*
+ * Optionally used as a configfs tunable to determine when
+ * target-core should signal the PROTECT=1 feature bit for
+ * backends that don't support T10-PI, so that either fabric
+ * HW offload or target-core emulation performs the associated
+ * WRITE_STRIP and READ_INSERT operations.
+ */
+ int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
struct se_node_acl *(*tpg_alloc_fabric_acl)(
struct se_portal_group *);
void (*tpg_release_fabric_acl)(struct se_portal_group *,
@@ -82,8 +91,26 @@ struct target_core_fabric_ops {
struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
struct config_group *, const char *);
void (*fabric_drop_nodeacl)(struct se_node_acl *);
+
+ struct configfs_attribute **tfc_discovery_attrs;
+ struct configfs_attribute **tfc_wwn_attrs;
+ struct configfs_attribute **tfc_tpg_base_attrs;
+ struct configfs_attribute **tfc_tpg_np_base_attrs;
+ struct configfs_attribute **tfc_tpg_attrib_attrs;
+ struct configfs_attribute **tfc_tpg_auth_attrs;
+ struct configfs_attribute **tfc_tpg_param_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_base_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_param_attrs;
};
+int target_register_template(const struct target_core_fabric_ops *fo);
+void target_unregister_template(const struct target_core_fabric_ops *fo);
+
+int target_depend_item(struct config_item *item);
+void target_undepend_item(struct config_item *item);
+
struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
@@ -95,13 +122,15 @@ void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void target_get_session(struct se_session *);
void target_put_session(struct se_session *);
+ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
-void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
+void transport_init_se_cmd(struct se_cmd *,
+ const struct target_core_fabric_ops *,
struct se_session *, u32, int, int, unsigned char *);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
@@ -153,8 +182,8 @@ int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
unsigned char *, u32, int);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
struct se_node_acl *, const char *);
-int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
- struct se_portal_group *, void *, int);
+int core_tpg_register(const struct target_core_fabric_ops *,
+ struct se_wwn *, struct se_portal_group *, void *, int);
int core_tpg_deregister(struct se_portal_group *);
/* SAS helpers */
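target_register_template() replaces the old target_fabric_configfs_init()/register() sequence with one call on a const ops table. A rough sketch of how a fabric module might wire it up; the fabric name and callback are placeholders, and a real fabric must fill in the full set of mandatory ops and tfc_*_attrs arrays:

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static char *my_get_fabric_name(void)
{
	return "myfabric";		/* placeholder fabric name */
}

static const struct target_core_fabric_ops my_fabric_ops = {
	.module			= THIS_MODULE,
	.name			= "myfabric",
	.get_fabric_name	= my_get_fabric_name,
	/* ... remaining mandatory callbacks and configfs attribute arrays ... */
};

static int __init my_fabric_init(void)
{
	return target_register_template(&my_fabric_ops);
}

static void __exit my_fabric_exit(void)
{
	target_unregister_template(&my_fabric_ops);
}

module_init(my_fabric_init);
module_exit(my_fabric_exit);
MODULE_LICENSE("GPL");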
diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h
index b32a14905cfa..7a0649c09e79 100644
--- a/include/target/target_core_fabric_configfs.h
+++ b/include/target/target_core_fabric_configfs.h
@@ -90,6 +90,11 @@ static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \
_fabric##_tpg_store_##_name);
+#define TF_TPG_BASE_ATTR_RO(_fabric, _name) \
+static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ _fabric##_tpg_show_##_name);
+
CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs);
#define TF_WWN_ATTR(_fabric, _name, _mode) \
static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index a0666362c111..633ee9ee9778 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -6,76 +6,95 @@
#include <linux/tracepoint.h>
+#define P9_MSG_T \
+ EM( P9_TLERROR, "P9_TLERROR" ) \
+ EM( P9_RLERROR, "P9_RLERROR" ) \
+ EM( P9_TSTATFS, "P9_TSTATFS" ) \
+ EM( P9_RSTATFS, "P9_RSTATFS" ) \
+ EM( P9_TLOPEN, "P9_TLOPEN" ) \
+ EM( P9_RLOPEN, "P9_RLOPEN" ) \
+ EM( P9_TLCREATE, "P9_TLCREATE" ) \
+ EM( P9_RLCREATE, "P9_RLCREATE" ) \
+ EM( P9_TSYMLINK, "P9_TSYMLINK" ) \
+ EM( P9_RSYMLINK, "P9_RSYMLINK" ) \
+ EM( P9_TMKNOD, "P9_TMKNOD" ) \
+ EM( P9_RMKNOD, "P9_RMKNOD" ) \
+ EM( P9_TRENAME, "P9_TRENAME" ) \
+ EM( P9_RRENAME, "P9_RRENAME" ) \
+ EM( P9_TREADLINK, "P9_TREADLINK" ) \
+ EM( P9_RREADLINK, "P9_RREADLINK" ) \
+ EM( P9_TGETATTR, "P9_TGETATTR" ) \
+ EM( P9_RGETATTR, "P9_RGETATTR" ) \
+ EM( P9_TSETATTR, "P9_TSETATTR" ) \
+ EM( P9_RSETATTR, "P9_RSETATTR" ) \
+ EM( P9_TXATTRWALK, "P9_TXATTRWALK" ) \
+ EM( P9_RXATTRWALK, "P9_RXATTRWALK" ) \
+ EM( P9_TXATTRCREATE, "P9_TXATTRCREATE" ) \
+ EM( P9_RXATTRCREATE, "P9_RXATTRCREATE" ) \
+ EM( P9_TREADDIR, "P9_TREADDIR" ) \
+ EM( P9_RREADDIR, "P9_RREADDIR" ) \
+ EM( P9_TFSYNC, "P9_TFSYNC" ) \
+ EM( P9_RFSYNC, "P9_RFSYNC" ) \
+ EM( P9_TLOCK, "P9_TLOCK" ) \
+ EM( P9_RLOCK, "P9_RLOCK" ) \
+ EM( P9_TGETLOCK, "P9_TGETLOCK" ) \
+ EM( P9_RGETLOCK, "P9_RGETLOCK" ) \
+ EM( P9_TLINK, "P9_TLINK" ) \
+ EM( P9_RLINK, "P9_RLINK" ) \
+ EM( P9_TMKDIR, "P9_TMKDIR" ) \
+ EM( P9_RMKDIR, "P9_RMKDIR" ) \
+ EM( P9_TRENAMEAT, "P9_TRENAMEAT" ) \
+ EM( P9_RRENAMEAT, "P9_RRENAMEAT" ) \
+ EM( P9_TUNLINKAT, "P9_TUNLINKAT" ) \
+ EM( P9_RUNLINKAT, "P9_RUNLINKAT" ) \
+ EM( P9_TVERSION, "P9_TVERSION" ) \
+ EM( P9_RVERSION, "P9_RVERSION" ) \
+ EM( P9_TAUTH, "P9_TAUTH" ) \
+ EM( P9_RAUTH, "P9_RAUTH" ) \
+ EM( P9_TATTACH, "P9_TATTACH" ) \
+ EM( P9_RATTACH, "P9_RATTACH" ) \
+ EM( P9_TERROR, "P9_TERROR" ) \
+ EM( P9_RERROR, "P9_RERROR" ) \
+ EM( P9_TFLUSH, "P9_TFLUSH" ) \
+ EM( P9_RFLUSH, "P9_RFLUSH" ) \
+ EM( P9_TWALK, "P9_TWALK" ) \
+ EM( P9_RWALK, "P9_RWALK" ) \
+ EM( P9_TOPEN, "P9_TOPEN" ) \
+ EM( P9_ROPEN, "P9_ROPEN" ) \
+ EM( P9_TCREATE, "P9_TCREATE" ) \
+ EM( P9_RCREATE, "P9_RCREATE" ) \
+ EM( P9_TREAD, "P9_TREAD" ) \
+ EM( P9_RREAD, "P9_RREAD" ) \
+ EM( P9_TWRITE, "P9_TWRITE" ) \
+ EM( P9_RWRITE, "P9_RWRITE" ) \
+ EM( P9_TCLUNK, "P9_TCLUNK" ) \
+ EM( P9_RCLUNK, "P9_RCLUNK" ) \
+ EM( P9_TREMOVE, "P9_TREMOVE" ) \
+ EM( P9_RREMOVE, "P9_RREMOVE" ) \
+ EM( P9_TSTAT, "P9_TSTAT" ) \
+ EM( P9_RSTAT, "P9_RSTAT" ) \
+ EM( P9_TWSTAT, "P9_TWSTAT" ) \
+ EMe(P9_RWSTAT, "P9_RWSTAT" )
+
+/* Define EM() to export the enums to userspace via TRACE_DEFINE_ENUM() */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+P9_MSG_T
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
#define show_9p_op(type) \
- __print_symbolic(type, \
- { P9_TLERROR, "P9_TLERROR" }, \
- { P9_RLERROR, "P9_RLERROR" }, \
- { P9_TSTATFS, "P9_TSTATFS" }, \
- { P9_RSTATFS, "P9_RSTATFS" }, \
- { P9_TLOPEN, "P9_TLOPEN" }, \
- { P9_RLOPEN, "P9_RLOPEN" }, \
- { P9_TLCREATE, "P9_TLCREATE" }, \
- { P9_RLCREATE, "P9_RLCREATE" }, \
- { P9_TSYMLINK, "P9_TSYMLINK" }, \
- { P9_RSYMLINK, "P9_RSYMLINK" }, \
- { P9_TMKNOD, "P9_TMKNOD" }, \
- { P9_RMKNOD, "P9_RMKNOD" }, \
- { P9_TRENAME, "P9_TRENAME" }, \
- { P9_RRENAME, "P9_RRENAME" }, \
- { P9_TREADLINK, "P9_TREADLINK" }, \
- { P9_RREADLINK, "P9_RREADLINK" }, \
- { P9_TGETATTR, "P9_TGETATTR" }, \
- { P9_RGETATTR, "P9_RGETATTR" }, \
- { P9_TSETATTR, "P9_TSETATTR" }, \
- { P9_RSETATTR, "P9_RSETATTR" }, \
- { P9_TXATTRWALK, "P9_TXATTRWALK" }, \
- { P9_RXATTRWALK, "P9_RXATTRWALK" }, \
- { P9_TXATTRCREATE, "P9_TXATTRCREATE" }, \
- { P9_RXATTRCREATE, "P9_RXATTRCREATE" }, \
- { P9_TREADDIR, "P9_TREADDIR" }, \
- { P9_RREADDIR, "P9_RREADDIR" }, \
- { P9_TFSYNC, "P9_TFSYNC" }, \
- { P9_RFSYNC, "P9_RFSYNC" }, \
- { P9_TLOCK, "P9_TLOCK" }, \
- { P9_RLOCK, "P9_RLOCK" }, \
- { P9_TGETLOCK, "P9_TGETLOCK" }, \
- { P9_RGETLOCK, "P9_RGETLOCK" }, \
- { P9_TLINK, "P9_TLINK" }, \
- { P9_RLINK, "P9_RLINK" }, \
- { P9_TMKDIR, "P9_TMKDIR" }, \
- { P9_RMKDIR, "P9_RMKDIR" }, \
- { P9_TRENAMEAT, "P9_TRENAMEAT" }, \
- { P9_RRENAMEAT, "P9_RRENAMEAT" }, \
- { P9_TUNLINKAT, "P9_TUNLINKAT" }, \
- { P9_RUNLINKAT, "P9_RUNLINKAT" }, \
- { P9_TVERSION, "P9_TVERSION" }, \
- { P9_RVERSION, "P9_RVERSION" }, \
- { P9_TAUTH, "P9_TAUTH" }, \
- { P9_RAUTH, "P9_RAUTH" }, \
- { P9_TATTACH, "P9_TATTACH" }, \
- { P9_RATTACH, "P9_RATTACH" }, \
- { P9_TERROR, "P9_TERROR" }, \
- { P9_RERROR, "P9_RERROR" }, \
- { P9_TFLUSH, "P9_TFLUSH" }, \
- { P9_RFLUSH, "P9_RFLUSH" }, \
- { P9_TWALK, "P9_TWALK" }, \
- { P9_RWALK, "P9_RWALK" }, \
- { P9_TOPEN, "P9_TOPEN" }, \
- { P9_ROPEN, "P9_ROPEN" }, \
- { P9_TCREATE, "P9_TCREATE" }, \
- { P9_RCREATE, "P9_RCREATE" }, \
- { P9_TREAD, "P9_TREAD" }, \
- { P9_RREAD, "P9_RREAD" }, \
- { P9_TWRITE, "P9_TWRITE" }, \
- { P9_RWRITE, "P9_RWRITE" }, \
- { P9_TCLUNK, "P9_TCLUNK" }, \
- { P9_RCLUNK, "P9_RCLUNK" }, \
- { P9_TREMOVE, "P9_TREMOVE" }, \
- { P9_RREMOVE, "P9_RREMOVE" }, \
- { P9_TSTAT, "P9_TSTAT" }, \
- { P9_RSTAT, "P9_RSTAT" }, \
- { P9_TWSTAT, "P9_TWSTAT" }, \
- { P9_RWSTAT, "P9_RWSTAT" })
+ __print_symbolic(type, P9_MSG_T)
TRACE_EVENT(9p_client_req,
TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
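The EM()/EMe() rewrite above expands a single message list twice: once as TRACE_DEFINE_ENUM() statements so the enum values are visible to userspace, then as the { value, "string" } pairs that __print_symbolic() consumes. The same two-pass trick in a tiny standalone program; the colour list is invented for illustration:

#include <stdio.h>

#define COLOR_LIST \
	EM(RED,   "red")   \
	EM(GREEN, "green") \
	EMe(BLUE, "blue")

/* first pass: emit the enum constants */
#define EM(a, b)  a,
#define EMe(a, b) a
enum color { COLOR_LIST };

/* second pass: emit a name table from the very same list */
#undef EM
#undef EMe
#define EM(a, b)  [a] = b,
#define EMe(a, b) [a] = b
static const char *color_name[] = { COLOR_LIST };

int main(void)
{
	printf("%s\n", color_name[GREEN]);
	return 0;
}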
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 1faecea101f3..7f79cf459591 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -407,10 +407,10 @@ TRACE_EVENT(btrfs_sync_file,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
__entry->ino = inode->i_ino;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
__entry->datasync = datasync;
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
@@ -962,7 +962,7 @@ TRACE_EVENT(alloc_extent_state,
__entry->ip = IP
),
- TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+ TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
show_gfp_flags(__entry->mask), (void *)__entry->ip)
);
@@ -982,7 +982,7 @@ TRACE_EVENT(free_extent_state,
__entry->ip = IP
),
- TP_printk(" state=%p; caller = %pF", __entry->state,
+ TP_printk(" state=%p; caller = %pS", __entry->state,
(void *)__entry->ip)
);
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
new file mode 100644
index 000000000000..758607226bfd
--- /dev/null
+++ b/include/trace/events/clk.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM clk
+
+#if !defined(_TRACE_CLK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CLK_H
+
+#include <linux/tracepoint.h>
+
+struct clk_core;
+
+DECLARE_EVENT_CLASS(clk,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(clk, clk_enable,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_enable_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_disable,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_disable_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_prepare,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_prepare_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_unprepare,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_unprepare_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DECLARE_EVENT_CLASS(clk_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field(unsigned long, rate )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->rate = rate;
+ ),
+
+ TP_printk("%s %lu", __get_str(name), (unsigned long)__entry->rate)
+);
+
+DEFINE_EVENT(clk_rate, clk_set_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DEFINE_EVENT(clk_rate, clk_set_rate_complete,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DECLARE_EVENT_CLASS(clk_parent,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __string( pname, parent->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __assign_str(pname, parent->name);
+ ),
+
+ TP_printk("%s %s", __get_str(name), __get_str(pname))
+);
+
+DEFINE_EVENT(clk_parent, clk_set_parent,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent)
+);
+
+DEFINE_EVENT(clk_parent, clk_set_parent_complete,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent)
+);
+
+DECLARE_EVENT_CLASS(clk_phase,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field( int, phase )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->phase = phase;
+ ),
+
+ TP_printk("%s %d", __get_str(name), (int)__entry->phase)
+);
+
+DEFINE_EVENT(clk_phase, clk_set_phase,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase)
+);
+
+DEFINE_EVENT(clk_phase, clk_set_phase_complete,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase)
+);
+
+#endif /* _TRACE_CLK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
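
The new clk.h header defines a single event class keyed only on the clock name, plus paired start/complete events for enable, disable, prepare, unprepare, set_rate, set_parent and set_phase. A hedged sketch of how the clk core would emit such a pair; clk_hw_enable() is a placeholder rather than the real drivers/clk/clk.c code, and the tracepoints themselves are instantiated once by defining CREATE_TRACE_POINTS before the include:

/* Illustrative sketch only, not the actual clk core implementation. */
#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

static int clk_hw_enable(struct clk_core *core)
{
	return 0;	/* placeholder for the real hardware enable operation */
}

static int clk_core_enable_traced(struct clk_core *core)
{
	int ret;

	trace_clk_enable(core);			/* records core->name */
	ret = clk_hw_enable(core);
	trace_clk_enable_complete(core);	/* completion marker, e.g. for latency analysis */

	return ret;
}
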
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
new file mode 100644
index 000000000000..d7cd961720a7
--- /dev/null
+++ b/include/trace/events/cma.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cma
+
+#if !defined(_TRACE_CMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CMA_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cma_alloc,
+
+ TP_PROTO(unsigned long pfn, const struct page *page,
+ unsigned int count, unsigned int align),
+
+ TP_ARGS(pfn, page, count, align),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
+ __field(unsigned int, count)
+ __field(unsigned int, align)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->page = page;
+ __entry->count = count;
+ __entry->align = align;
+ ),
+
+ TP_printk("pfn=%lx page=%p count=%u align=%u",
+ __entry->pfn,
+ __entry->page,
+ __entry->count,
+ __entry->align)
+);
+
+TRACE_EVENT(cma_release,
+
+ TP_PROTO(unsigned long pfn, const struct page *page,
+ unsigned int count),
+
+ TP_ARGS(pfn, page, count),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->page = page;
+ __entry->count = count;
+ ),
+
+ TP_printk("pfn=%lx page=%p count=%u",
+ __entry->pfn,
+ __entry->page,
+ __entry->count)
+);
+
+#endif /* _TRACE_CMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
index 6797b9de90ed..fc733d28117a 100644
--- a/include/trace/events/ext3.h
+++ b/include/trace/events/ext3.h
@@ -144,7 +144,7 @@ TRACE_EVENT(ext3_mark_inode_dirty,
__entry->ip = IP;
),
- TP_printk("dev %d,%d ino %lu caller %pF",
+ TP_printk("dev %d,%d ino %lu caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, (void *)__entry->ip)
);
@@ -439,10 +439,10 @@ TRACE_EVENT(ext3_sync_file_enter,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->datasync = datasync;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
),
TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
@@ -710,9 +710,9 @@ TRACE_EVENT(ext3_unlink_enter,
TP_fast_assign(
__entry->parent = parent->i_ino;
- __entry->ino = dentry->d_inode->i_ino;
- __entry->size = dentry->d_inode->i_size;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
+ __entry->size = d_inode(dentry)->i_size;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
),
TP_printk("dev %d,%d ino %lu size %lld parent %ld",
@@ -734,8 +734,8 @@ TRACE_EVENT(ext3_unlink_exit,
),
TP_fast_assign(
- __entry->ino = dentry->d_inode->i_ino;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
__entry->ret = ret;
),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 6e5abd6d38a2..594b4b29a224 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -240,7 +240,7 @@ TRACE_EVENT(ext4_mark_inode_dirty,
__entry->ip = IP;
),
- TP_printk("dev %d,%d ino %lu caller %pF",
+ TP_printk("dev %d,%d ino %lu caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, (void *)__entry->ip)
);
@@ -872,10 +872,10 @@ TRACE_EVENT(ext4_sync_file_enter,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->datasync = datasync;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
),
TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
@@ -1185,15 +1185,14 @@ TRACE_EVENT(ext4_da_update_reserve_space,
);
TRACE_EVENT(ext4_da_reserve_space,
- TP_PROTO(struct inode *inode, int md_needed),
+ TP_PROTO(struct inode *inode),
- TP_ARGS(inode, md_needed),
+ TP_ARGS(inode),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u64, i_blocks )
- __field( int, md_needed )
__field( int, reserved_data_blocks )
__field( int, reserved_meta_blocks )
__field( __u16, mode )
@@ -1203,18 +1202,17 @@ TRACE_EVENT(ext4_da_reserve_space,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->i_blocks = inode->i_blocks;
- __entry->md_needed = md_needed;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu "
"reserved_data_blocks %d reserved_meta_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->i_blocks,
- __entry->md_needed, __entry->reserved_data_blocks,
+ __entry->reserved_data_blocks,
__entry->reserved_meta_blocks)
);
@@ -1453,10 +1451,10 @@ TRACE_EVENT(ext4_unlink_enter,
),
TP_fast_assign(
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->parent = parent->i_ino;
- __entry->size = dentry->d_inode->i_size;
+ __entry->size = d_inode(dentry)->i_size;
),
TP_printk("dev %d,%d ino %lu size %lld parent %lu",
@@ -1477,8 +1475,8 @@ TRACE_EVENT(ext4_unlink_exit,
),
TP_fast_assign(
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->ret = ret;
),
@@ -1762,7 +1760,7 @@ TRACE_EVENT(ext4_journal_start,
__entry->rsv_blocks = rsv_blocks;
),
- TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF",
+ TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
);
@@ -1784,7 +1782,7 @@ TRACE_EVENT(ext4_journal_start_reserved,
__entry->blocks = blocks;
),
- TP_printk("dev %d,%d blocks, %d caller %pF",
+ TP_printk("dev %d,%d blocks, %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->blocks, (void *)__entry->ip)
);
@@ -2478,6 +2476,31 @@ TRACE_EVENT(ext4_collapse_range,
__entry->offset, __entry->len)
);
+TRACE_EVENT(ext4_insert_range,
+ TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
+
+ TP_ARGS(inode, offset, len),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->offset = offset;
+ __entry->len = len;
+ ),
+
+ TP_printk("dev %d,%d ino %lu offset %lld len %lld",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->offset, __entry->len)
+);
+
TRACE_EVENT(ext4_es_shrink,
TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
int nr_skipped, int retried),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 5422dbfaf97d..04856a2d8c82 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -9,12 +9,51 @@
#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev)
#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino
+TRACE_DEFINE_ENUM(NODE);
+TRACE_DEFINE_ENUM(DATA);
+TRACE_DEFINE_ENUM(META);
+TRACE_DEFINE_ENUM(META_FLUSH);
+TRACE_DEFINE_ENUM(INMEM);
+TRACE_DEFINE_ENUM(INMEM_DROP);
+TRACE_DEFINE_ENUM(IPU);
+TRACE_DEFINE_ENUM(OPU);
+TRACE_DEFINE_ENUM(CURSEG_HOT_DATA);
+TRACE_DEFINE_ENUM(CURSEG_WARM_DATA);
+TRACE_DEFINE_ENUM(CURSEG_COLD_DATA);
+TRACE_DEFINE_ENUM(CURSEG_HOT_NODE);
+TRACE_DEFINE_ENUM(CURSEG_WARM_NODE);
+TRACE_DEFINE_ENUM(CURSEG_COLD_NODE);
+TRACE_DEFINE_ENUM(NO_CHECK_TYPE);
+TRACE_DEFINE_ENUM(GC_GREEDY);
+TRACE_DEFINE_ENUM(GC_CB);
+TRACE_DEFINE_ENUM(FG_GC);
+TRACE_DEFINE_ENUM(BG_GC);
+TRACE_DEFINE_ENUM(LFS);
+TRACE_DEFINE_ENUM(SSR);
+TRACE_DEFINE_ENUM(__REQ_RAHEAD);
+TRACE_DEFINE_ENUM(__REQ_WRITE);
+TRACE_DEFINE_ENUM(__REQ_SYNC);
+TRACE_DEFINE_ENUM(__REQ_NOIDLE);
+TRACE_DEFINE_ENUM(__REQ_FLUSH);
+TRACE_DEFINE_ENUM(__REQ_FUA);
+TRACE_DEFINE_ENUM(__REQ_PRIO);
+TRACE_DEFINE_ENUM(__REQ_META);
+TRACE_DEFINE_ENUM(CP_UMOUNT);
+TRACE_DEFINE_ENUM(CP_FASTBOOT);
+TRACE_DEFINE_ENUM(CP_SYNC);
+TRACE_DEFINE_ENUM(CP_RECOVERY);
+TRACE_DEFINE_ENUM(CP_DISCARD);
+
#define show_block_type(type) \
__print_symbolic(type, \
{ NODE, "NODE" }, \
{ DATA, "DATA" }, \
{ META, "META" }, \
- { META_FLUSH, "META_FLUSH" })
+ { META_FLUSH, "META_FLUSH" }, \
+ { INMEM, "INMEM" }, \
+ { INMEM_DROP, "INMEM_DROP" }, \
+ { IPU, "IN-PLACE" }, \
+ { OPU, "OUT-OF-PLACE" })
#define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA))
#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
@@ -74,9 +113,11 @@
{ CP_UMOUNT, "Umount" }, \
{ CP_FASTBOOT, "Fastboot" }, \
{ CP_SYNC, "Sync" }, \
+ { CP_RECOVERY, "Recovery" }, \
{ CP_DISCARD, "Discard" })
struct victim_sel_policy;
+struct f2fs_map_blocks;
DECLARE_EVENT_CLASS(f2fs__inode,
@@ -441,36 +482,35 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
__entry->err)
);
-TRACE_EVENT(f2fs_get_data_block,
- TP_PROTO(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int ret),
+TRACE_EVENT(f2fs_map_blocks,
+ TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
- TP_ARGS(inode, iblock, bh, ret),
+ TP_ARGS(inode, map, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(sector_t, iblock)
- __field(sector_t, bh_start)
- __field(size_t, bh_size)
+ __field(block_t, m_lblk)
+ __field(block_t, m_pblk)
+ __field(unsigned int, m_len)
__field(int, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->iblock = iblock;
- __entry->bh_start = bh->b_blocknr;
- __entry->bh_size = bh->b_size;
+ __entry->m_lblk = map->m_lblk;
+ __entry->m_pblk = map->m_pblk;
+ __entry->m_len = map->m_len;
__entry->ret = ret;
),
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
- "start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d",
+ "start blkaddr = 0x%llx, len = 0x%llx, err = %d",
show_dev_ino(__entry),
- (unsigned long long)__entry->iblock,
- (unsigned long long)__entry->bh_start,
- (unsigned long long)__entry->bh_size,
+ (unsigned long long)__entry->m_lblk,
+ (unsigned long long)__entry->m_pblk,
+ (unsigned long long)__entry->m_len,
__entry->ret)
);
@@ -854,6 +894,13 @@ DEFINE_EVENT(f2fs__page, f2fs_writepage,
TP_ARGS(page, type)
);
+DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
DEFINE_EVENT(f2fs__page, f2fs_readpage,
TP_PROTO(struct page *page, int type),
@@ -875,6 +922,20 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
TP_ARGS(page, type)
);
+DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
TRACE_EVENT(f2fs_writepages,
TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type),
@@ -1011,6 +1072,140 @@ TRACE_EVENT(f2fs_issue_flush,
__entry->nobarrier ? "skip (nobarrier)" : "issue",
__entry->flush_merge ? " with flush_merge" : "")
);
+
+TRACE_EVENT(f2fs_lookup_extent_tree_start,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs),
+
+ TP_ARGS(inode, pgofs),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u",
+ show_dev_ino(__entry),
+ __entry->pgofs)
+);
+
+TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs,
+ struct extent_node *en),
+
+ TP_ARGS(inode, pgofs, en),
+
+ TP_CONDITION(en),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(unsigned int, fofs)
+ __field(u32, blk)
+ __field(unsigned int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->fofs = en->ei.fofs;
+ __entry->blk = en->ei.blk;
+ __entry->len = en->ei.len;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ "ext_info(fofs: %u, blk: %u, len: %u)",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->fofs,
+ __entry->blk,
+ __entry->len)
+);
+
+TRACE_EVENT(f2fs_update_extent_tree,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr),
+
+ TP_ARGS(inode, pgofs, blkaddr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(u32, blk)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->blk = blkaddr;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, blkaddr = %u",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->blk)
+);
+
+TRACE_EVENT(f2fs_shrink_extent_tree,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt,
+ unsigned int tree_cnt),
+
+ TP_ARGS(sbi, node_cnt, tree_cnt),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, node_cnt)
+ __field(unsigned int, tree_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->node_cnt = node_cnt;
+ __entry->tree_cnt = tree_cnt;
+ ),
+
+ TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
+ show_dev(__entry),
+ __entry->node_cnt,
+ __entry->tree_cnt)
+);
+
+TRACE_EVENT(f2fs_destroy_extent_tree,
+
+ TP_PROTO(struct inode *inode, unsigned int node_cnt),
+
+ TP_ARGS(inode, node_cnt),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, node_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->node_cnt = node_cnt;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u",
+ show_dev_ino(__entry),
+ __entry->node_cnt)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 0421f49a20f7..42febb6bc1d5 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -18,14 +18,14 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
TP_ARGS(page),
TP_STRUCT__entry(
- __field(struct page *, page)
+ __field(unsigned long, pfn)
__field(unsigned long, i_ino)
__field(unsigned long, index)
__field(dev_t, s_dev)
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->i_ino = page->mapping->host->i_ino;
__entry->index = page->index;
if (page->mapping->host->i_sb)
@@ -37,8 +37,8 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->index << PAGE_SHIFT)
);
diff --git a/include/trace/events/intel-sst.h b/include/trace/events/intel-sst.h
index 76c72d3f1902..edc24e6dea1b 100644
--- a/include/trace/events/intel-sst.h
+++ b/include/trace/events/intel-sst.h
@@ -1,6 +1,13 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel-sst
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR intel_sst
+
#if !defined(_TRACE_INTEL_SST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_SST_H
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 3608bebd3d9c..ff8f6c091a15 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -9,19 +9,34 @@
struct irqaction;
struct softirq_action;
-#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
+#define SOFTIRQ_NAME_LIST \
+ softirq_name(HI) \
+ softirq_name(TIMER) \
+ softirq_name(NET_TX) \
+ softirq_name(NET_RX) \
+ softirq_name(BLOCK) \
+ softirq_name(BLOCK_IOPOLL) \
+ softirq_name(TASKLET) \
+ softirq_name(SCHED) \
+ softirq_name(HRTIMER) \
+ softirq_name_end(RCU)
+
+#undef softirq_name
+#undef softirq_name_end
+
+#define softirq_name(sirq) TRACE_DEFINE_ENUM(sirq##_SOFTIRQ);
+#define softirq_name_end(sirq) TRACE_DEFINE_ENUM(sirq##_SOFTIRQ);
+
+SOFTIRQ_NAME_LIST
+
+#undef softirq_name
+#undef softirq_name_end
+
+#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq },
+#define softirq_name_end(sirq) { sirq##_SOFTIRQ, #sirq }
+
#define show_softirq_name(val) \
- __print_symbolic(val, \
- softirq_name(HI), \
- softirq_name(TIMER), \
- softirq_name(NET_TX), \
- softirq_name(NET_RX), \
- softirq_name(BLOCK), \
- softirq_name(BLOCK_IOPOLL), \
- softirq_name(TASKLET), \
- softirq_name(SCHED), \
- softirq_name(HRTIMER), \
- softirq_name(RCU))
+ __print_symbolic(val, SOFTIRQ_NAME_LIST)
/**
* irq_handler_entry - called immediately before the irq action handler
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 4ad10baecd4d..f7554fd7fc62 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -140,32 +140,55 @@ DEFINE_EVENT(kmem_free, kfree,
TP_ARGS(call_site, ptr)
);
-DEFINE_EVENT(kmem_free, kmem_cache_free,
+DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
TP_PROTO(unsigned long call_site, const void *ptr),
- TP_ARGS(call_site, ptr)
+ TP_ARGS(call_site, ptr),
+
+ /*
+ * This trace can be potentially called from an offlined cpu.
+ * Since trace points use RCU and RCU should not be used from
+ * offline cpus, filter such calls out.
+ * While this trace can be called from a preemptable section,
+ * it has no impact on the condition since tasks can migrate
+ * only from online cpus to other online cpus. Thus it's safe
+ * to use raw_smp_processor_id.
+ */
+ TP_CONDITION(cpu_online(raw_smp_processor_id()))
);
-TRACE_EVENT(mm_page_free,
+TRACE_EVENT_CONDITION(mm_page_free,
TP_PROTO(struct page *page, unsigned int order),
TP_ARGS(page, order),
+
+ /*
+ * This trace can be potentially called from an offlined cpu.
+ * Since trace points use RCU and RCU should not be used from
+ * offline cpus, filter such calls out.
+ * While this trace can be called from a preemptable section,
+ * it has no impact on the condition since tasks can migrate
+ * only from online cpus to other online cpus. Thus it's safe
+ * to use raw_smp_processor_id.
+ */
+ TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->order = order;
),
TP_printk("page=%p pfn=%lu order=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->order)
);
@@ -176,18 +199,18 @@ TRACE_EVENT(mm_page_free_batched,
TP_ARGS(page, cold),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( int, cold )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->cold = cold;
),
TP_printk("page=%p pfn=%lu order=0 cold=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->cold)
);
@@ -199,22 +222,22 @@ TRACE_EVENT(mm_page_alloc,
TP_ARGS(page, order, gfp_flags, migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
__field( gfp_t, gfp_flags )
__field( int, migratetype )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->gfp_flags = gfp_flags;
__entry->migratetype = migratetype;
),
TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
- __entry->page,
- __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+ __entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
__entry->migratetype,
show_gfp_flags(__entry->gfp_flags))
@@ -227,20 +250,20 @@ DECLARE_EVENT_CLASS(mm_page,
TP_ARGS(page, order, migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
__field( int, migratetype )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->migratetype = migratetype;
),
TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
- __entry->page,
- __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+ __entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
__entry->migratetype,
__entry->order == 0)
@@ -253,14 +276,37 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
TP_ARGS(page, order, migratetype)
);
-DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
+TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
TP_PROTO(struct page *page, unsigned int order, int migratetype),
TP_ARGS(page, order, migratetype),
+ /*
+ * This trace can be potentially called from an offlined cpu.
+ * Since trace points use RCU and RCU should not be used from
+ * offline cpus, filter such calls out.
+ * While this trace can be called from a preemptable section,
+ * it has no impact on the condition since tasks can migrate
+ * only from online cpus to other online cpus. Thus it's safe
+ * to use raw_smp_processor_id.
+ */
+ TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, pfn )
+ __field( unsigned int, order )
+ __field( int, migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = page ? page_to_pfn(page) : -1UL;
+ __entry->order = order;
+ __entry->migratetype = migratetype;
+ ),
+
TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
- __entry->page, page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn), __entry->pfn,
__entry->order, __entry->migratetype)
);
@@ -275,7 +321,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
alloc_migratetype, fallback_migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( int, alloc_order )
__field( int, fallback_order )
__field( int, alloc_migratetype )
@@ -284,7 +330,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->alloc_order = alloc_order;
__entry->fallback_order = fallback_order;
__entry->alloc_migratetype = alloc_migratetype;
@@ -294,8 +340,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
),
TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->alloc_order,
__entry->fallback_order,
pageblock_order,
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index dd2b5467d905..539b25a76111 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -7,18 +7,40 @@
#include <linux/tracepoint.h>
#define MIGRATE_MODE \
- {MIGRATE_ASYNC, "MIGRATE_ASYNC"}, \
- {MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT"}, \
- {MIGRATE_SYNC, "MIGRATE_SYNC"}
+ EM( MIGRATE_ASYNC, "MIGRATE_ASYNC") \
+ EM( MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT") \
+ EMe(MIGRATE_SYNC, "MIGRATE_SYNC")
+
#define MIGRATE_REASON \
- {MR_COMPACTION, "compaction"}, \
- {MR_MEMORY_FAILURE, "memory_failure"}, \
- {MR_MEMORY_HOTPLUG, "memory_hotplug"}, \
- {MR_SYSCALL, "syscall_or_cpuset"}, \
- {MR_MEMPOLICY_MBIND, "mempolicy_mbind"}, \
- {MR_NUMA_MISPLACED, "numa_misplaced"}, \
- {MR_CMA, "cma"}
+ EM( MR_COMPACTION, "compaction") \
+ EM( MR_MEMORY_FAILURE, "memory_failure") \
+ EM( MR_MEMORY_HOTPLUG, "memory_hotplug") \
+ EM( MR_SYSCALL, "syscall_or_cpuset") \
+ EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
+ EM( MR_NUMA_MISPLACED, "numa_misplaced") \
+ EMe(MR_CMA, "cma")
+
+/*
+ * First define the enums in the above macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+MIGRATE_MODE
+MIGRATE_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
TRACE_EVENT(mm_migrate_pages,
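
The EM()/EMe() conversion above expands the same list twice: first with EM() defined as TRACE_DEFINE_ENUM() so each value is exported to user space, then redefined as { value, string } pairs for __print_symbolic(). The sunrpc, tlb, v4l2 and writeback hunks below follow the same recipe. A self-contained user-space sketch of the two-pass X-macro technique, with printf standing in for the trace machinery and invented names used purely for illustration:

/*
 * Two-pass EM()/EMe() X-macro demonstration.
 * Pass 1 "exports" each enum value (TRACE_DEFINE_ENUM in the kernel);
 * pass 2 builds the value->string table (__print_symbolic in the kernel).
 */
#include <stdio.h>

enum mode { MODE_ASYNC, MODE_SYNC_LIGHT, MODE_SYNC };

#define MODE_LIST				\
	EM( MODE_ASYNC,      "async")		\
	EM( MODE_SYNC_LIGHT, "sync_light")	\
	EMe(MODE_SYNC,       "sync")

/* Pass 1: register the enum values. */
#define EM(a, b)	printf("define enum %s = %d\n", #a, a);
#define EMe(a, b)	EM(a, b)
static void export_enums(void) { MODE_LIST }
#undef EM
#undef EMe

/* Pass 2: build the symbolic table used at output time. */
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }
static const struct { int val; const char *name; } mode_names[] = { MODE_LIST };

int main(void)
{
	export_enums();
	for (unsigned int i = 0; i < sizeof(mode_names) / sizeof(mode_names[0]); i++)
		printf("%d -> %s\n", mode_names[i].val, mode_names[i].name);
	return 0;
}
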
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 81c4c183d348..28c45997e451 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
__assign_str(name, mod->name);
),
- TP_printk("%s call_site=%pf refcnt=%d",
+ TP_printk("%s call_site=%ps refcnt=%d",
__get_str(name), (void *)__entry->ip, __entry->refcnt)
);
@@ -121,7 +121,7 @@ TRACE_EVENT(module_request,
__assign_str(name, name);
),
- TP_printk("%s wait=%d call_site=%pf",
+ TP_printk("%s wait=%d call_site=%ps",
__get_str(name), (int)__entry->wait, (void *)__entry->ip)
);
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index d19840b0cac8..630d1e5e4de0 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -42,45 +42,54 @@ TRACE_EVENT(pstate_sample,
TP_PROTO(u32 core_busy,
u32 scaled_busy,
- u32 state,
+ u32 from,
+ u32 to,
u64 mperf,
u64 aperf,
+ u64 tsc,
u32 freq
),
TP_ARGS(core_busy,
scaled_busy,
- state,
+ from,
+ to,
mperf,
aperf,
+ tsc,
freq
),
TP_STRUCT__entry(
__field(u32, core_busy)
__field(u32, scaled_busy)
- __field(u32, state)
+ __field(u32, from)
+ __field(u32, to)
__field(u64, mperf)
__field(u64, aperf)
+ __field(u64, tsc)
__field(u32, freq)
-
- ),
+ ),
TP_fast_assign(
__entry->core_busy = core_busy;
__entry->scaled_busy = scaled_busy;
- __entry->state = state;
+ __entry->from = from;
+ __entry->to = to;
__entry->mperf = mperf;
__entry->aperf = aperf;
+ __entry->tsc = tsc;
__entry->freq = freq;
),
- TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
+ TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ",
(unsigned long)__entry->core_busy,
(unsigned long)__entry->scaled_busy,
- (unsigned long)__entry->state,
+ (unsigned long)__entry->from,
+ (unsigned long)__entry->to,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
+ (unsigned long long)__entry->tsc,
(unsigned long)__entry->freq
)
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
index 805af6db41cc..4684de344c5d 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -22,7 +22,7 @@ TRACE_EVENT(add_device_randomness,
__entry->IP = IP;
),
- TP_printk("bytes %d caller %pF",
+ TP_printk("bytes %d caller %pS",
__entry->bytes, (void *)__entry->IP)
);
@@ -43,7 +43,7 @@ DECLARE_EVENT_CLASS(random__mix_pool_bytes,
__entry->IP = IP;
),
- TP_printk("%s pool: bytes %d caller %pF",
+ TP_printk("%s pool: bytes %d caller %pS",
__entry->pool_name, __entry->bytes, (void *)__entry->IP)
);
@@ -82,7 +82,7 @@ TRACE_EVENT(credit_entropy_bits,
),
TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
- "caller %pF", __entry->pool_name, __entry->bits,
+ "caller %pS", __entry->pool_name, __entry->bits,
__entry->entropy_count, __entry->entropy_total,
(void *)__entry->IP)
);
@@ -207,7 +207,7 @@ DECLARE_EVENT_CLASS(random__get_random_bytes,
__entry->IP = IP;
),
- TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+ TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
);
DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
@@ -242,7 +242,7 @@ DECLARE_EVENT_CLASS(random__extract_entropy,
__entry->IP = IP;
),
- TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+ TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
__entry->pool_name, __entry->nbytes, __entry->entropy_count,
(void *)__entry->IP)
);
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 30fedaf3e56a..d57a575fe31f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -147,7 +147,8 @@ TRACE_EVENT(sched_switch,
__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
- { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
+ { 128, "K" }, { 256, "W" }, { 512, "P" },
+ { 1024, "N" }) : "R",
__entry->prev_state & TASK_STATE_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index b9c1dc6c825a..fd1a02cb3c82 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -179,27 +179,53 @@ DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
);
+/*
+ * First define the enums in the below macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+#define RPC_SHOW_SOCKET \
+ EM( SS_FREE, "FREE" ) \
+ EM( SS_UNCONNECTED, "UNCONNECTED" ) \
+ EM( SS_CONNECTING, "CONNECTING," ) \
+ EM( SS_CONNECTED, "CONNECTED," ) \
+ EMe(SS_DISCONNECTING, "DISCONNECTING" )
+
#define rpc_show_socket_state(state) \
- __print_symbolic(state, \
- { SS_FREE, "FREE" }, \
- { SS_UNCONNECTED, "UNCONNECTED" }, \
- { SS_CONNECTING, "CONNECTING," }, \
- { SS_CONNECTED, "CONNECTED," }, \
- { SS_DISCONNECTING, "DISCONNECTING" })
+ __print_symbolic(state, RPC_SHOW_SOCKET)
+
+RPC_SHOW_SOCKET
+
+#define RPC_SHOW_SOCK \
+ EM( TCP_ESTABLISHED, "ESTABLISHED" ) \
+ EM( TCP_SYN_SENT, "SYN_SENT" ) \
+ EM( TCP_SYN_RECV, "SYN_RECV" ) \
+ EM( TCP_FIN_WAIT1, "FIN_WAIT1" ) \
+ EM( TCP_FIN_WAIT2, "FIN_WAIT2" ) \
+ EM( TCP_TIME_WAIT, "TIME_WAIT" ) \
+ EM( TCP_CLOSE, "CLOSE" ) \
+ EM( TCP_CLOSE_WAIT, "CLOSE_WAIT" ) \
+ EM( TCP_LAST_ACK, "LAST_ACK" ) \
+ EM( TCP_LISTEN, "LISTEN" ) \
+ EMe( TCP_CLOSING, "CLOSING" )
#define rpc_show_sock_state(state) \
- __print_symbolic(state, \
- { TCP_ESTABLISHED, "ESTABLISHED" }, \
- { TCP_SYN_SENT, "SYN_SENT" }, \
- { TCP_SYN_RECV, "SYN_RECV" }, \
- { TCP_FIN_WAIT1, "FIN_WAIT1" }, \
- { TCP_FIN_WAIT2, "FIN_WAIT2" }, \
- { TCP_TIME_WAIT, "TIME_WAIT" }, \
- { TCP_CLOSE, "CLOSE" }, \
- { TCP_CLOSE_WAIT, "CLOSE_WAIT" }, \
- { TCP_LAST_ACK, "LAST_ACK" }, \
- { TCP_LISTEN, "LISTEN" }, \
- { TCP_CLOSING, "CLOSING" })
+ __print_symbolic(state, RPC_SHOW_SOCK)
+
+RPC_SHOW_SOCK
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
DECLARE_EVENT_CLASS(xs_socket_event,
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 04c3c6efdcc2..50fea660c0f8 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -6,7 +6,7 @@
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 68c2c2000f02..073b9ac245ba 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -43,15 +43,18 @@ DEFINE_EVENT(timer_class, timer_init,
*/
TRACE_EVENT(timer_start,
- TP_PROTO(struct timer_list *timer, unsigned long expires),
+ TP_PROTO(struct timer_list *timer,
+ unsigned long expires,
+ unsigned int flags),
- TP_ARGS(timer, expires),
+ TP_ARGS(timer, expires, flags),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
__field( unsigned long, now )
+ __field( unsigned int, flags )
),
TP_fast_assign(
@@ -59,11 +62,12 @@ TRACE_EVENT(timer_start,
__entry->function = timer->function;
__entry->expires = expires;
__entry->now = jiffies;
+ __entry->flags = flags;
),
- TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
__entry->timer, __entry->function, __entry->expires,
- (long)__entry->expires - __entry->now)
+ (long)__entry->expires - __entry->now, __entry->flags)
);
/**
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 0e7635765153..4250f364a6ca 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -7,11 +7,31 @@
#include <linux/mm_types.h>
#include <linux/tracepoint.h>
-#define TLB_FLUSH_REASON \
- { TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" }, \
- { TLB_REMOTE_SHOOTDOWN, "remote shootdown" }, \
- { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \
- { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" }
+#define TLB_FLUSH_REASON \
+ EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" ) \
+ EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" ) \
+ EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" ) \
+ EMe( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" )
+
+/*
+ * First define the enums in TLB_FLUSH_REASON to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) TRACE_DEFINE_ENUM(a);
+#define EMe(a,b) TRACE_DEFINE_ENUM(a);
+
+TLB_FLUSH_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) { a, b },
+#define EMe(a,b) { a, b }
TRACE_EVENT_CONDITION(tlb_flush,
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index b9bb1f204693..20112170ff11 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -6,33 +6,58 @@
#include <linux/tracepoint.h>
-#define show_type(type) \
- __print_symbolic(type, \
- { V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" }, \
- { V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" }, \
- { V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" }, \
- { V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" }, \
- { V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" }, \
- { V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" }, \
- { V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" }, \
- { V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" },\
- { V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" },\
- { V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" }, \
- { V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" }, \
- { V4L2_BUF_TYPE_PRIVATE, "PRIVATE" })
+/* Enums require being exported to userspace, for user tool parsing */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+#define show_type(type) \
+ __print_symbolic(type, SHOW_TYPE)
+
+#define SHOW_TYPE \
+ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" ) \
+ EM( V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \
+ EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \
+ EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" )
+
+SHOW_TYPE
#define show_field(field) \
- __print_symbolic(field, \
- { V4L2_FIELD_ANY, "ANY" }, \
- { V4L2_FIELD_NONE, "NONE" }, \
- { V4L2_FIELD_TOP, "TOP" }, \
- { V4L2_FIELD_BOTTOM, "BOTTOM" }, \
- { V4L2_FIELD_INTERLACED, "INTERLACED" }, \
- { V4L2_FIELD_SEQ_TB, "SEQ_TB" }, \
- { V4L2_FIELD_SEQ_BT, "SEQ_BT" }, \
- { V4L2_FIELD_ALTERNATE, "ALTERNATE" }, \
- { V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" }, \
- { V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" })
+ __print_symbolic(field, SHOW_FIELD)
+
+#define SHOW_FIELD \
+ EM( V4L2_FIELD_ANY, "ANY" ) \
+ EM( V4L2_FIELD_NONE, "NONE" ) \
+ EM( V4L2_FIELD_TOP, "TOP" ) \
+ EM( V4L2_FIELD_BOTTOM, "BOTTOM" ) \
+ EM( V4L2_FIELD_INTERLACED, "INTERLACED" ) \
+ EM( V4L2_FIELD_SEQ_TB, "SEQ_TB" ) \
+ EM( V4L2_FIELD_SEQ_BT, "SEQ_BT" ) \
+ EM( V4L2_FIELD_ALTERNATE, "ALTERNATE" ) \
+ EM( V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" ) \
+ EMe( V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" )
+
+SHOW_FIELD
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+/* V4L2_TC_TYPE_* are macros, not defines, they do not need processing */
#define show_timecode_type(type) \
__print_symbolic(type, \
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 69590b6ffc09..f66476b96264 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -336,18 +336,18 @@ TRACE_EVENT(mm_vmscan_writepage,
TP_ARGS(page, reclaim_flags),
TP_STRUCT__entry(
- __field(struct page *, page)
+ __field(unsigned long, pfn)
__field(int, reclaim_flags)
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->reclaim_flags = reclaim_flags;
),
TP_printk("page=%p pfn=%lu flags=%s",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
show_reclaim_flags(__entry->reclaim_flags))
);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 5a14ead59696..a7aa607a4c55 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -23,15 +23,32 @@
{I_REFERENCED, "I_REFERENCED"} \
)
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a,b) TRACE_DEFINE_ENUM(a);
+#define EMe(a,b) TRACE_DEFINE_ENUM(a);
+
#define WB_WORK_REASON \
- {WB_REASON_BACKGROUND, "background"}, \
- {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
- {WB_REASON_SYNC, "sync"}, \
- {WB_REASON_PERIODIC, "periodic"}, \
- {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
- {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
- {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
- {WB_REASON_FORKER_THREAD, "forker_thread"}
+ EM( WB_REASON_BACKGROUND, "background") \
+ EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \
+ EM( WB_REASON_SYNC, "sync") \
+ EM( WB_REASON_PERIODIC, "periodic") \
+ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
+ EM( WB_REASON_FREE_MORE_MEM, "free_more_memory") \
+ EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \
+ EMe(WB_REASON_FORKER_THREAD, "forker_thread")
+
+WB_WORK_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) { a, b },
+#define EMe(a,b) { a, b }
struct wb_writeback_work;
@@ -233,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \
DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
-DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -344,7 +360,7 @@ TRACE_EVENT(global_dirty_state,
__entry->nr_written = global_page_state(NR_WRITTEN);
__entry->background_thresh = background_thresh;
__entry->dirty_thresh = dirty_thresh;
- __entry->dirty_limit = global_dirty_limit;
+ __entry->dirty_limit = global_wb_domain.dirty_limit;
),
TP_printk("dirty=%lu writeback=%lu unstable=%lu "
@@ -383,13 +399,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TP_fast_assign(
strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
- __entry->write_bw = KBps(bdi->write_bandwidth);
- __entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
+ __entry->write_bw = KBps(bdi->wb.write_bandwidth);
+ __entry->avg_write_bw = KBps(bdi->wb.avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
- __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
+ __entry->dirty_ratelimit = KBps(bdi->wb.dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->balanced_dirty_ratelimit =
- KBps(bdi->balanced_dirty_ratelimit);
+ KBps(bdi->wb.balanced_dirty_ratelimit);
),
TP_printk("bdi %s: "
@@ -446,8 +462,9 @@ TRACE_EVENT(balance_dirty_pages,
unsigned long freerun = (thresh + bg_thresh) / 2;
strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
- __entry->limit = global_dirty_limit;
- __entry->setpoint = (global_dirty_limit + freerun) / 2;
+ __entry->limit = global_wb_domain.dirty_limit;
+ __entry->setpoint = (global_wb_domain.dirty_limit +
+ freerun) / 2;
__entry->dirty = dirty;
__entry->bdi_setpoint = __entry->setpoint *
bdi_thresh / (thresh + 1);
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index d06b6da5c1e3..bce990f5a35d 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -224,7 +224,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
TP_printk("pmdp %p", __entry->pmdp)
);
-#if PAGETABLE_LEVELS >= 4
+#if CONFIG_PGTABLE_LEVELS >= 4
TRACE_EVENT(xen_mmu_set_pud,
TP_PROTO(pud_t *pudp, pud_t pudval),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 41bf65f04dd9..37d4b10b111d 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -18,6 +18,34 @@
#include <linux/ftrace_event.h>
+#ifndef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR TRACE_SYSTEM
+#endif
+
+#define __app__(x, y) str__##x##y
+#define __app(x, y) __app__(x, y)
+
+#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
+
+#define TRACE_MAKE_SYSTEM_STR() \
+ static const char TRACE_SYSTEM_STRING[] = \
+ __stringify(TRACE_SYSTEM)
+
+TRACE_MAKE_SYSTEM_STR();
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a) \
+ static struct trace_enum_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .enum_string = #a, \
+ .enum_value = a \
+ }; \
+ static struct trace_enum_map __used \
+ __attribute__((section("_ftrace_enum_map"))) \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+
/*
* DECLARE_EVENT_CLASS can be used to add a generic function
* handlers for events. That is, if all events have the same
@@ -105,7 +133,6 @@
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
/*
* Stage 2 of the trace events.
*
@@ -122,6 +149,9 @@
* The size of an array is also encoded, in the higher 16 bits of <item>.
*/
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a)
+
#undef __field
#define __field(type, item)
@@ -539,7 +569,7 @@ static inline notrace int ftrace_get_offsets_##call( \
* .trace = ftrace_raw_output_<call>, <-- stage 2
* };
*
- * static const char print_fmt_<call>[] = <TP_printk>;
+ * static char print_fmt_<call>[] = <TP_printk>;
*
* static struct ftrace_event_class __used event_class_<template> = {
* .system = "<system>",
@@ -690,9 +720,9 @@ static inline void ftrace_test_probe_##call(void) \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static const char print_fmt_##call[] = print; \
+static char print_fmt_##call[] = print; \
static struct ftrace_event_class __used __refdata event_class_##call = { \
- .system = __stringify(TRACE_SYSTEM), \
+ .system = TRACE_SYSTEM_STRING, \
.define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_##call.fields),\
.raw_init = trace_event_raw_init, \
@@ -719,7 +749,7 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
-static const char print_fmt_##call[] = print; \
+static char print_fmt_##call[] = print; \
\
static struct ftrace_event_call __used event_##call = { \
.class = &event_class_##template, \
@@ -735,6 +765,7 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef TRACE_SYSTEM_VAR
#ifdef CONFIG_PERF_EVENTS
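
The ftrace.h changes make TRACE_DEFINE_ENUM() emit a struct trace_enum_map ({ system string, enum name, enum value }) and place a pointer to it in the _ftrace_enum_map section; TRACE_SYSTEM_VAR exists so that systems such as intel-sst, whose TRACE_SYSTEM is not a valid C identifier, can still form the generated symbol names. Conceptually the section is a table that the tracing core walks at boot to substitute numeric values for enum names in event print formats. A simplified, self-contained model of that lookup; the f2fs flag values below are stand-ins, not the real ones:

/* Simplified user-space model of the _ftrace_enum_map mechanism. */
#include <stdio.h>
#include <string.h>

struct trace_enum_map {
	const char	*system;
	const char	*enum_string;
	unsigned long	enum_value;
};

enum { CP_UMOUNT = 1, CP_FASTBOOT = 2 };	/* stand-in values for illustration */

/* What the _ftrace_enum_map section would conceptually contain. */
static const struct trace_enum_map maps[] = {
	{ "f2fs", "CP_UMOUNT",   CP_UMOUNT },
	{ "f2fs", "CP_FASTBOOT", CP_FASTBOOT },
};

/* Resolve an enum name to its value, as a print-format parser would. */
static long resolve(const char *system, const char *name)
{
	for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		if (!strcmp(maps[i].system, system) &&
		    !strcmp(maps[i].enum_string, name))
			return (long)maps[i].enum_value;
	return -1;
}

int main(void)
{
	printf("CP_FASTBOOT = %ld\n", resolve("f2fs", "CP_FASTBOOT"));
	return 0;
}
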
diff --git a/include/uapi/asm-generic/errno.h b/include/uapi/asm-generic/errno.h
index 1e1ea6e6e7a5..88e0914cf2d9 100644
--- a/include/uapi/asm-generic/errno.h
+++ b/include/uapi/asm-generic/errno.h
@@ -6,7 +6,16 @@
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
-#define ENOSYS 38 /* Function not implemented */
+
+/*
+ * This error code is special: arch syscall entry code will return
+ * -ENOSYS if users try to call a syscall that doesn't exist. To keep
+ * failures of syscalls that really do exist distinguishable from
+ * failures due to attempts to use a nonexistent syscall, syscall
+ * implementations should refrain from returning -ENOSYS.
+ */
+#define ENOSYS 38 /* Invalid system call number */
+
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 01b2d6d0e355..ff6ef62d084b 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -630,6 +630,7 @@ struct drm_gem_open {
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index a284f11a8ef5..07735822a28f 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -129,4 +129,82 @@
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
+#define DRM_FORMAT_MOD_VENDOR_NV 0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
+/* add more to the end as needed */
+
+#define fourcc_mod_code(vendor, val) \
+ ((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ */
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2, where the tiles are
+ * 2Kb) in row-major order. Within the tile, bytes are laid out row-major, with
+ * a platform-dependent stride. On top of that, the memory can apply
+ * platform-dependent swizzling of some higher address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists because, on a given platform, it does uniquely identify
+ * the layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2, where the tiles are
+ * 2Kb) in row-major order. Within the tile, bytes are laid out in OWORD
+ * (16 byte) chunks, column-major, with a platform-dependent height. On top of
+ * that, the memory can apply platform-dependent swizzling of some higher
+ * address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists because, on a given platform, it does uniquely identify
+ * the layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major order.
+ * Within the tile, pixels are laid out in sixteen 256 byte units / sub-tiles,
+ * which are arranged in four groups (two wide, two high) in column-major order.
+ * Each group therefore consists of four 256 byte units, which are also laid
+ * out as 2x2 column-major.
+ * 256 byte units are made of four 64 byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the
+ * width in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
#endif /* DRM_FOURCC_H */
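
The comment block above fixes the modifier layout: an 8-bit vendor id in bits 63:56 and 56 vendor-defined bits below it. A small stand-alone check of that packing, mirroring fourcc_mod_code() from the header, with uint64_t standing in for the kernel's u64:

/* Stand-alone illustration of the DRM format-modifier bit packing. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define DRM_FORMAT_MOD_VENDOR_INTEL	0x01

#define fourcc_mod_code(vendor, val) \
	((((uint64_t)DRM_FORMAT_MOD_VENDOR_##vendor) << 56) | \
	 ((val) & 0x00ffffffffffffffULL))

#define I915_FORMAT_MOD_X_TILED	fourcc_mod_code(INTEL, 1)

int main(void)
{
	uint64_t mod = I915_FORMAT_MOD_X_TILED;

	printf("modifier = 0x%016" PRIx64 "\n", mod);		/* 0x0100000000000001 */
	printf("vendor   = 0x%02" PRIx64 "\n", mod >> 56);	/* 0x01 = INTEL */
	printf("value    = 0x%" PRIx64 "\n", mod & 0x00ffffffffffffffULL);
	return 0;
}
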
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index ca788e01dab2..dbeba949462a 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -336,6 +336,7 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
@@ -356,10 +357,18 @@ struct drm_mode_fb_cmd2 {
* So it would consist of Y as offsets[0] and UV as
* offsets[1]. Note that offsets[0] will generally
* be 0 (but this is not required).
+ *
+ * To accommodate tiled, compressed, etc. formats, a per-plane
+ * modifier can be specified. The default value of zero
+ * indicates the "native" format as specified by the fourcc.
+ * The modifier is a vendor-specific token; this allows, for
+ * example, a different tiling/swizzling pattern on each plane.
+ * See discussion above of DRM_FORMAT_MOD_xxx.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
+ __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
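
DRM_MODE_FB_MODIFIERS plus the new per-plane modifier[] array let userspace describe tiled or compressed buffers when creating a framebuffer with DRM_IOCTL_MODE_ADDFB2. A hedged sketch for a single-plane XRGB8888, Intel X-tiled buffer; the GEM handle, geometry and header paths are placeholders, and real code would normally go through libdrm and report errno on failure:

/* Hedged sketch: ADDFB2 with an explicit per-plane format modifier. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>		/* also pulls in drm_mode.h */
#include <drm/drm_fourcc.h>

static int add_tiled_fb(int drm_fd, uint32_t gem_handle, uint32_t width,
			uint32_t height, uint32_t pitch, uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 f;

	memset(&f, 0, sizeof(f));
	f.width        = width;
	f.height       = height;
	f.pixel_format = DRM_FORMAT_XRGB8888;
	f.flags        = DRM_MODE_FB_MODIFIERS;		/* declares modifier[] as valid */
	f.handles[0]   = gem_handle;
	f.pitches[0]   = pitch;
	f.offsets[0]   = 0;
	f.modifier[0]  = I915_FORMAT_MOD_X_TILED;	/* per-plane tiling token */

	if (ioctl(drm_fd, DRM_IOCTL_MODE_ADDFB2, &f))
		return -1;

	*fb_id = f.fb_id;	/* filled in by the kernel on success */
	return 0;
}
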
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6eed16b92a24..551b6737f5df 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -270,7 +270,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
-#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
@@ -347,6 +347,9 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION 30
#define I915_PARAM_HAS_BSD2 31
+#define I915_PARAM_REVISION 32
+#define I915_PARAM_SUBSLICE_TOTAL 33
+#define I915_PARAM_EU_TOTAL 34
typedef struct drm_i915_getparam {
int param;
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 0d7608dc1a34..5507eead5863 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -39,6 +39,7 @@
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 50d0fb41a3bf..94d44ab2fda1 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1034,6 +1034,11 @@ struct drm_radeon_cs {
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
+#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
+#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
+#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
+#define RADEON_INFO_READ_REG 0x24
+#define RADEON_INFO_VA_UNMAP_WORKING 0x25
struct drm_radeon_info {
uint32_t request;
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index c15d781ecc0f..5391780c2b05 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -36,7 +36,8 @@ struct drm_tegra_gem_create {
struct drm_tegra_gem_mmap {
__u32 handle;
- __u32 offset;
+ __u32 pad;
+ __u64 offset;
};
struct drm_tegra_syncpt_read {
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 38df23435ebb..1a0006a76b00 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -431,6 +431,7 @@ header-y += virtio_blk.h
header-y += virtio_config.h
header-y += virtio_console.h
header-y += virtio_ids.h
+header-y += virtio_input.h
header-y += virtio_net.h
header-y += virtio_pci.h
header-y += virtio_ring.h
@@ -447,5 +448,6 @@ header-y += wireless.h
header-y += x25.h
header-y += xattr.h
header-y += xfrm.h
+header-y += xilinx-v4l2-controls.h
header-y += zorro.h
header-y += zorro_ids.h
diff --git a/include/uapi/linux/am437x-vpfe.h b/include/uapi/linux/am437x-vpfe.h
index 9b03033f9cd6..d75774317b9b 100644
--- a/include/uapi/linux/am437x-vpfe.h
+++ b/include/uapi/linux/am437x-vpfe.h
@@ -21,6 +21,8 @@
#ifndef AM437X_VPFE_USER_H
#define AM437X_VPFE_USER_H
+#include <linux/videodev2.h>
+
enum vpfe_ccdc_data_size {
VPFE_CCDC_DATA_16BITS = 0,
VPFE_CCDC_DATA_15BITS,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 45da7ec7d274..29ef6f99e43d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -113,13 +113,19 @@ enum bpf_map_type {
BPF_MAP_TYPE_UNSPEC,
BPF_MAP_TYPE_HASH,
BPF_MAP_TYPE_ARRAY,
+ BPF_MAP_TYPE_PROG_ARRAY,
};
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
+ BPF_PROG_TYPE_KPROBE,
+ BPF_PROG_TYPE_SCHED_CLS,
+ BPF_PROG_TYPE_SCHED_ACT,
};
+#define BPF_PSEUDO_MAP_FD 1
+
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
@@ -151,6 +157,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
+ __u32 kern_version; /* checked when prog_type=kprobe */
};
} __attribute__((aligned(8)));
@@ -162,7 +169,106 @@ enum bpf_func_id {
BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
+ BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */
+ BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */
+ BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
+ BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
+ BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
+
+ /**
+ * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
+ * @skb: pointer to skb
+ * @offset: offset within packet from skb->mac_header
+ * @from: pointer where to copy bytes from
+ * @len: number of bytes to store into packet
+ * @flags: bit 0 - if true, recompute skb->csum
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_skb_store_bytes,
+
+ /**
+ * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
+ * @skb: pointer to skb
+ * @offset: offset within packet where IP checksum is located
+ * @from: old value of header field
+ * @to: new value of header field
+ * @flags: bits 0-3 - size of header field
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_l3_csum_replace,
+
+ /**
+ * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
+ * @skb: pointer to skb
+ * @offset: offset within packet where TCP/UDP checksum is located
+ * @from: old value of header field
+ * @to: new value of header field
+ * @flags: bits 0-3 - size of header field
+ * bit 4 - is pseudo header
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_l4_csum_replace,
+
+ /**
+ * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
+ * @ctx: context pointer passed to next program
+ * @prog_array_map: pointer to a map of type BPF_MAP_TYPE_PROG_ARRAY
+ * @index: index inside array that selects specific program to run
+ * Return: 0 on success
+ */
+ BPF_FUNC_tail_call,
+
+ /**
+ * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
+ * @skb: pointer to skb
+ * @ifindex: ifindex of the net device
+ * @flags: bit 0 - if set, redirect to ingress instead of egress
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_clone_redirect,
+
+ /**
+ * u64 bpf_get_current_pid_tgid(void)
+ * Return: current->tgid << 32 | current->pid
+ */
+ BPF_FUNC_get_current_pid_tgid,
+
+ /**
+ * u64 bpf_get_current_uid_gid(void)
+ * Return: current_gid << 32 | current_uid
+ */
+ BPF_FUNC_get_current_uid_gid,
+
+ /**
+ * bpf_get_current_comm(char *buf, int size_of_buf)
+ * stores current->comm into buf
+ * Return: 0 on success
+ */
+ BPF_FUNC_get_current_comm,
__BPF_FUNC_MAX_ID,
};
+/* user accessible mirror of in-kernel sk_buff.
+ * new fields can only be added to the end of this structure
+ */
+struct __sk_buff {
+ __u32 len;
+ __u32 pkt_type;
+ __u32 mark;
+ __u32 queue_mapping;
+ __u32 protocol;
+ __u32 vlan_present;
+ __u32 vlan_tci;
+ __u32 vlan_proto;
+ __u32 priority;
+ __u32 ingress_ifindex;
+ __u32 ifindex;
+ __u32 tc_index;
+ __u32 cb[5];
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
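The new kern_version field is only honoured for BPF_PROG_TYPE_KPROBE loads. A minimal userspace sketch of filling it in follows; the two-instruction stub, the bpf(2) wrapper and the other attr fields used are illustrative assumptions, not part of this patch:

#include <linux/bpf.h>
#include <linux/version.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Trivial "r0 = 0; exit" program used only to exercise the load path. */
static const struct bpf_insn stub[] = {
        { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 0 },
        { .code = BPF_JMP | BPF_EXIT },
};

static int load_kprobe_stub(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_KPROBE;
        attr.insns = (unsigned long)stub;
        attr.insn_cnt = sizeof(stub) / sizeof(stub[0]);
        attr.license = (unsigned long)"GPL";
        /* The verifier rejects kprobe programs whose kern_version does not
         * match the running kernel. */
        attr.kern_version = LINUX_VERSION_CODE;

        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}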
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 41892f720057..9692cda5f8fc 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -95,11 +95,17 @@ typedef __u32 can_err_mask_t;
* @can_dlc: frame payload length in byte (0 .. 8) aka data length code
* N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
* mapping of the 'data length code' to the real payload length
+ * @__pad: padding
+ * @__res0: reserved / padding
+ * @__res1: reserved / padding
* @data: CAN frame payload (up to 8 byte)
*/
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
+ __u8 __pad; /* padding */
+ __u8 __res0; /* reserved / padding */
+ __u8 __res1; /* reserved / padding */
__u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
};
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 3e6184cf2f6d..5079b9d57e31 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -78,6 +78,7 @@ enum {
CGW_FILTER, /* specify struct can_filter on source CAN device */
CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */
CGW_LIM_HOPS, /* limit the number of hops of this specific rule */
+ CGW_MOD_UID, /* user defined identifier for modification updates */
__CGW_MAX
};
@@ -162,6 +163,10 @@ enum {
* load time of the can-gw module). This value is used to reduce the number of
* possible hops for this gateway rule to a value smaller then max_hops.
*
+ * CGW_MOD_UID (length 4 bytes):
+ * Optional non-zero user defined routing job identifier to alter existing
+ * modification settings at runtime.
+ *
* CGW_CS_XOR (length 4 bytes):
* Set a simple XOR checksum starting with an initial value into
* data[result-idx] using data[start-idx] .. data[end-idx]
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 78ec76fd89a6..8735f1080385 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -57,6 +57,7 @@ enum {
CAN_RAW_LOOPBACK, /* local loopback (default:on) */
CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
+ CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
};
#endif /* !_UAPI_CAN_RAW_H */
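A short sketch of enabling the new CAN_RAW_JOIN_FILTERS option on a raw CAN socket; binding and filter installation are omitted, and PF_CAN/SOL_CAN_RAW are assumed to come from the usual socket and can headers:

#include <linux/can.h>
#include <linux/can/raw.h>
#include <sys/socket.h>

static int open_joined_filter_socket(void)
{
        int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
        int join = 1;

        /* Deliver a frame only if *all* installed CAN_RAW_FILTERs match,
         * instead of the default "any single filter matches" behaviour. */
        setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS, &join, sizeof(join));
        return s;
}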
diff --git a/include/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 4abf2ea6a887..2e67bb64c1da 100644
--- a/include/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -25,6 +25,7 @@ enum {
CRYPTO_MSG_DELALG,
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
+ CRYPTO_MSG_DELRNG,
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@@ -43,6 +44,7 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */
CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */
CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
+ CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -101,5 +103,9 @@ struct crypto_report_rng {
unsigned int seedsize;
};
+struct crypto_report_akcipher {
+ char type[CRYPTO_MAX_NAME];
+};
+
#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
sizeof(struct crypto_report_blkcipher))
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index e711f20dc522..3ea470f35e40 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -78,6 +78,70 @@ struct ieee_maxrate {
__u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
};
+enum dcbnl_cndd_states {
+ DCB_CNDD_RESET = 0,
+ DCB_CNDD_EDGE,
+ DCB_CNDD_INTERIOR,
+ DCB_CNDD_INTERIOR_READY,
+};
+
+/* This structure contains the IEEE 802.1Qau QCN managed object.
+ *
+ *@rpg_enable: enable QCN RP
+ *@rppp_max_rps: maximum number of RPs allowed for this CNPV on this port
+ *@rpg_time_reset: time between rate increases if no CNMs received.
+ * given in microseconds
+ *@rpg_byte_reset: transmitted data between rate increases if no CNMs received.
+ * given in Bytes
+ *@rpg_threshold: The number of times rpByteStage or rpTimeStage can count
+ * before RP rate control state machine advances states
+ *@rpg_max_rate: the maximum rate, in Mbits per second,
+ * at which an RP can transmit
+ *@rpg_ai_rate: The rate, in Mbits per second,
+ * used to increase rpTargetRate in the RPR_ACTIVE_INCREASE state
+ *@rpg_hai_rate: The rate, in Mbits per second,
+ * used to increase rpTargetRate in the RPR_HYPER_INCREASE state
+ *@rpg_gd: Upon CNM receive, flow rate is limited to (Fb/Gd)*CurrentRate.
+ * rpgGd is given as log2(Gd), where Gd may only be powers of 2
+ *@rpg_min_dec_fac: The minimum factor by which the current transmit rate
+ * can be changed by reception of a CNM.
+ * value is given as percentage (1-100)
+ *@rpg_min_rate: The minimum value, in bits per second, for rate to limit
+ *@cndd_state_machine: The state of the congestion notification domain
+ * defense state machine, as defined by IEEE 802.1Qau
+ * section 32.1.1. In the interior ready state,
+ * the QCN capable hardware may add CN-TAG TLV to the
+ * outgoing traffic, to specifically identify outgoing
+ * flows.
+ */
+
+struct ieee_qcn {
+ __u8 rpg_enable[IEEE_8021QAZ_MAX_TCS];
+ __u32 rppp_max_rps[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_time_reset[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_byte_reset[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_threshold[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_max_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_ai_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_hai_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_gd[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_min_dec_fac[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_min_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 cndd_state_machine[IEEE_8021QAZ_MAX_TCS];
+};
+
+/* This structure contains the IEEE 802.1Qau QCN statistics.
+ *
+ *@rppp_rp_centiseconds: the number of RP-centiseconds accumulated
+ * by RPs at this priority level on this Port
+ *@rppp_created_rps: number of active RPs(flows) that react to CNMs
+ */
+
+struct ieee_qcn_stats {
+ __u64 rppp_rp_centiseconds[IEEE_8021QAZ_MAX_TCS];
+ __u32 rppp_created_rps[IEEE_8021QAZ_MAX_TCS];
+};
+
/* This structure contains the IEEE 802.1Qaz PFC managed object
*
* @pfc_cap: Indicates the number of traffic classes on the local device
@@ -143,8 +207,7 @@ struct cee_pfc {
#define IEEE_8021QAZ_APP_SEL_ANY 4
/* This structure contains the IEEE 802.1Qaz APP managed object. This
- * object is also used for the CEE std as well. There is no difference
- * between the objects.
+ * object is also used for the CEE std.
*
* @selector: protocol identifier type
* @protocol: protocol of type indicated
@@ -152,13 +215,18 @@ struct cee_pfc {
* 8-bit 802.1p user priority bitmap for CEE
*
* ----
- * Selector field values
+ * Selector field values for IEEE 802.1Qaz
* 0 Reserved
* 1 Ethertype
* 2 Well known port number over TCP or SCTP
* 3 Well known port number over UDP or DCCP
* 4 Well known port number over TCP, SCTP, UDP, or DCCP
* 5-7 Reserved
+ *
+ * Selector field values for CEE
+ * 0 Ethertype
+ * 1 Well known port number over TCP or UDP
+ * 2-3 Reserved
*/
struct dcb_app {
__u8 selector;
@@ -334,6 +402,8 @@ enum ieee_attrs {
DCB_ATTR_IEEE_PEER_PFC,
DCB_ATTR_IEEE_PEER_APP,
DCB_ATTR_IEEE_MAXRATE,
+ DCB_ATTR_IEEE_QCN,
+ DCB_ATTR_IEEE_QCN_STATS,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 889f3a5b7b18..eac8c3641f39 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 30
+#define DM_VERSION_MINOR 31
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2014-12-22)"
+#define DM_VERSION_EXTRA "-ioctl (2015-3-12)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index ae99f7743cf4..b08829667ed7 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -25,6 +25,7 @@
#define EM_ARM 40 /* ARM 32 bit */
#define EM_SH 42 /* SuperH */
#define EM_SPARCV9 43 /* SPARC v9 64-bit */
+#define EM_H8_300 46 /* Renesas H8/300 */
#define EM_IA_64 50 /* HP/Intel IA-64 */
#define EM_X86_64 62 /* AMD x86-64 */
#define EM_S390 22 /* IBM S/390 */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 2e49fc880d29..cd67aec187d9 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -215,6 +215,11 @@ enum tunable_id {
ETHTOOL_ID_UNSPEC,
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
+ /*
+ * Add your fresh new tunable attribute above and remember to update
+ * tunable_strings[] in net/core/ethtool.c
+ */
+ __ETHTOOL_TUNABLE_COUNT,
};
enum tunable_type_id {
@@ -545,6 +550,7 @@ enum ethtool_stringset {
ETH_SS_NTUPLE_FILTERS,
ETH_SS_FEATURES,
ETH_SS_RSS_HASH_FUNCS,
+ ETH_SS_TUNABLES,
};
/**
@@ -796,6 +802,31 @@ struct ethtool_rx_flow_spec {
__u32 location;
};
+/* How rings are laid out when accessing virtual functions or
+ * offloaded queues is device specific. To allow users to do flow
+ * steering and specify these queues the ring cookie is partitioned
+ * into a 32-bit queue index with an 8-bit virtual function id.
+ * This also leaves 3 bytes for further specifiers. It is possible
+ * future devices may support more than 256 virtual functions if
+ * devices start supporting PCIe w/ARI. However, at the moment I
+ * do not know of any devices that support this, so I do not reserve
+ * space for it at this time. If a future patch consumes the next
+ * byte it should be aware of this possibility.
+ */
+#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+ return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+};
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+ return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+};
+
/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
* @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
@@ -1264,15 +1295,19 @@ enum ethtool_sfeatures_retval_bits {
* it was forced up into this mode or autonegotiated.
*/
-/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
+/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
+#define SPEED_5000 5000
#define SPEED_10000 10000
#define SPEED_20000 20000
+#define SPEED_25000 25000
#define SPEED_40000 40000
+#define SPEED_50000 50000
#define SPEED_56000 56000
+#define SPEED_100000 100000
#define SPEED_UNKNOWN -1
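The ring-cookie helpers added above are plain bit manipulation; a small illustration with made-up values (queue index 3 on VF 1), decoding the cookie back with the new inline functions:

#include <linux/ethtool.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical flow-steering target: queue index 3 on VF 1. */
        __u64 cookie = ((__u64)1 << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 3;

        printf("queue=%llu vf=%llu\n",
               (unsigned long long)ethtool_get_flow_spec_ring(cookie),
               (unsigned long long)ethtool_get_flow_spec_ring_vf(cookie));
        return 0;
}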
diff --git a/include/uapi/linux/falloc.h b/include/uapi/linux/falloc.h
index d1197ae3723c..3e445a760f14 100644
--- a/include/uapi/linux/falloc.h
+++ b/include/uapi/linux/falloc.h
@@ -41,4 +41,21 @@
*/
#define FALLOC_FL_ZERO_RANGE 0x10
+/*
+ * FALLOC_FL_INSERT_RANGE is used to insert space within the file without
+ * overwriting any existing data. The contents of the file beyond offset are
+ * shifted right by len bytes to create a hole. As such, this
+ * operation will increase the size of the file by len bytes.
+ *
+ * Different filesystems may implement different limitations on the granularity
+ * of the operation. Most will limit operations to filesystem block size
+ * boundaries, but this boundary may be larger or smaller depending on
+ * the filesystem and/or the configuration of the filesystem or file.
+ *
+ * Attempting to insert space using this flag at or beyond the end of
+ * the file is considered an illegal operation - just use ftruncate(2) or
+ * fallocate(2) with mode 0 for such operations.
+ */
+#define FALLOC_FL_INSERT_RANGE 0x20
+
#endif /* _UAPI_FALLOC_H_ */
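A hedged sketch of calling fallocate(2) with the new FALLOC_FL_INSERT_RANGE flag; the block-size alignment shown is an assumption that holds for most filesystems, per the comment above:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <sys/stat.h>

/* Insert one block-sized hole at the start of the second block, shifting
 * all later data towards higher offsets. fd must refer to a regular file
 * larger than one block. */
static int insert_one_block(int fd)
{
        struct stat st;

        if (fstat(fd, &st) < 0)
                return -1;
        return fallocate(fd, FALLOC_FL_INSERT_RANGE,
                         st.st_blksize, st.st_blksize);
}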
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h
index 47785d5ecf17..c97340e43dd6 100644
--- a/include/uapi/linux/filter.h
+++ b/include/uapi/linux/filter.h
@@ -77,9 +77,13 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define SKF_AD_VLAN_TAG_PRESENT 48
#define SKF_AD_PAY_OFFSET 52
#define SKF_AD_RANDOM 56
-#define SKF_AD_MAX 60
-#define SKF_NET_OFF (-0x100000)
-#define SKF_LL_OFF (-0x200000)
+#define SKF_AD_VLAN_TPID 60
+#define SKF_AD_MAX 64
+#define SKF_NET_OFF (-0x100000)
+#define SKF_LL_OFF (-0x200000)
+
+#define BPF_NET_OFF SKF_NET_OFF
+#define BPF_LL_OFF SKF_LL_OFF
#endif /* _UAPI__LINUX_FILTER_H__ */
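A rough classic-BPF sketch using the new SKF_AD_VLAN_TPID ancillary offset: the filter loads the TPID into the accumulator and returns it, so untagged frames (TPID 0) are dropped. The attach step in the trailing comment is the usual SO_ATTACH_FILTER path and is only illustrative here.

#include <linux/filter.h>

static struct sock_filter vlan_only[] = {
        /* A = skb->vlan_proto (ancillary load added by this patch) */
        { BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_VLAN_TPID },
        /* return A: 0 drops the frame, non-zero accepts (and truncates) */
        { BPF_RET | BPF_A, 0, 0, 0 },
};

static struct sock_fprog vlan_only_prog = {
        .len = sizeof(vlan_only) / sizeof(vlan_only[0]),
        .filter = vlan_only,
};

/* attach: setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *                    &vlan_only_prog, sizeof(vlan_only_prog)); */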
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index c303588bb767..d2947c52dc67 100644
--- a/include/uapi/linux/fou.h
+++ b/include/uapi/linux/fou.h
@@ -25,6 +25,7 @@ enum {
FOU_CMD_UNSPEC,
FOU_CMD_ADD,
FOU_CMD_DEL,
+ FOU_CMD_GET,
__FOU_CMD_MAX,
};
diff --git a/include/uapi/linux/hsi/cs-protocol.h b/include/uapi/linux/hsi/cs-protocol.h
index 4957bba57cbe..f153d6ea7c62 100644
--- a/include/uapi/linux/hsi/cs-protocol.h
+++ b/include/uapi/linux/hsi/cs-protocol.h
@@ -76,6 +76,15 @@ struct cs_buffer_config {
};
/*
+ * struct for monotonic timestamp taken when the
+ * last control command was received
+ */
+struct cs_timestamp {
+ __u32 tv_sec; /* seconds */
+ __u32 tv_nsec; /* nanoseconds */
+};
+
+/*
* Struct describing the layout and contents of the driver mmap area.
* This information is meant as read-only information for the application.
*/
@@ -91,11 +100,8 @@ struct cs_mmap_config_block {
__u32 rx_ptr;
__u32 rx_ptr_boundary;
__u32 reserved3[2];
- /*
- * if enabled with CS_FEAT_TSTAMP_RX_CTRL, monotonic
- * timestamp taken when the last control command was received
- */
- struct timespec tstamp_rx_ctrl;
+ /* enabled with CS_FEAT_TSTAMP_RX_CTRL */
+ struct cs_timestamp tstamp_rx_ctrl;
};
#define CS_IO_MAGIC 'C'
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index dea10a87dfd1..4318ab1635ce 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -50,6 +50,8 @@ enum {
#define IFA_F_PERMANENT 0x80
#define IFA_F_MANAGETEMPADDR 0x100
#define IFA_F_NOPREFIXROUTE 0x200
+#define IFA_F_MCAUTOJOIN 0x400
+#define IFA_F_STABLE_PRIVACY 0x800
struct ifa_cacheinfo {
__u32 ifa_prefered;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index dfd0bb22e554..2c7e8e3d3981 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -147,6 +147,7 @@ enum {
IFLA_CARRIER_CHANGES,
IFLA_PHYS_SWITCH_ID,
IFLA_LINK_NETNSID,
+ IFLA_PHYS_PORT_NAME,
__IFLA_MAX
};
@@ -215,6 +216,7 @@ enum {
enum in6_addr_gen_mode {
IN6_ADDR_GEN_MODE_EUI64,
IN6_ADDR_GEN_MODE_NONE,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY,
};
/* Bridge section */
@@ -224,6 +226,9 @@ enum {
IFLA_BR_FORWARD_DELAY,
IFLA_BR_HELLO_TIME,
IFLA_BR_MAX_AGE,
+ IFLA_BR_AGEING_TIME,
+ IFLA_BR_STP_STATE,
+ IFLA_BR_PRIORITY,
__IFLA_BR_MAX,
};
@@ -247,6 +252,7 @@ enum {
IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
IFLA_BRPORT_PROXYARP, /* proxy ARP */
IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
+ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -384,6 +390,17 @@ struct ifla_vxlan_port_range {
__be16 high;
};
+/* GENEVE section */
+enum {
+ IFLA_GENEVE_UNSPEC,
+ IFLA_GENEVE_ID,
+ IFLA_GENEVE_REMOTE,
+ IFLA_GENEVE_TTL,
+ IFLA_GENEVE_TOS,
+ __IFLA_GENEVE_MAX
+};
+#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+
/* Bonding section */
enum {
@@ -411,6 +428,9 @@ enum {
IFLA_BOND_AD_LACP_RATE,
IFLA_BOND_AD_SELECT,
IFLA_BOND_AD_INFO,
+ IFLA_BOND_AD_ACTOR_SYS_PRIO,
+ IFLA_BOND_AD_USER_PORT_KEY,
+ IFLA_BOND_AD_ACTOR_SYSTEM,
__IFLA_BOND_MAX,
};
@@ -436,6 +456,8 @@ enum {
IFLA_BOND_SLAVE_PERM_HWADDR,
IFLA_BOND_SLAVE_QUEUE_ID,
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
+ IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
+ IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
__IFLA_BOND_SLAVE_MAX,
};
@@ -459,6 +481,10 @@ enum {
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */
+ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query
+ * on/off switch
+ */
+ IFLA_VF_STATS, /* network device statistics */
__IFLA_VF_MAX,
};
@@ -503,6 +529,23 @@ struct ifla_vf_link_state {
__u32 link_state;
};
+struct ifla_vf_rss_query_en {
+ __u32 vf;
+ __u32 setting;
+};
+
+enum {
+ IFLA_VF_STATS_RX_PACKETS,
+ IFLA_VF_STATS_TX_PACKETS,
+ IFLA_VF_STATS_RX_BYTES,
+ IFLA_VF_STATS_TX_BYTES,
+ IFLA_VF_STATS_BROADCAST,
+ IFLA_VF_STATS_MULTICAST,
+ __IFLA_VF_STATS_MAX,
+};
+
+#define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
+
/* VF ports management section
*
* Nested layout of set/get msg is:
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index da2d668b8cf1..d3d715f8c88f 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -54,6 +54,7 @@ struct sockaddr_ll {
#define PACKET_FANOUT 18
#define PACKET_TX_HAS_OFF 19
#define PACKET_QDISC_BYPASS 20
+#define PACKET_ROLLOVER_STATS 21
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
@@ -75,6 +76,12 @@ struct tpacket_stats_v3 {
unsigned int tp_freeze_q_cnt;
};
+struct tpacket_rollover_stats {
+ __aligned_u64 tp_all;
+ __aligned_u64 tp_huge;
+ __aligned_u64 tp_failed;
+};
+
union tpacket_stats_u {
struct tpacket_stats stats1;
struct tpacket_stats_v3 stats3;
@@ -99,6 +106,7 @@ struct tpacket_auxdata {
#define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */
#define TP_STATUS_BLK_TMO (1 << 5)
#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
+#define TP_STATUS_CSUM_VALID (1 << 7)
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0
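A minimal sketch of reading the new PACKET_ROLLOVER_STATS counters from a packet socket that participates in a fanout group using rollover; SOL_PACKET is assumed to come from the usual socket headers:

#include <linux/if_packet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void print_rollover_stats(int fd)
{
        struct tpacket_rollover_stats st;
        socklen_t len = sizeof(st);

        memset(&st, 0, sizeof(st));
        if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS, &st, &len) == 0)
                printf("rollover all=%llu huge=%llu failed=%llu\n",
                       (unsigned long long)st.tp_all,
                       (unsigned long long)st.tp_huge,
                       (unsigned long long)st.tp_failed);
}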
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 589ced069e8a..83d6236a2f08 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -69,6 +69,8 @@ enum {
#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
#define IPPROTO_UDPLITE IPPROTO_UDPLITE
+ IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */
+#define IPPROTO_MPLS IPPROTO_MPLS
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
IPPROTO_MAX
@@ -110,6 +112,7 @@ struct in_addr {
#define IP_MINTTL 21
#define IP_NODEFRAG 22
#define IP_CHECKSUM 23
+#define IP_BIND_ADDRESS_NO_PORT 24
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
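A sketch of the intended IP_BIND_ADDRESS_NO_PORT usage: bind to a source address with port 0 but let connect() pick the local port, so many outgoing sockets can share one source address. The fallback define mirrors the value added above in case libc headers predate it.

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_BIND_ADDRESS_NO_PORT
#define IP_BIND_ADDRESS_NO_PORT 24      /* value added above */
#endif

/* src->sin_port is expected to be 0; the port is allocated at connect(). */
static int bind_addr_no_port(int fd, const struct sockaddr_in *src)
{
        int one = 1;

        setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
                   &one, sizeof(one));
        return bind(fd, (const struct sockaddr *)src, sizeof(*src));
}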
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index d65c0a09efd3..68a1f71fde9f 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -111,9 +111,11 @@ enum {
INET_DIAG_SKMEMINFO,
INET_DIAG_SHUTDOWN,
INET_DIAG_DCTCPINFO,
+ INET_DIAG_PROTOCOL, /* response attribute only */
+ INET_DIAG_SKV6ONLY,
};
-#define INET_DIAG_MAX INET_DIAG_DCTCPINFO
+#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
/* INET_DIAG_MEM */
@@ -143,4 +145,8 @@ struct tcp_dctcp_info {
__u32 dctcp_ab_tot;
};
+union tcp_cc_info {
+ struct tcpvegas_info vegas;
+ struct tcp_dctcp_info dctcp;
+};
#endif /* _UAPI_INET_DIAG_H_ */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 2f62ab2d7bf9..731417c025f6 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -369,7 +369,8 @@ struct input_keymap_entry {
#define KEY_MSDOS 151
#define KEY_COFFEE 152 /* AL Terminal Lock/Screensaver */
#define KEY_SCREENLOCK KEY_COFFEE
-#define KEY_DIRECTION 153
+#define KEY_ROTATE_DISPLAY 153 /* Display orientation for e.g. tablets */
+#define KEY_DIRECTION KEY_ROTATE_DISPLAY
#define KEY_CYCLEWINDOWS 154
#define KEY_MAIL 155
#define KEY_BOOKMARKS 156 /* AC Bookmarks */
@@ -702,6 +703,10 @@ struct input_keymap_entry {
#define KEY_NUMERIC_9 0x209
#define KEY_NUMERIC_STAR 0x20a
#define KEY_NUMERIC_POUND 0x20b
+#define KEY_NUMERIC_A 0x20c /* Phone key A - HUT Telephony 0xb9 */
+#define KEY_NUMERIC_B 0x20d
+#define KEY_NUMERIC_C 0x20e
+#define KEY_NUMERIC_D 0x20f
#define KEY_CAMERA_FOCUS 0x210
#define KEY_WPS_BUTTON 0x211 /* WiFi Protected Setup key */
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 411959405ab6..08f894d2ddbd 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -164,6 +164,7 @@ enum
IPV4_DEVCONF_ROUTE_LOCALNET,
IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+ IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index cabe95d5b461..3199243f2028 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -358,6 +358,8 @@ enum {
IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */
+ IPVS_SVC_ATTR_STATS64, /* nested attribute for service stats */
+
__IPVS_SVC_ATTR_MAX,
};
@@ -387,6 +389,8 @@ enum {
IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */
+ IPVS_DEST_ATTR_STATS64, /* nested attribute for dest stats */
+
__IPVS_DEST_ATTR_MAX,
};
@@ -410,7 +414,8 @@ enum {
/*
* Attributes used to describe service or destination entry statistics
*
- * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
+ * Used inside nested attributes IPVS_SVC_ATTR_STATS, IPVS_DEST_ATTR_STATS,
+ * IPVS_SVC_ATTR_STATS64 and IPVS_DEST_ATTR_STATS64.
*/
enum {
IPVS_STATS_ATTR_UNSPEC = 0,
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 437a6a4b125a..5efa54ae567c 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -170,6 +170,7 @@ enum {
DEVCONF_ACCEPT_RA_FROM_LOCAL,
DEVCONF_USE_OPTIMISTIC,
DEVCONF_ACCEPT_RA_MTU,
+ DEVCONF_STABLE_SECRET,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index 2be7bd174751..f6598d1c886e 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -34,6 +34,7 @@
#define RTF_PREF(pref) ((pref) << 27)
#define RTF_PREF_MASK 0x18000000
+#define RTF_PCPU 0x40000000
#define RTF_LOCAL 0x80000000
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f574d7be7631..716ad4ae4d4b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -202,7 +202,7 @@ struct kvm_run {
__u32 exit_reason;
__u8 ready_for_interrupt_injection;
__u8 if_flag;
- __u8 padding2[2];
+ __u16 flags;
/* in (pre_kvm_run), out (post_kvm_run) */
__u64 cr8;
@@ -813,6 +813,10 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_MIPS_MSA 112
#define KVM_CAP_S390_INJECT_IRQ 113
#define KVM_CAP_S390_IRQ_STATE 114
+#define KVM_CAP_PPC_HWRNG 115
+#define KVM_CAP_DISABLE_QUIRKS 116
+#define KVM_CAP_X86_SMM 117
+#define KVM_CAP_MULTI_ADDRESS_SPACE 118
#ifdef KVM_CAP_IRQ_ROUTING
@@ -893,7 +897,7 @@ struct kvm_xen_hvm_config {
*
* KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
* the irqfd to operate in resampling mode for level triggered interrupt
- * emlation. See Documentation/virtual/kvm/api.txt.
+ * emulation. See Documentation/virtual/kvm/api.txt.
*/
#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
@@ -1198,6 +1202,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_S390_IRQ_STATE */
#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
+/* Available with KVM_CAP_X86_SMM */
+#define KVM_SMI _IO(KVMIO, 0xb7)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
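A hedged sketch of the new KVM_SMI ioctl: check KVM_CAP_X86_SMM first, then request an SMI on a vcpu fd. The fds are assumed to have been set up elsewhere (KVM_CREATE_VM/KVM_CREATE_VCPU not shown).

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int inject_smi(int kvm_fd, int vcpu_fd)
{
        /* KVM_CAP_X86_SMM gates SMM emulation and the KVM_SMI ioctl. */
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM) <= 0)
                return -1;

        return ioctl(vcpu_fd, KVM_SMI, 0);
}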
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 7d664ea85ebd..7b1425a6b370 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -58,6 +58,8 @@
#define STACK_END_MAGIC 0x57AC6E9D
+#define TRACEFS_MAGIC 0x74726163
+
#define V9FS_MAGIC 0x01021997
#define BDEVFS_MAGIC 0x62646576
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 23b40908be30..190d491d5b13 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -33,22 +33,32 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x100e */
+/* RGB - next is 0x1018 */
+#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004
+#define MEDIA_BUS_FMT_RGB565_1X16 0x1017
#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005
#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
+#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
+#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
+#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
+#define MEDIA_BUS_FMT_GBR888_1X24 0x1014
#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
+#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
+#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
+#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
-/* YUV (including grey) - next is 0x2024 */
+/* YUV (including grey) - next is 0x2026 */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -65,6 +75,10 @@
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c
#define MEDIA_BUS_FMT_Y12_1X12 0x2013
+#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
+#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
+#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
+#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
@@ -74,16 +88,14 @@
#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b
#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d
#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
-#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
-#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
-#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
-#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
-#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
-#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
+#define MEDIA_BUS_FMT_VUY8_1X24 0x2024
+#define MEDIA_BUS_FMT_YUV8_1X24 0x2025
#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
+#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
+#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
/* Bayer - next is 0x3019 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index d847c760e8f0..4e816be3de39 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -50,7 +50,14 @@ struct media_device_info {
#define MEDIA_ENT_T_DEVNODE_V4L (MEDIA_ENT_T_DEVNODE + 1)
#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_T_DEVNODE + 2)
#define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_T_DEVNODE + 3)
-#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_T_DEVNODE + 4)
+#define MEDIA_ENT_T_DEVNODE_DVB_FE (MEDIA_ENT_T_DEVNODE + 4)
+#define MEDIA_ENT_T_DEVNODE_DVB_DEMUX (MEDIA_ENT_T_DEVNODE + 5)
+#define MEDIA_ENT_T_DEVNODE_DVB_DVR (MEDIA_ENT_T_DEVNODE + 6)
+#define MEDIA_ENT_T_DEVNODE_DVB_CA (MEDIA_ENT_T_DEVNODE + 7)
+#define MEDIA_ENT_T_DEVNODE_DVB_NET (MEDIA_ENT_T_DEVNODE + 8)
+
+/* Legacy symbol. Use it to avoid userspace compilation breakages */
+#define MEDIA_ENT_T_DEVNODE_DVB MEDIA_ENT_T_DEVNODE_DVB_FE
#define MEDIA_ENT_T_V4L2_SUBDEV (2 << MEDIA_ENT_TYPE_SHIFT)
#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR (MEDIA_ENT_T_V4L2_SUBDEV + 1)
@@ -59,6 +66,8 @@ struct media_device_info {
/* A converter of analogue video to its digital representation. */
#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER (MEDIA_ENT_T_V4L2_SUBDEV + 4)
+#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER (MEDIA_ENT_T_V4L2_SUBDEV + 5)
+
#define MEDIA_ENT_FL_DEFAULT (1 << 0)
struct media_entity_desc {
@@ -78,17 +87,48 @@ struct media_entity_desc {
struct {
__u32 major;
__u32 minor;
- } v4l;
- struct {
- __u32 major;
- __u32 minor;
- } fb;
+ } dev;
+
+#if 1
+ /*
+ * TODO: this shouldn't have been added without
+ * actual drivers that use this. When the first real driver
+ * appears that sets this information, special attention
+ * should be given to whether this information is 1) enough, and
+ * 2) can deal with udev rules that rename devices. The struct
+ * dev would not be sufficient for this since that does not
+ * contain the subdevice information. In addition, struct dev
+ * can only refer to a single device, and not to multiple (e.g.
+ * pcm and mixer devices).
+ *
+ * So for now mark this as a to do.
+ */
struct {
__u32 card;
__u32 device;
__u32 subdevice;
} alsa;
+#endif
+
+#if 1
+ /*
+ * DEPRECATED: previous node specifications. Kept just to
+ * avoid breaking compilation, but media_entity_desc.dev
+ * should be used instead. In particular, alsa and dvb
+ * fields below are wrong: for all devnodes, there should
+ * be just major/minor inside the struct, as this is enough
+ * to represent any devnode, no matter what type.
+ */
+ struct {
+ __u32 major;
+ __u32 minor;
+ } v4l;
+ struct {
+ __u32 major;
+ __u32 minor;
+ } fb;
int dvb;
+#endif
/* Sub-device specifications */
/* Nothing needed yet */
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index bc9abfe88c9a..139d4dd1cab8 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -31,4 +31,14 @@ struct mpls_label {
#define MPLS_LS_TTL_MASK 0x000000FF
#define MPLS_LS_TTL_SHIFT 0
+/* Reserved labels */
+#define MPLS_LABEL_IPV4NULL 0 /* RFC3032 */
+#define MPLS_LABEL_RTALERT 1 /* RFC3032 */
+#define MPLS_LABEL_IPV6NULL 2 /* RFC3032 */
+#define MPLS_LABEL_IMPLNULL 3 /* RFC3032 */
+#define MPLS_LABEL_ENTROPY 7 /* RFC6790 */
+#define MPLS_LABEL_GAL 13 /* RFC5586 */
+#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */
+#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */
+
#endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 4f52549b23ff..e08e413d5f71 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -44,8 +44,6 @@ enum {
/* there is a gap here to match userspace */
#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
-#define nbd_cmd(req) ((req)->cmd[0])
-
/* userspace doesn't need the nbd_device structure */
/* These are sent over the network in the request/reply magic fields */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 3873a35509aa..2e35c61bbdd1 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -126,6 +126,7 @@ enum {
NDTPA_PROXY_QLEN, /* u32 */
NDTPA_LOCKTIME, /* u64, msecs */
NDTPA_QUEUE_LENBYTES, /* u32 */
+ NDTPA_MCAST_REPROBES, /* u32 */
__NDTPA_MAX
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ef1b1f88ca18..d93f949d1d9a 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -4,7 +4,8 @@
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
-
+#include <linux/in.h>
+#include <linux/in6.h>
/* Responses from hook functions. */
#define NF_DROP 0
@@ -51,11 +52,17 @@ enum nf_inet_hooks {
NF_INET_NUMHOOKS
};
+enum nf_dev_hooks {
+ NF_NETDEV_INGRESS,
+ NF_NETDEV_NUMHOOKS
+};
+
enum {
NFPROTO_UNSPEC = 0,
NFPROTO_INET = 1,
NFPROTO_IPV4 = 2,
NFPROTO_ARP = 3,
+ NFPROTO_NETDEV = 5,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
NFPROTO_DECNET = 12,
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 5ab4e60894cf..63b2e34f1b60 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -15,12 +15,12 @@
/* The protocol version */
#define IPSET_PROTOCOL 6
-/* The maximum permissible comment length we will accept over netlink */
-#define IPSET_MAX_COMMENT_SIZE 255
-
/* The max length of strings including NUL: set and type identifiers */
#define IPSET_MAXNAMELEN 32
+/* The maximum permissible comment length we will accept over netlink */
+#define IPSET_MAX_COMMENT_SIZE 255
+
/* Message types and commands */
enum ipset_cmd {
IPSET_CMD_NONE,
diff --git a/include/uapi/linux/netfilter/nf_conntrack_tcp.h b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
index 9993a421201c..ef9f80f0f529 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
@@ -42,6 +42,9 @@ enum tcp_conntrack {
/* The field td_maxack has been set */
#define IP_CT_TCP_FLAG_MAXACK_SET 0x20
+/* Marks possibility for expected RFC5961 challenge ACK */
+#define IP_CT_EXP_CHALLENGE_ACK 0x40
+
struct nf_ct_tcp_flags {
__u8 flags;
__u8 mask;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 832bc46db78b..a99e6a997140 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1,19 +1,49 @@
#ifndef _LINUX_NF_TABLES_H
#define _LINUX_NF_TABLES_H
+#define NFT_TABLE_MAXNAMELEN 32
#define NFT_CHAIN_MAXNAMELEN 32
#define NFT_USERDATA_MAXLEN 256
+/**
+ * enum nft_registers - nf_tables registers
+ *
+ * nf_tables used to have five registers: a verdict register and four data
+ * registers of size 16. The data registers have been changed to 16 registers
+ * of size 4. For compatibility reasons, the NFT_REG_[1-4] registers still
+ * map to areas of size 16, the 4 byte registers are addressed using
+ * NFT_REG32_00 - NFT_REG32_15.
+ */
enum nft_registers {
NFT_REG_VERDICT,
NFT_REG_1,
NFT_REG_2,
NFT_REG_3,
NFT_REG_4,
- __NFT_REG_MAX
+ __NFT_REG_MAX,
+
+ NFT_REG32_00 = 8,
+ MFT_REG32_01,
+ NFT_REG32_02,
+ NFT_REG32_03,
+ NFT_REG32_04,
+ NFT_REG32_05,
+ NFT_REG32_06,
+ NFT_REG32_07,
+ NFT_REG32_08,
+ NFT_REG32_09,
+ NFT_REG32_10,
+ NFT_REG32_11,
+ NFT_REG32_12,
+ NFT_REG32_13,
+ NFT_REG32_14,
+ NFT_REG32_15,
};
#define NFT_REG_MAX (__NFT_REG_MAX - 1)
+#define NFT_REG_SIZE 16
+#define NFT_REG32_SIZE 4
+
/**
* enum nft_verdicts - nf_tables internal verdicts
*
@@ -92,11 +122,13 @@ enum nft_list_attributes {
*
* @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
* @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFTA_HOOK_DEV: netdevice name (NLA_STRING)
*/
enum nft_hook_attributes {
NFTA_HOOK_UNSPEC,
NFTA_HOOK_HOOKNUM,
NFTA_HOOK_PRIORITY,
+ NFTA_HOOK_DEV,
__NFTA_HOOK_MAX
};
#define NFTA_HOOK_MAX (__NFTA_HOOK_MAX - 1)
@@ -207,12 +239,16 @@ enum nft_rule_compat_attributes {
* @NFT_SET_CONSTANT: set contents may not change while bound
* @NFT_SET_INTERVAL: set contains intervals
* @NFT_SET_MAP: set is used as a dictionary
+ * @NFT_SET_TIMEOUT: set uses timeouts
+ * @NFT_SET_EVAL: set contains expressions for evaluation
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
NFT_SET_CONSTANT = 0x2,
NFT_SET_INTERVAL = 0x4,
NFT_SET_MAP = 0x8,
+ NFT_SET_TIMEOUT = 0x10,
+ NFT_SET_EVAL = 0x20,
};
/**
@@ -251,6 +287,8 @@ enum nft_set_desc_attributes {
* @NFTA_SET_POLICY: selection policy (NLA_U32)
* @NFTA_SET_DESC: set description (NLA_NESTED)
* @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
+ * @NFTA_SET_TIMEOUT: default timeout value (NLA_U64)
+ * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -264,6 +302,8 @@ enum nft_set_attributes {
NFTA_SET_POLICY,
NFTA_SET_DESC,
NFTA_SET_ID,
+ NFTA_SET_TIMEOUT,
+ NFTA_SET_GC_INTERVAL,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -283,12 +323,20 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
* @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
* @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
+ * @NFTA_SET_ELEM_TIMEOUT: timeout value (NLA_U64)
+ * @NFTA_SET_ELEM_EXPIRATION: expiration time (NLA_U64)
+ * @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
+ * @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
*/
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
NFTA_SET_ELEM_KEY,
NFTA_SET_ELEM_DATA,
NFTA_SET_ELEM_FLAGS,
+ NFTA_SET_ELEM_TIMEOUT,
+ NFTA_SET_ELEM_EXPIRATION,
+ NFTA_SET_ELEM_USERDATA,
+ NFTA_SET_ELEM_EXPR,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -346,6 +394,9 @@ enum nft_data_attributes {
};
#define NFTA_DATA_MAX (__NFTA_DATA_MAX - 1)
+/* Maximum length of a value */
+#define NFT_DATA_VALUE_MAXLEN 64
+
/**
* enum nft_verdict_attributes - nf_tables verdict netlink attributes
*
@@ -504,6 +555,35 @@ enum nft_lookup_attributes {
};
#define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1)
+enum nft_dynset_ops {
+ NFT_DYNSET_OP_ADD,
+ NFT_DYNSET_OP_UPDATE,
+};
+
+/**
+ * enum nft_dynset_attributes - dynset expression attributes
+ *
+ * @NFTA_DYNSET_SET_NAME: name of the set to add data to (NLA_STRING)
+ * @NFTA_DYNSET_SET_ID: unique identifier of the set in the transaction (NLA_U32)
+ * @NFTA_DYNSET_OP: operation (NLA_U32)
+ * @NFTA_DYNSET_SREG_KEY: source register of the key (NLA_U32)
+ * @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
+ * @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
+ * @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
+ */
+enum nft_dynset_attributes {
+ NFTA_DYNSET_UNSPEC,
+ NFTA_DYNSET_SET_NAME,
+ NFTA_DYNSET_SET_ID,
+ NFTA_DYNSET_OP,
+ NFTA_DYNSET_SREG_KEY,
+ NFTA_DYNSET_SREG_DATA,
+ NFTA_DYNSET_TIMEOUT,
+ NFTA_DYNSET_EXPR,
+ __NFTA_DYNSET_MAX,
+};
+#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
+
/**
* enum nft_payload_bases - nf_tables payload expression offset bases
*
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index 8dd819e2b5fe..b67a853638ff 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -49,6 +49,7 @@ enum nfqnl_attr_type {
NFQA_EXP, /* nf_conntrack_netlink.h */
NFQA_UID, /* __u32 sk uid */
NFQA_GID, /* __u32 sk gid */
+ NFQA_SECCTX, /* security context string */
__NFQA_MAX
};
@@ -102,7 +103,8 @@ enum nfqnl_attr_config {
#define NFQA_CFG_F_CONNTRACK (1 << 1)
#define NFQA_CFG_F_GSO (1 << 2)
#define NFQA_CFG_F_UID_GID (1 << 3)
-#define NFQA_CFG_F_MAX (1 << 4)
+#define NFQA_CFG_F_SECCTX (1 << 4)
+#define NFQA_CFG_F_MAX (1 << 5)
/* flags for NFQA_SKB_INFO */
/* packet appears to have wrong checksums, but they are ok */
diff --git a/include/uapi/linux/netfilter/xt_socket.h b/include/uapi/linux/netfilter/xt_socket.h
index 6315e2ac3474..87644f832494 100644
--- a/include/uapi/linux/netfilter/xt_socket.h
+++ b/include/uapi/linux/netfilter/xt_socket.h
@@ -6,6 +6,7 @@
enum {
XT_SOCKET_TRANSPARENT = 1 << 0,
XT_SOCKET_NOWILDCARD = 1 << 1,
+ XT_SOCKET_RESTORESKMARK = 1 << 2,
};
struct xt_socket_mtinfo1 {
@@ -18,4 +19,11 @@ struct xt_socket_mtinfo2 {
};
#define XT_SOCKET_FLAGS_V2 (XT_SOCKET_TRANSPARENT | XT_SOCKET_NOWILDCARD)
+struct xt_socket_mtinfo3 {
+ __u8 flags;
+};
+#define XT_SOCKET_FLAGS_V3 (XT_SOCKET_TRANSPARENT \
+ | XT_SOCKET_NOWILDCARD \
+ | XT_SOCKET_RESTORESKMARK)
+
#endif /* _XT_SOCKET_H */
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index ba993360dbe9..fd2ee501726d 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -6,15 +6,13 @@
*
* ebtables.c,v 2.0, April, 2002
*
- * This code is stongly inspired on the iptables code which is
+ * This code is strongly inspired by the iptables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
*/
#ifndef _UAPI__LINUX_BRIDGE_EFF_H
#define _UAPI__LINUX_BRIDGE_EFF_H
-#include <linux/if.h>
#include <linux/netfilter_bridge.h>
-#include <linux/if_ether.h>
#define EBT_TABLE_MAXNAMELEN 32
#define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 1a85940f8ab7..cf6a65cccbdf 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -101,13 +101,15 @@ struct nlmsgerr {
struct nlmsghdr msg;
};
-#define NETLINK_ADD_MEMBERSHIP 1
-#define NETLINK_DROP_MEMBERSHIP 2
-#define NETLINK_PKTINFO 3
-#define NETLINK_BROADCAST_ERROR 4
-#define NETLINK_NO_ENOBUFS 5
-#define NETLINK_RX_RING 6
-#define NETLINK_TX_RING 7
+#define NETLINK_ADD_MEMBERSHIP 1
+#define NETLINK_DROP_MEMBERSHIP 2
+#define NETLINK_PKTINFO 3
+#define NETLINK_BROADCAST_ERROR 4
+#define NETLINK_NO_ENOBUFS 5
+#define NETLINK_RX_RING 6
+#define NETLINK_TX_RING 7
+#define NETLINK_LISTEN_ALL_NSID 8
+#define NETLINK_LIST_MEMBERSHIPS 9
struct nl_pktinfo {
__u32 group;
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index c1e2e63cf9b5..dd3f75389076 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -86,6 +86,8 @@
* for this event is the application ID (AID).
* @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
* @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element.
+ * @NFC_CMD_VENDOR: Vendor specific command, to be implemented directly
+ * by the driver in order to support hardware specific operations.
*/
enum nfc_commands {
NFC_CMD_UNSPEC,
@@ -117,6 +119,7 @@ enum nfc_commands {
NFC_CMD_GET_SE,
NFC_CMD_SE_IO,
NFC_CMD_ACTIVATE_TARGET,
+ NFC_CMD_VENDOR,
/* private: internal use only */
__NFC_CMD_AFTER_LAST
};
@@ -153,6 +156,10 @@ enum nfc_commands {
* @NFC_ATTR_APDU: Secure element APDU
* @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier
* @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier
+ * @NFC_ATTR_VENDOR_ID: NFC manufacturer unique ID, typically an OUI
+ * @NFC_ATTR_VENDOR_SUBCMD: Vendor specific sub command
+ * @NFC_ATTR_VENDOR_DATA: Vendor specific data, to be optionally passed
+ * to a vendor specific command implementation
*/
enum nfc_attrs {
NFC_ATTR_UNSPEC,
@@ -184,6 +191,9 @@ enum nfc_attrs {
NFC_ATTR_TARGET_ISO15693_DSFID,
NFC_ATTR_TARGET_ISO15693_UID,
NFC_ATTR_SE_PARAMS,
+ NFC_ATTR_VENDOR_ID,
+ NFC_ATTR_VENDOR_SUBCMD,
+ NFC_ATTR_VENDOR_DATA,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 35f5f4c6c260..adc0aff83fbb 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -162,13 +162,6 @@
*/
#define NFS4_MAX_BACK_CHANNEL_OPS 2
-enum nfs4_acl_whotype {
- NFS4_ACL_WHO_NAMED = 0,
- NFS4_ACL_WHO_OWNER,
- NFS4_ACL_WHO_GROUP,
- NFS4_ACL_WHO_EVERYONE,
-};
-
#endif /* _UAPI_LINUX_NFS4_H */
/*
diff --git a/include/uapi/linux/nfs_idmap.h b/include/uapi/linux/nfs_idmap.h
index 8d4b1c7b24d4..038e36c96669 100644
--- a/include/uapi/linux/nfs_idmap.h
+++ b/include/uapi/linux/nfs_idmap.h
@@ -1,5 +1,5 @@
/*
- * include/linux/nfs_idmap.h
+ * include/uapi/linux/nfs_idmap.h
*
* UID and GID to name mapping for clients.
*
diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
index 0bf130a1c58d..28ec6c9c421a 100644
--- a/include/uapi/linux/nfsd/debug.h
+++ b/include/uapi/linux/nfsd/debug.h
@@ -12,14 +12,6 @@
#include <linux/sunrpc/debug.h>
/*
- * Enable debugging for nfsd.
- * Requires RPC_DEBUG.
- */
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define NFSD_DEBUG 1
-#endif
-
-/*
* knfsd debug flags
*/
#define NFSDDBG_SOCK 0x0001
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index d3bd6ffec041..0df7bd5d2fb1 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -21,6 +21,9 @@
/*
* Export flags.
+ *
+ * Please update the expflags[] array in fs/nfsd/export.c when adding
+ * a new flag.
*/
#define NFSEXP_READONLY 0x0001
#define NFSEXP_INSECURE_PORT 0x0002
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 68b294e83944..c0ab6b0a3919 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -25,6 +25,19 @@
*
*/
+/*
+ * This header file defines the userspace API to the wireless stack. Please
+ * be careful not to break things - i.e. don't move anything around or so
+ * unless you can demonstrate that it breaks neither API nor ABI.
+ *
+ * Additions to the API should be accompanied by actual implementations in
+ * an upstream driver, so that example implementations exist in case there
+ * are ever concerns about the precise semantics of the API or changes are
+ * needed, and to ensure that code for dead (no longer implemented) API
+ * can actually be identified and removed.
+ * Nonetheless, semantics should also be documented carefully in this file.
+ */
+
#include <linux/types.h>
#define NL80211_GENL_NAME "nl80211"
@@ -1684,6 +1697,10 @@ enum nl80211_commands {
* If set during scheduled scan start then the new scan req will be
* owned by the netlink socket that created it and the scheduled scan will
* be stopped when the socket is closed.
+ * If set during configuration of regulatory indoor operation then the
+ * regulatory indoor configuration would be owned by the netlink socket
+ * that configured the indoor setting, and the indoor operation would be
+ * cleared when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1737,8 +1754,12 @@ enum nl80211_commands {
* should be contained in the result as the sum of the respective counters
* over all channels.
*
- * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before a scheduled scan (or a
- * WoWLAN net-detect scan) is started, u32 in seconds.
+ * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before the first cycle of a
+ * scheduled scan (or a WoWLAN net-detect scan) is started, u32
+ * in seconds.
+ *
+ * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device
+ * is operating in an indoor environment.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2107,6 +2128,8 @@ enum nl80211_attrs {
NL80211_ATTR_SCHED_SCAN_DELAY,
+ NL80211_ATTR_REG_INDOOR,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2597,16 +2620,17 @@ enum nl80211_band_attr {
* an indoor surroundings, i.e., it is connected to AC power (and not
* through portable DC inverters) or is under the control of a master
* that is acting as an AP and is connected to AC power.
- * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
+ * @NL80211_FREQUENCY_ATTR_IR_CONCURRENT: IR operation is allowed on this
* channel if it's connected concurrently to a BSS on the same channel on
* the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
- * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
- * channel that has the GO_CONCURRENT attribute set can be done when there
- * is a clear assessment that the device is operating under the guidance of
- * an authorized master, i.e., setting up a GO while the device is also
- * connected to an AP with DFS and radar detection on the UNII band (it is
- * up to user-space, i.e., wpa_supplicant to perform the required
- * verifications)
+ * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO or TDLS
+ * off-channel on a channel that has the IR_CONCURRENT attribute set can be
+ * done when there is a clear assessment that the device is operating under
+ * the guidance of an authorized master, i.e., setting up a GO or TDLS
+ * off-channel while the device is also connected to an AP with DFS and
+ * radar detection on the UNII band (it is up to user-space, i.e.,
+ * wpa_supplicant to perform the required verifications). Using this
+ * attribute for IR is disallowed for master interfaces (IBSS, AP).
* @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
* on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
@@ -2618,7 +2642,7 @@ enum nl80211_band_attr {
* See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
* for more information on the FCC description of the relaxations allowed
* by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
- * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
+ * NL80211_FREQUENCY_ATTR_IR_CONCURRENT.
*/
enum nl80211_frequency_attr {
__NL80211_FREQUENCY_ATTR_INVALID,
@@ -2636,7 +2660,7 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_160MHZ,
NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
- NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
+ NL80211_FREQUENCY_ATTR_IR_CONCURRENT,
NL80211_FREQUENCY_ATTR_NO_20MHZ,
NL80211_FREQUENCY_ATTR_NO_10MHZ,
@@ -2649,6 +2673,8 @@ enum nl80211_frequency_attr {
#define NL80211_FREQUENCY_ATTR_PASSIVE_SCAN NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_NO_IBSS NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR
+#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
+ NL80211_FREQUENCY_ATTR_IR_CONCURRENT
/**
* enum nl80211_bitrate_attr - bitrate attributes
@@ -2807,7 +2833,7 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
* base on contiguous rules and wider channels will be allowed to cross
* multiple contiguous/overlapping frequency ranges.
- * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT
* @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
* @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
* @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -2824,7 +2850,7 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_IR = 1<<7,
__NL80211_RRF_NO_IBSS = 1<<8,
NL80211_RRF_AUTO_BW = 1<<11,
- NL80211_RRF_GO_CONCURRENT = 1<<12,
+ NL80211_RRF_IR_CONCURRENT = 1<<12,
NL80211_RRF_NO_HT40MINUS = 1<<13,
NL80211_RRF_NO_HT40PLUS = 1<<14,
NL80211_RRF_NO_80MHZ = 1<<15,
@@ -2836,6 +2862,7 @@ enum nl80211_reg_rule_flags {
#define NL80211_RRF_NO_IR NL80211_RRF_NO_IR
#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
NL80211_RRF_NO_HT40PLUS)
+#define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT
/* For backport compatibility with older userspace */
#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
@@ -3092,7 +3119,8 @@ enum nl80211_mesh_power_mode {
*
* @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've
* established peering with for longer than this time (in seconds), then
- * remove it from the STA's list of peers. Default is 30 minutes.
+ * remove it from the STA's list of peers. You may set this to 0 to disable
+ * the removal of the STA. Default is 30 minutes.
*
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
@@ -3694,6 +3722,8 @@ struct nl80211_pattern_support {
* @NL80211_WOWLAN_TRIG_ANY: wake up on any activity, do not really put
* the chip into a special state -- works best with chips that have
* support for low-power operation already (flag)
+ * Note that this mode is incompatible with all of the others, if
+ * any others are even supported by the device.
* @NL80211_WOWLAN_TRIG_DISCONNECT: wake up on disconnect, the way disconnect
* is detected is implementation-specific (flag)
* @NL80211_WOWLAN_TRIG_MAGIC_PKT: wake up on magic packet (6x 0xff, followed
@@ -4327,11 +4357,13 @@ enum nl80211_feature_flags {
/**
* enum nl80211_ext_feature_index - bit index of extended features.
+ * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
enum nl80211_ext_feature_index {
+ NL80211_EXT_FEATURE_VHT_IBSS,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index aef9a81b2d75..732b32e92b02 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -179,6 +179,10 @@ enum {
NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
};
+enum {
+ NVME_AER_NOTICE_NS_CHANGED = 0x0002,
+};
+
struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
@@ -579,5 +583,6 @@ struct nvme_passthru_cmd {
#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
+#define NVME_IOCTL_RESET _IO('N', 0x44)
#endif /* _UAPI_LINUX_NVME_H */
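A minimal sketch of issuing the new NVME_IOCTL_RESET on a controller character device; the "/dev/nvme0" path in the comment is illustrative, not mandated by the patch.

#include <fcntl.h>
#include <linux/nvme.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int reset_controller(const char *ctrl_dev)   /* e.g. "/dev/nvme0" */
{
        int fd = open(ctrl_dev, O_RDWR);
        int ret;

        if (fd < 0)
                return -1;
        ret = ioctl(fd, NVME_IOCTL_RESET);
        close(fd);
        return ret;
}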
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index bbd49a0c46c7..1dab77601c21 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -153,6 +153,8 @@ enum ovs_packet_cmd {
* flow key against the kernel's.
* @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used
* for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes.
+ * Also used in upcall when %OVS_ACTION_ATTR_USERSPACE has optional
+ * %OVS_USERSPACE_ATTR_ACTIONS attribute.
* @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
* notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
* %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
@@ -528,6 +530,7 @@ enum ovs_sample_attr {
* copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
* @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get
* tunnel info.
+ * @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall.
*/
enum ovs_userspace_attr {
OVS_USERSPACE_ATTR_UNSPEC,
@@ -535,6 +538,7 @@ enum ovs_userspace_attr {
OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */
OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port
* to get tunnel info. */
+ OVS_USERSPACE_ATTR_ACTIONS, /* Optional flag to get actions. */
__OVS_USERSPACE_ATTR_MAX
};
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9b79abbd1ab8..d97f84c080da 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -152,21 +152,44 @@ enum perf_event_sample_format {
* The branch types can be combined, however BRANCH_ANY covers all types
* of branches and therefore it supersedes all the other types.
*/
+enum perf_branch_sample_type_shift {
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
+
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
+ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
+
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
+ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
+
+ PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
+};
+
enum perf_branch_sample_type {
- PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */
- PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */
- PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */
-
- PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */
- PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */
- PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */
- PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */
- PERF_SAMPLE_BRANCH_ABORT_TX = 1U << 7, /* transaction aborts */
- PERF_SAMPLE_BRANCH_IN_TX = 1U << 8, /* in transaction */
- PERF_SAMPLE_BRANCH_NO_TX = 1U << 9, /* not in transaction */
- PERF_SAMPLE_BRANCH_COND = 1U << 10, /* conditional branches */
-
- PERF_SAMPLE_BRANCH_MAX = 1U << 11, /* non-ABI */
+ PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+ PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+ PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
+
+ PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
+
+ PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
+
+ PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
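Editorial note: the shift enum changes no ABI values; it only names the bit positions so tools can walk a branch_sample_type mask without hard-coding them. An illustrative sketch (not from the patch):

    #include <stdio.h>
    #include <linux/perf_event.h>

    /* Print which branch-sample bits are set, by shift index. */
    static void print_branch_bits(__u64 branch_sample_type)
    {
        for (int shift = 0; shift < PERF_SAMPLE_BRANCH_MAX_SHIFT; shift++)
            if (branch_sample_type & (1ULL << shift))
                printf("branch sample bit %d set\n", shift);
    }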
#define PERF_SAMPLE_BRANCH_PLM_ALL \
@@ -240,6 +263,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
+#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -305,7 +329,8 @@ struct perf_event_attr {
exclude_callchain_user : 1, /* exclude user callchains */
mmap2 : 1, /* include mmap with inode data */
comm_exec : 1, /* flag comm events that are due to an exec */
- __reserved_1 : 39;
+ use_clockid : 1, /* use @clockid for time fields */
+ __reserved_1 : 38;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -334,8 +359,7 @@ struct perf_event_attr {
*/
__u32 sample_stack_user;
- /* Align to u64. */
- __u32 __reserved_2;
+ __s32 clockid;
/*
* Defines set of regs to dump for each sample
* state captured on:
@@ -345,6 +369,12 @@ struct perf_event_attr {
* See asm/perf_regs.h for details.
*/
__u64 sample_regs_intr;
+
+ /*
+ * Wakeup watermark for AUX area
+ */
+ __u32 aux_watermark;
+ __u32 __reserved_2; /* align to __u64 */
};
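Editorial note: a sketch of how userspace would use the new attr fields: pick the clock for time fields with use_clockid/clockid, and request an AUX wakeup per amount of data with aux_watermark (only meaningful for PMUs that produce AUX data). The type/config pair below is a placeholder software event:

    #include <string.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int open_dummy_event(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_DUMMY;
        attr.use_clockid = 1;
        attr.clockid = CLOCK_MONOTONIC_RAW;   /* timestamps use this clock */
        attr.aux_watermark = 4096;            /* wake up per 4 KiB of AUX data */

        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }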
#define perf_flags(attr) (*(&(attr)->read_format + 1))
@@ -360,6 +390,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
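Editorial note: PERF_EVENT_IOC_SET_BPF attaches an already-loaded BPF program (by file descriptor) to the perf event. A hedged sketch; both descriptors are placeholders obtained elsewhere:

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* bpf_prog_fd comes from a prior bpf(BPF_PROG_LOAD, ...) call. */
    static int attach_bpf_prog(int perf_fd, int bpf_prog_fd)
    {
        return ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd);
    }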
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -500,9 +531,30 @@ struct perf_event_mmap_page {
* In this case the kernel will not over-write unread data.
*
* See perf_output_put_handle() for the data ordering.
+ *
+ * data_{offset,size} indicate the location and size of the perf record
+ * buffer within the mmapped area.
*/
__u64 data_head; /* head in the data section */
__u64 data_tail; /* user-space written tail */
+ __u64 data_offset; /* where the buffer starts */
+ __u64 data_size; /* data buffer size */
+
+ /*
+ * AUX area is defined by aux_{offset,size} fields that should be set
+ * by the userspace, so that
+ *
+ * aux_offset >= data_offset + data_size
+ *
+ * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
+ *
+ * Ring buffer pointers aux_{head,tail} have the same semantics as
+ * data_{head,tail} and same ordering rules apply.
+ */
+ __u64 aux_head;
+ __u64 aux_tail;
+ __u64 aux_offset;
+ __u64 aux_size;
};
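Editorial note: a sketch of the AUX mapping sequence the comment describes: after mmap()ing the metadata and data pages, userspace fills in aux_offset/aux_size and then mmap()s that range of the same fd. Error handling and the initial data mapping are omitted:

    #include <sys/mman.h>
    #include <linux/perf_event.h>

    static void *map_aux_area(int perf_fd, struct perf_event_mmap_page *meta,
                              size_t page_size, size_t aux_pages)
    {
        /* AUX area must start at or after the end of the data buffer. */
        meta->aux_offset = meta->data_offset + meta->data_size;
        meta->aux_size = aux_pages * page_size;

        return mmap(NULL, meta->aux_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    perf_fd, meta->aux_offset);
    }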
#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
@@ -514,6 +566,10 @@ struct perf_event_mmap_page {
#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
/*
+ * Indicates that /proc/PID/maps parsing was truncated by a timeout.
+ */
+#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
+/*
* PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
* different events so can reuse the same bit position.
*/
@@ -725,6 +781,43 @@ enum perf_event_type {
*/
PERF_RECORD_MMAP2 = 10,
+ /*
+ * Records that new data landed in the AUX buffer part.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 aux_offset;
+ * u64 aux_size;
+ * u64 flags;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX = 11,
+
+ /*
+ * Indicates that instruction trace has started
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * };
+ */
+ PERF_RECORD_ITRACE_START = 12,
+
+ /*
+ * Records the number of dropped/lost samples.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_LOST_SAMPLES = 13,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -742,6 +835,12 @@ enum perf_callchain_context {
PERF_CONTEXT_MAX = (__u64)-4095,
};
+/**
+ * PERF_RECORD_AUX::flags bits
+ */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 25731dfb3fcc..4f0d1bc3647d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/pkt_sched.h>
+#ifdef __KERNEL__
/* I think i could have done better macros ; for now this is stolen from
* some arch/mips code - jhs
*/
@@ -35,20 +36,6 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
*
* */
-#define TC_MUNGED _TC_MAKEMASK1(0)
-#define SET_TC_MUNGED(v) ( TC_MUNGED | (v & ~TC_MUNGED))
-#define CLR_TC_MUNGED(v) ( v & ~TC_MUNGED)
-
-#define TC_OK2MUNGE _TC_MAKEMASK1(1)
-#define SET_TC_OK2MUNGE(v) ( TC_OK2MUNGE | (v & ~TC_OK2MUNGE))
-#define CLR_TC_OK2MUNGE(v) ( v & ~TC_OK2MUNGE)
-
-#define S_TC_VERD _TC_MAKE32(2)
-#define M_TC_VERD _TC_MAKEMASK(4,S_TC_VERD)
-#define G_TC_VERD(x) _TC_GETVALUE(x,S_TC_VERD,M_TC_VERD)
-#define V_TC_VERD(x) _TC_MAKEVALUE(x,S_TC_VERD)
-#define SET_TC_VERD(v,n) ((V_TC_VERD(n)) | (v & ~M_TC_VERD))
-
#define S_TC_FROM _TC_MAKE32(6)
#define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM)
#define G_TC_FROM(x) _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM)
@@ -62,18 +49,16 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
#define SET_TC_NCLS(v) ( TC_NCLS | (v & ~TC_NCLS))
#define CLR_TC_NCLS(v) ( v & ~TC_NCLS)
-#define S_TC_RTTL _TC_MAKE32(9)
-#define M_TC_RTTL _TC_MAKEMASK(3,S_TC_RTTL)
-#define G_TC_RTTL(x) _TC_GETVALUE(x,S_TC_RTTL,M_TC_RTTL)
-#define V_TC_RTTL(x) _TC_MAKEVALUE(x,S_TC_RTTL)
-#define SET_TC_RTTL(v,n) ((V_TC_RTTL(n)) | (v & ~M_TC_RTTL))
-
#define S_TC_AT _TC_MAKE32(12)
#define M_TC_AT _TC_MAKEMASK(2,S_TC_AT)
#define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT)
#define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT)
#define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT))
+#define MAX_REC_LOOP 4
+#define MAX_RED_LOOP 4
+#endif
+
/* Action attributes */
enum {
TCA_ACT_UNSPEC,
@@ -93,8 +78,6 @@ enum {
#define TCA_ACT_NOUNBIND 0
#define TCA_ACT_REPLACE 1
#define TCA_ACT_NOREPLACE 0
-#define MAX_REC_LOOP 4
-#define MAX_RED_LOOP 4
#define TC_ACT_UNSPEC (-1)
#define TC_ACT_OK 0
@@ -397,11 +380,43 @@ enum {
TCA_BPF_CLASSID,
TCA_BPF_OPS_LEN,
TCA_BPF_OPS,
+ TCA_BPF_FD,
+ TCA_BPF_NAME,
__TCA_BPF_MAX,
};
#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
+/* Flower classifier */
+
+enum {
+ TCA_FLOWER_UNSPEC,
+ TCA_FLOWER_CLASSID,
+ TCA_FLOWER_INDEV,
+ TCA_FLOWER_ACT,
+ TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
+ TCA_FLOWER_KEY_IP_PROTO, /* u8 */
+ TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_TCP_SRC, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST, /* be16 */
+ __TCA_FLOWER_MAX,
+};
+
+#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+
/* Extended Matches */
struct tcf_ematch_tree_hdr {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 534b84710745..8d2530daca9f 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -268,7 +268,8 @@ enum {
TCA_GRED_STAB,
TCA_GRED_DPS,
TCA_GRED_MAX_P,
- __TCA_GRED_MAX,
+ TCA_GRED_LIMIT,
+ __TCA_GRED_MAX,
};
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
@@ -679,6 +680,7 @@ enum {
TCA_CODEL_LIMIT,
TCA_CODEL_INTERVAL,
TCA_CODEL_ECN,
+ TCA_CODEL_CE_THRESHOLD,
__TCA_CODEL_MAX
};
@@ -695,6 +697,7 @@ struct tc_codel_xstats {
__u32 drop_overlimit; /* number of times max qdisc packet limit was hit */
__u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
__u32 dropping; /* are we in dropping state ? */
+ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
};
/* FQ_CODEL */
@@ -707,6 +710,7 @@ enum {
TCA_FQ_CODEL_ECN,
TCA_FQ_CODEL_FLOWS,
TCA_FQ_CODEL_QUANTUM,
+ TCA_FQ_CODEL_CE_THRESHOLD,
__TCA_FQ_CODEL_MAX
};
@@ -730,6 +734,7 @@ struct tc_fq_codel_qd_stats {
*/
__u32 new_flows_len; /* count of flows in new list */
__u32 old_flows_len; /* count of flows in old list */
+ __u32 ce_mark; /* packets above ce_threshold */
};
struct tc_fq_codel_cl_stats {
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 1f49b8341c99..9c95b2c1c88a 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -36,11 +36,12 @@
#include <linux/errno.h>
#include <linux/types.h>
-#define __DQUOT_VERSION__ "dquot_6.5.2"
+#define __DQUOT_VERSION__ "dquot_6.6.0"
-#define MAXQUOTAS 2
+#define MAXQUOTAS 3
#define USRQUOTA 0 /* element used for user quotas */
#define GRPQUOTA 1 /* element used for group quotas */
+#define PRJQUOTA 2 /* element used for project quotas */
/*
* Definitions for the default names of the quotas files.
@@ -48,6 +49,7 @@
#define INITQFNAMES { \
"user", /* USRQUOTA */ \
"group", /* GRPQUOTA */ \
+ "project", /* PRJQUOTA */ \
"undefined", \
};
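Editorial note: with PRJQUOTA added, the regular quotactl() interface can address project quotas on filesystems that support them. A hedged sketch; the device path and project ID are placeholders, and PRJQUOTA is defined locally in case the libc headers predate it:

    #include <stdio.h>
    #include <sys/quota.h>

    #ifndef PRJQUOTA
    #define PRJQUOTA 2              /* value from linux/quota.h */
    #endif

    static void show_project_quota(void)
    {
        struct dqblk dq;

        if (quotactl(QCMD(Q_GETQUOTA, PRJQUOTA), "/dev/sda1", 42,
                     (caddr_t)&dq) == 0)
            printf("project 42: hard block limit %llu\n",
                   (unsigned long long)dq.dqb_bhardlimit);
    }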
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 49f4210d4394..2ae6131e69a5 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -78,6 +78,12 @@
#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */
#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */
#define MD_DISK_REMOVED 3 /* disk is in sync with the raid set */
+#define MD_DISK_CLUSTER_ADD 4 /* Initiate a disk add across the cluster
+ * For clustered environments only.
+ */
+#define MD_DISK_CANDIDATE 5 /* disk is added as spare (local) until confirmed
+ * For clustered environments only.
+ */
#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" in RAID1 config.
* read requests will only be sent here in
@@ -101,6 +107,7 @@ typedef struct mdp_device_descriptor_s {
#define MD_SB_CLEAN 0
#define MD_SB_ERRORS 1
+#define MD_SB_CLUSTERED 5 /* MD is clustered */
#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */
/*
diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
index 74e7c60c4716..1cb8aa6850b5 100644
--- a/include/uapi/linux/raid/md_u.h
+++ b/include/uapi/linux/raid/md_u.h
@@ -62,6 +62,7 @@
#define STOP_ARRAY _IO (MD_MAJOR, 0x32)
#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
+#define CLUSTERED_DISK_NACK _IO (MD_MAJOR, 0x35)
/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 91950950aa59..0f9265cb2a96 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -38,6 +38,8 @@
#define RDS_IB_ABI_VERSION 0x301
+#define SOL_RDS 276
+
/*
* setsockopt/getsockopt for SOL_RDS
*/
@@ -48,6 +50,14 @@
#define RDS_RECVERR 5
#define RDS_CONG_MONITOR 6
#define RDS_GET_MR_FOR_DEST 7
+#define SO_RDS_TRANSPORT 8
+
+/* supported values for SO_RDS_TRANSPORT */
+#define RDS_TRANS_IB 0
+#define RDS_TRANS_IWARP 1
+#define RDS_TRANS_TCP 2
+#define RDS_TRANS_COUNT 3
+#define RDS_TRANS_NONE (~0)
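Editorial note: SO_RDS_TRANSPORT binds an RDS socket to a single transport before it is used; the RDS_TRANS_* values are the accepted argument. A sketch, assuming the kernel has RDS and the TCP transport built; PF_RDS is defined locally in case the libc headers lack it:

    #include <sys/socket.h>
    #include <linux/rds.h>

    #ifndef PF_RDS
    #define PF_RDS 21               /* AF_RDS/PF_RDS from linux/socket.h */
    #endif

    static int open_rds_tcp_socket(void)
    {
        int transport = RDS_TRANS_TCP;
        int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);

        if (fd >= 0)
            setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT,
                       &transport, sizeof(transport));
        return fd;
    }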
/*
* Control message types for SOL_RDS.
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 5cc5d66bf519..fdd8f07f1d34 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -134,6 +134,8 @@ enum {
RTM_NEWNSID = 88,
#define RTM_NEWNSID RTM_NEWNSID
+ RTM_DELNSID = 89,
+#define RTM_DELNSID RTM_DELNSID
RTM_GETNSID = 90,
#define RTM_GETNSID RTM_GETNSID
@@ -303,6 +305,9 @@ enum rtattr_type_t {
RTA_TABLE,
RTA_MARK,
RTA_MFC_STATS,
+ RTA_VIA,
+ RTA_NEWDST,
+ RTA_PREF,
__RTA_MAX
};
@@ -332,6 +337,10 @@ struct rtnexthop {
#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
+#define RTNH_F_OFFLOAD 8 /* offloaded route */
+#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
+
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
/* Macros to handle nexthops */
@@ -344,6 +353,12 @@ struct rtnexthop {
#define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len))
#define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0)))
+/* RTA_VIA */
+struct rtvia {
+ __kernel_sa_family_t rtvia_family;
+ __u8 rtvia_addr[0];
+};
+
/* RTM_CACHEINFO */
struct rta_cacheinfo {
@@ -623,6 +638,10 @@ enum rtnetlink_groups {
#define RTNLGRP_IPV6_NETCONF RTNLGRP_IPV6_NETCONF
RTNLGRP_MDB,
#define RTNLGRP_MDB RTNLGRP_MDB
+ RTNLGRP_MPLS_ROUTE,
+#define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE
+ RTNLGRP_NSID,
+#define RTNLGRP_NSID RTNLGRP_NSID
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 00adb01fa5f3..e9b4cb0cd7ed 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -242,25 +242,6 @@
#define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */
/*
- * Intel MID on-chip HSU (High Speed UART) defined bits
- */
-#define UART_FCR_HSU_64_1B 0x00 /* receive FIFO treshold = 1 */
-#define UART_FCR_HSU_64_16B 0x40 /* receive FIFO treshold = 16 */
-#define UART_FCR_HSU_64_32B 0x80 /* receive FIFO treshold = 32 */
-#define UART_FCR_HSU_64_56B 0xc0 /* receive FIFO treshold = 56 */
-
-#define UART_FCR_HSU_16_1B 0x00 /* receive FIFO treshold = 1 */
-#define UART_FCR_HSU_16_4B 0x40 /* receive FIFO treshold = 4 */
-#define UART_FCR_HSU_16_8B 0x80 /* receive FIFO treshold = 8 */
-#define UART_FCR_HSU_16_14B 0xc0 /* receive FIFO treshold = 14 */
-
-#define UART_FCR_HSU_64B_FIFO 0x20 /* chose 64 bytes FIFO */
-#define UART_FCR_HSU_16B_FIFO 0x00 /* chose 16 bytes FIFO */
-
-#define UART_FCR_HALF_EMPT_TXI 0x00 /* trigger TX_EMPT IRQ for half empty */
-#define UART_FCR_FULL_EMPT_TXI 0x08 /* trigger TX_EMPT IRQ for full empty */
-
-/*
* These register definitions are for the 16C950
*/
#define UART_ASR 0x01 /* Additional Status Register */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 6a6fb747c78d..eee8968407f0 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -276,6 +276,8 @@ enum
LINUX_MIB_TCPACKSKIPPEDFINWAIT2, /* TCPACKSkippedFinWait2 */
LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, /* TCPACKSkippedTimeWait */
LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
+ LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */
+ LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index b00e29efb161..49230d36f9ce 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -23,4 +23,14 @@ enum {
SK_MEMINFO_VARS,
};
+enum sknetlink_groups {
+ SKNLGRP_NONE,
+ SKNLGRP_INET_TCP_DESTROY,
+ SKNLGRP_INET_UDP_DESTROY,
+ SKNLGRP_INET6_TCP_DESTROY,
+ SKNLGRP_INET6_UDP_DESTROY,
+ __SKNLGRP_MAX,
+};
+#define SKNLGRP_MAX (__SKNLGRP_MAX - 1)
+
#endif /* _UAPI__SOCK_DIAG_H__ */
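Editorial note: the new sknetlink_groups are multicast groups on the sock_diag netlink family; a listener joins one with NETLINK_ADD_MEMBERSHIP and then receives destroy notifications. A sketch, with SOL_NETLINK defined locally in case the libc headers lack it:

    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/sock_diag.h>

    #ifndef SOL_NETLINK
    #define SOL_NETLINK 270
    #endif

    static int open_tcp_destroy_listener(void)
    {
        int group = SKNLGRP_INET_TCP_DESTROY;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

        if (fd >= 0)
            setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                       &group, sizeof(group));
        return fd;
    }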
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b483d1909d3e..b67f99d3c520 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
#include <linux/uio.h>
-#define TCMU_VERSION "1.0"
+#define TCMU_VERSION "2.0"
/*
* Ring Design
@@ -39,9 +39,13 @@
* should process the next packet the same way, and so on.
*/
-#define TCMU_MAILBOX_VERSION 1
+#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
+/* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */
+#define xstr(s) str(s)
+#define str(s) #s
+
struct tcmu_mailbox {
__u16 version;
__u16 flags;
@@ -64,31 +68,36 @@ enum tcmu_opcode {
* Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
*/
struct tcmu_cmd_entry_hdr {
- __u32 len_op;
+ __u32 len_op;
+ __u16 cmd_id;
+ __u8 kflags;
+#define TCMU_UFLAG_UNKNOWN_OP 0x1
+ __u8 uflags;
+
} __packed;
#define TCMU_OP_MASK 0x7
-static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr)
+static inline enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op)
{
- return hdr->len_op & TCMU_OP_MASK;
+ return len_op & TCMU_OP_MASK;
}
-static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op)
+static inline void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op)
{
- hdr->len_op &= ~TCMU_OP_MASK;
- hdr->len_op |= (op & TCMU_OP_MASK);
+ *len_op &= ~TCMU_OP_MASK;
+ *len_op |= (op & TCMU_OP_MASK);
}
-static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr)
+static inline __u32 tcmu_hdr_get_len(__u32 len_op)
{
- return hdr->len_op & ~TCMU_OP_MASK;
+ return len_op & ~TCMU_OP_MASK;
}
-static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
+static inline void tcmu_hdr_set_len(__u32 *len_op, __u32 len)
{
- hdr->len_op &= TCMU_OP_MASK;
- hdr->len_op |= len;
+ *len_op &= TCMU_OP_MASK;
+ *len_op |= len;
}
/* Currently the same as SCSI_SENSE_BUFFERSIZE */
@@ -97,13 +106,14 @@ static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
struct tcmu_cmd_entry {
struct tcmu_cmd_entry_hdr hdr;
- uint16_t cmd_id;
- uint16_t __pad1;
-
union {
struct {
+ uint32_t iov_cnt;
+ uint32_t iov_bidi_cnt;
+ uint32_t iov_dif_cnt;
uint64_t cdb_off;
- uint64_t iov_cnt;
+ uint64_t __pad1;
+ uint64_t __pad2;
struct iovec iov[0];
} req;
struct {
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 5288bd77e63b..07f17cc70bb3 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -24,6 +24,8 @@ enum {
TCA_ACT_BPF_PARMS,
TCA_ACT_BPF_OPS_LEN,
TCA_ACT_BPF_OPS,
+ TCA_ACT_BPF_FD,
+ TCA_ACT_BPF_NAME,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 3b9718328d8b..65a77b071e22 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -112,6 +112,9 @@ enum {
#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
#define TCP_TIMESTAMP 24
#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
+#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */
+#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */
+#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */
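Editorial note: TCP_SAVE_SYN is set on a listening socket; TCP_SAVED_SYN is then read on an accepted socket and returns the saved SYN's IP and TCP headers. A sketch, with the option values defined locally in case the libc headers predate them:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_SAVE_SYN
    #define TCP_SAVE_SYN  27
    #define TCP_SAVED_SYN 28
    #endif

    static int enable_save_syn(int listen_fd)
    {
        int one = 1;

        return setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
                          &one, sizeof(one));
    }

    /* *len is in/out: pass the buffer size, get the saved header length back. */
    static int read_saved_syn(int conn_fd, unsigned char *buf, socklen_t *len)
    {
        return getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, len);
    }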
struct tcp_repair_opt {
__u32 opt_code;
@@ -189,6 +192,10 @@ struct tcp_info {
__u64 tcpi_pacing_rate;
__u64 tcpi_max_pacing_rate;
+ __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
+ __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
+ __u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */
+ __u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 8d723824ad69..d4c8f142ba63 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -83,11 +83,20 @@ enum {
TIPC_NLA_BEARER_NAME, /* string */
TIPC_NLA_BEARER_PROP, /* nest */
TIPC_NLA_BEARER_DOMAIN, /* u32 */
+ TIPC_NLA_BEARER_UDP_OPTS, /* nest */
__TIPC_NLA_BEARER_MAX,
TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1
};
+enum {
+ TIPC_NLA_UDP_UNSPEC,
+ TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */
+ TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */
+
+ __TIPC_NLA_UDP_MAX,
+ TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
+};
/* Socket info */
enum {
TIPC_NLA_SOCK_UNSPEC,
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index dac199a2dba5..01c4410352ff 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -34,5 +34,6 @@
#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */
#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */
+#define N_NCI 25 /* NFC NCI UART */
#endif /* _UAPI_LINUX_TTY_H */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 6c8f159e416e..c039f1d68a09 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -48,14 +48,15 @@
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 480, 1, 0, \
13500000, 19, 62, 57, 4, 3, 15, 4, 3, 16, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_720X480P59_94 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 480, 0, 0, \
27000000, 16, 62, 60, 9, 6, 30, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
/* Note: these are the nominal timings, for HDMI links this format is typically
@@ -64,14 +65,15 @@
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 576, 1, 0, \
13500000, 12, 63, 69, 2, 3, 19, 2, 3, 20, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_720X576P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 576, 0, 0, \
27000000, 12, 64, 68, 5, 5, 39, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P24 { \
@@ -88,7 +90,7 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 2420, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P30 { \
@@ -96,7 +98,8 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P50 { \
@@ -104,7 +107,7 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 440, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P60 { \
@@ -112,7 +115,8 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 110, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P24 { \
@@ -120,7 +124,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 638, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P25 { \
@@ -128,7 +133,7 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P30 { \
@@ -136,7 +141,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080I50 { \
@@ -144,7 +150,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 1, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 528, 44, 148, 2, 5, 15, 2, 5, 16, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P50 { \
@@ -152,7 +159,7 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
148500000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080I60 { \
@@ -161,7 +168,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 88, 44, 148, 2, 5, 15, 2, 5, 16, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P60 { \
@@ -170,77 +178,83 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
148500000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index e0a7e3da498a..dbce2b554e02 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -69,12 +69,14 @@ struct v4l2_subdev_crop {
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_mbus_code_enum {
__u32 pad;
__u32 index;
__u32 code;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
/**
@@ -82,6 +84,7 @@ struct v4l2_subdev_mbus_code_enum {
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_frame_size_enum {
__u32 index;
@@ -91,7 +94,8 @@ struct v4l2_subdev_frame_size_enum {
__u32 max_width;
__u32 min_height;
__u32 max_height;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
/**
@@ -113,6 +117,7 @@ struct v4l2_subdev_frame_interval {
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_frame_interval_enum {
__u32 index;
@@ -121,7 +126,8 @@ struct v4l2_subdev_frame_interval_enum {
__u32 width;
__u32 height;
struct v4l2_fract interval;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
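Editorial note: the new which field selects between the active and the try (per-file-handle) configuration during enumeration, mirroring struct v4l2_subdev_format. A sketch enumerating active media bus codes on pad 0; the subdev node is assumed to be open already:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/v4l2-subdev.h>

    static unsigned int count_active_codes(int subdev_fd)
    {
        struct v4l2_subdev_mbus_code_enum mbus;
        unsigned int count = 0;

        memset(&mbus, 0, sizeof(mbus));
        mbus.pad = 0;
        mbus.which = V4L2_SUBDEV_FORMAT_ACTIVE;

        for (mbus.index = 0;
             ioctl(subdev_fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, &mbus) == 0;
             mbus.index++)
            count++;        /* mbus.code holds a MEDIA_BUS_FMT_* value */

        return count;
    }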
/**
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 82889c30f4f5..9fd7b5d8df2f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -36,6 +36,8 @@
/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+#define VFIO_SPAPR_TCE_v2_IOMMU 7
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
@@ -160,6 +162,8 @@ struct vfio_device_info {
__u32 flags;
#define VFIO_DEVICE_FLAGS_RESET (1 << 0) /* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
+#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
+#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -441,6 +445,23 @@ struct vfio_iommu_type1_dma_unmap {
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
+ * The SPAPR TCE DDW info struct provides the information about
+ * the details of Dynamic DMA window capability.
+ *
+ * @pgsizes contains a page size bitmask; 4K/64K/16M are supported.
+ * @max_dynamic_windows_supported tells the maximum number of windows
+ * which the platform can create.
+ * @levels tells the maximum number of levels in multi-level IOMMU tables;
+ * this allows splitting a table into smaller chunks which reduces
+ * the amount of physically contiguous memory required for the table.
+ */
+struct vfio_iommu_spapr_tce_ddw_info {
+ __u64 pgsizes; /* Bitmap of supported page sizes */
+ __u32 max_dynamic_windows_supported;
+ __u32 levels;
+};
+
+/*
* The SPAPR TCE info struct provides the information about the PCI bus
* address ranges available for DMA, these values are programmed into
* the hardware so the guest has to know that information.
@@ -450,14 +471,17 @@ struct vfio_iommu_type1_dma_unmap {
* addresses too so the window works as a filter rather than an offset
* for IOVA addresses.
*
- * A flag will need to be added if other page sizes are supported,
- * so as defined here, it is always 4k.
+ * Flags supported:
+ * - VFIO_IOMMU_SPAPR_INFO_DDW: informs userspace that dynamic DMA window
+ * (DDW) support is present. @ddw is only valid when DDW is present.
*/
struct vfio_iommu_spapr_tce_info {
__u32 argsz;
- __u32 flags; /* reserved for future use */
+ __u32 flags;
+#define VFIO_IOMMU_SPAPR_INFO_DDW (1 << 0) /* DDW supported */
__u32 dma32_window_start; /* 32 bit window start (bytes) */
__u32 dma32_window_size; /* 32 bit window size (bytes) */
+ struct vfio_iommu_spapr_tce_ddw_info ddw;
};
#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
@@ -468,12 +492,23 @@ struct vfio_iommu_spapr_tce_info {
* - unfreeze IO/DMA for frozen PE;
* - read PE state;
* - reset PE;
- * - configure PE.
+ * - configure PE;
+ * - inject EEH error.
*/
+struct vfio_eeh_pe_err {
+ __u32 type;
+ __u32 func;
+ __u64 addr;
+ __u64 mask;
+};
+
struct vfio_eeh_pe_op {
__u32 argsz;
__u32 flags;
__u32 op;
+ union {
+ struct vfio_eeh_pe_err err;
+ };
};
#define VFIO_EEH_PE_DISABLE 0 /* Disable EEH functionality */
@@ -490,9 +525,70 @@ struct vfio_eeh_pe_op {
#define VFIO_EEH_PE_RESET_HOT 6 /* Assert hot reset */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL 7 /* Assert fundamental reset */
#define VFIO_EEH_PE_CONFIGURE 8 /* PE configuration */
+#define VFIO_EEH_PE_INJECT_ERR 9 /* Inject EEH error */
#define VFIO_EEH_PE_OP _IO(VFIO_TYPE, VFIO_BASE + 21)
+/**
+ * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
+ *
+ * Registers user space memory where DMA is allowed. It pins
+ * user pages and does the locked memory accounting so
+ * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
+ * get faster.
+ */
+struct vfio_iommu_spapr_register_memory {
+ __u32 argsz;
+ __u32 flags;
+ __u64 vaddr; /* Process virtual address */
+ __u64 size; /* Size of mapping (bytes) */
+};
+#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
+
+/**
+ * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
+ *
+ * Unregisters user space memory registered with
+ * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
+ * Uses vfio_iommu_spapr_register_memory for parameters.
+ */
+#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 18)
+
+/**
+ * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
+ *
+ * Creates an additional TCE table and programs it (sets a new DMA window)
+ * into every IOMMU group in the container. It receives the page shift, the
+ * window size and the number of levels of the TCE table being created.
+ *
+ * It allocates the new DMA window and returns its offset on the PCI bus.
+ */
+struct vfio_iommu_spapr_tce_create {
+ __u32 argsz;
+ __u32 flags;
+ /* in */
+ __u32 page_shift;
+ __u64 window_size;
+ __u32 levels;
+ /* out */
+ __u64 start_addr;
+};
+#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
+
+/**
+ * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
+ *
+ * Unprograms a TCE table from all groups in the container and destroys it.
+ * It receives a PCI bus offset as a window id.
+ */
+struct vfio_iommu_spapr_tce_remove {
+ __u32 argsz;
+ __u32 flags;
+ /* in */
+ __u64 start_addr;
+};
+#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
+
/* ***************************************************************** */
#endif /* _UAPIVFIO_H */
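Editorial note: a sketch of the dynamic-DMA-window sequence these ioctls enable: preregister the memory that will be DMA-mapped, then create an additional TCE table (DMA window). The container is assumed to already use the SPAPR TCE v2 IOMMU; the sizes and page shift are placeholders:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    static int create_second_window(int container_fd, void *mem, __u64 size)
    {
        struct vfio_iommu_spapr_register_memory reg;
        struct vfio_iommu_spapr_tce_create create;

        memset(&reg, 0, sizeof(reg));
        reg.argsz = sizeof(reg);
        reg.vaddr = (__u64)(unsigned long)mem;
        reg.size = size;
        if (ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg))
            return -1;

        memset(&create, 0, sizeof(create));
        create.argsz = sizeof(create);
        create.page_shift = 16;         /* 64K IOMMU pages */
        create.window_size = size;
        create.levels = 1;
        if (ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create))
            return -1;

        /* create.start_addr now holds the bus offset of the new window. */
        return 0;
    }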
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index fbdc3602ee27..fa376f7666ba 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -268,9 +268,10 @@ enum v4l2_ycbcr_encoding {
enum v4l2_quantization {
/*
- * The default for R'G'B' quantization is always full range. For
- * Y'CbCr the quantization is always limited range, except for
- * SYCC, XV601, XV709 or JPEG: those are full range.
+ * The default for R'G'B' quantization is always full range, except
+ * for the BT2020 colorspace. For Y'CbCr the quantization is always
+ * limited range, except for COLORSPACE_JPEG, SYCC, XV601 or XV709:
+ * those are full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -1187,6 +1188,12 @@ struct v4l2_bt_timings {
exactly the same number of half-lines. Whether half-lines can be detected
or used depends on the hardware. */
#define V4L2_DV_FL_HALF_LINE (1 << 3)
+/* If set, then this is a Consumer Electronics (CE) video format. Such formats
+ * differ from other formats (commonly called IT formats) in that if RGB
+ * encoding is used then by default the RGB values use limited range (i.e.
+ * use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
+ * except for the 640x480 format are CE formats. */
+#define V4L2_DV_FL_IS_CE_VIDEO (1 << 4)
/* A few useful defines to calculate the total blanking and frame sizes */
#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -1456,6 +1463,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
+#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -1841,8 +1849,8 @@ struct v4l2_mpeg_vbi_fmt_ivtv {
*/
struct v4l2_plane_pix_format {
__u32 sizeimage;
- __u16 bytesperline;
- __u16 reserved[7];
+ __u32 bytesperline;
+ __u16 reserved[6];
} __attribute__ ((packed));
/**
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 4b0488f20b2e..d7f1cbc3766c 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -25,6 +25,8 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
+#include <linux/types.h>
+#include <linux/virtio_types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -38,9 +40,9 @@
struct virtio_balloon_config {
/* Number of pages host wants Guest to give up. */
- __le32 num_pages;
+ __u32 num_pages;
/* Number of pages we've actually got in balloon. */
- __le32 actual;
+ __u32 actual;
};
#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
@@ -51,9 +53,32 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
#define VIRTIO_BALLOON_S_NR 6
+/*
+ * Memory statistics structure.
+ * Driver fills an array of these structures and passes to device.
+ *
+ * NOTE: fields are laid out in a way that would make the compiler add padding
+ * between and after fields, so we have to use compiler-specific attributes to
+ * pack it and disable this padding. This also often causes the compiler to
+ * generate suboptimal code.
+ *
+ * We maintain this statistics structure format for backwards compatibility,
+ * but don't follow this example.
+ *
+ * If implementing a similar structure, do something like the below instead:
+ * struct virtio_balloon_stat {
+ * __virtio16 tag;
+ * __u8 reserved[6];
+ * __virtio64 val;
+ * };
+ *
+ * In other words, add explicit reserved fields to align field and
+ * structure boundaries at field size, avoiding compiler padding
+ * without the packed attribute.
+ */
struct virtio_balloon_stat {
- __u16 tag;
- __u64 val;
+ __virtio16 tag;
+ __virtio64 val;
} __attribute__((packed));
#endif /* _LINUX_VIRTIO_BALLOON_H */
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 284fc3a05f7b..5f60aa4be50a 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -39,5 +39,6 @@
#define VIRTIO_ID_9P 9 /* 9p virtio console */
#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
#define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_INPUT 18 /* virtio input */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_input.h b/include/uapi/linux/virtio_input.h
new file mode 100644
index 000000000000..a7fe5c8fb135
--- /dev/null
+++ b/include/uapi/linux/virtio_input.h
@@ -0,0 +1,76 @@
+#ifndef _LINUX_VIRTIO_INPUT_H
+#define _LINUX_VIRTIO_INPUT_H
+/* This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+#include <linux/types.h>
+
+enum virtio_input_config_select {
+ VIRTIO_INPUT_CFG_UNSET = 0x00,
+ VIRTIO_INPUT_CFG_ID_NAME = 0x01,
+ VIRTIO_INPUT_CFG_ID_SERIAL = 0x02,
+ VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03,
+ VIRTIO_INPUT_CFG_PROP_BITS = 0x10,
+ VIRTIO_INPUT_CFG_EV_BITS = 0x11,
+ VIRTIO_INPUT_CFG_ABS_INFO = 0x12,
+};
+
+struct virtio_input_absinfo {
+ __u32 min;
+ __u32 max;
+ __u32 fuzz;
+ __u32 flat;
+ __u32 res;
+};
+
+struct virtio_input_devids {
+ __u16 bustype;
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+};
+
+struct virtio_input_config {
+ __u8 select;
+ __u8 subsel;
+ __u8 size;
+ __u8 reserved[5];
+ union {
+ char string[128];
+ __u8 bitmap[128];
+ struct virtio_input_absinfo abs;
+ struct virtio_input_devids ids;
+ } u;
+};
+
+struct virtio_input_event {
+ __le16 type;
+ __le16 code;
+ __le32 value;
+};
+
+#endif /* _LINUX_VIRTIO_INPUT_H */
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index a3318f31e8e7..915980ac68df 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
-/* Assuming a given event_idx value from the other size, if
+/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
* should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
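Editorial note: per the (now corrected) comment, the helper answers whether the other side's event_idx falls in the range of indices just published, using wrap-safe 16-bit arithmetic. Two worked cases as a sketch, not part of the patch:

    #include <assert.h>
    #include <linux/virtio_ring.h>

    static void event_idx_examples(void)
    {
        /* We advanced from old=2 to new_idx=5, i.e. published entries 2-4.
         * The other side asked to be notified at index 3: event due. */
        assert(vring_need_event(3, 5, 2));

        /* It asked for index 7, which has not been reached yet: no event. */
        assert(!vring_need_event(7, 5, 2));
    }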
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 02d5125a5ee8..2cd9e608d0d1 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_XFRM_H
#define _LINUX_XFRM_H
+#include <linux/in6.h>
#include <linux/types.h>
/* All of the structures in this file may not change size as they are
@@ -13,6 +14,7 @@
typedef union {
__be32 a4;
__be32 a6[4];
+ struct in6_addr in6;
} xfrm_address_t;
/* Ident of a specific xfrm_state. It is used on input to lookup
diff --git a/include/uapi/linux/xilinx-v4l2-controls.h b/include/uapi/linux/xilinx-v4l2-controls.h
new file mode 100644
index 000000000000..fb495b91e800
--- /dev/null
+++ b/include/uapi/linux/xilinx-v4l2-controls.h
@@ -0,0 +1,73 @@
+/*
+ * Xilinx Controls Header
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_XILINX_V4L2_CONTROLS_H__
+#define __UAPI_XILINX_V4L2_CONTROLS_H__
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_XILINX_OFFSET 0xc000
+#define V4L2_CID_XILINX_BASE (V4L2_CID_USER_BASE + V4L2_CID_XILINX_OFFSET)
+
+/*
+ * Private Controls for Xilinx Video IPs
+ */
+
+/*
+ * Xilinx TPG Video IP
+ */
+
+#define V4L2_CID_XILINX_TPG (V4L2_CID_USER_BASE + 0xc000)
+
+/* Draw cross hairs */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIRS (V4L2_CID_XILINX_TPG + 1)
+/* Enable a moving box */
+#define V4L2_CID_XILINX_TPG_MOVING_BOX (V4L2_CID_XILINX_TPG + 2)
+/* Mask out a color component */
+#define V4L2_CID_XILINX_TPG_COLOR_MASK (V4L2_CID_XILINX_TPG + 3)
+/* Enable a stuck pixel feature */
+#define V4L2_CID_XILINX_TPG_STUCK_PIXEL (V4L2_CID_XILINX_TPG + 4)
+/* Enable a noisy output */
+#define V4L2_CID_XILINX_TPG_NOISE (V4L2_CID_XILINX_TPG + 5)
+/* Enable the motion feature */
+#define V4L2_CID_XILINX_TPG_MOTION (V4L2_CID_XILINX_TPG + 6)
+/* Configure the motion speed of moving patterns */
+#define V4L2_CID_XILINX_TPG_MOTION_SPEED (V4L2_CID_XILINX_TPG + 7)
+/* The row of the horizontal cross hair location */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW (V4L2_CID_XILINX_TPG + 8)
+/* The column of the vertical cross hair location */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN (V4L2_CID_XILINX_TPG + 9)
+/* Set starting point of sine wave for horizontal component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_HOR_START (V4L2_CID_XILINX_TPG + 10)
+/* Set speed of the horizontal component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED (V4L2_CID_XILINX_TPG + 11)
+/* Set starting point of sine wave for vertical component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_VER_START (V4L2_CID_XILINX_TPG + 12)
+/* Set speed of the vertical component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED (V4L2_CID_XILINX_TPG + 13)
+/* Moving box size */
+#define V4L2_CID_XILINX_TPG_BOX_SIZE (V4L2_CID_XILINX_TPG + 14)
+/* Moving box color */
+#define V4L2_CID_XILINX_TPG_BOX_COLOR (V4L2_CID_XILINX_TPG + 15)
+/* Upper limit count of generated stuck pixels */
+#define V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH (V4L2_CID_XILINX_TPG + 16)
+/* Noise level */
+#define V4L2_CID_XILINX_TPG_NOISE_GAIN (V4L2_CID_XILINX_TPG + 17)
+
+#endif /* __UAPI_XILINX_V4L2_CONTROLS_H__ */
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index cd6d789b73ec..99a8ca15fe64 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -32,10 +32,32 @@ struct cxl_ioctl_start_work {
#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
CXL_START_WORK_NUM_IRQS)
+
+/* Possible modes that an afu can be in */
+#define CXL_MODE_DEDICATED 0x1
+#define CXL_MODE_DIRECTED 0x2
+
+/* possible flags for the cxl_afu_id flags field */
+#define CXL_AFUID_FLAG_SLAVE 0x1 /* In directed-mode afu is in slave mode */
+
+struct cxl_afu_id {
+ __u64 flags; /* One of CXL_AFUID_FLAG_X */
+ __u32 card_id;
+ __u32 afu_offset;
+ __u32 afu_mode; /* one of the CXL_MODE_X */
+ __u32 reserved1;
+ __u64 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u64 reserved6;
+};
+
/* ioctl numbers */
#define CXL_MAGIC 0xCA
#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
+#define CXL_IOCTL_GET_AFU_ID _IOR(CXL_MAGIC, 0x02, struct cxl_afu_id)
#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
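Editorial note: CXL_IOCTL_GET_AFU_ID fills a struct cxl_afu_id describing the AFU behind an open context. A sketch; the device node name is a placeholder and depends on the card/AFU numbering and mode:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <misc/cxl.h>

    static void show_afu_id(void)
    {
        struct cxl_afu_id id = { 0 };
        int fd = open("/dev/cxl/afu0.0d", O_RDWR);

        if (fd >= 0 && ioctl(fd, CXL_IOCTL_GET_AFU_ID, &id) == 0)
            printf("card %u afu %u mode %u\n",
                   id.card_id, id.afu_offset, id.afu_mode);
        if (fd >= 0)
            close(fd);
    }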
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index b513e662d8e4..978841eeaff1 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -91,6 +91,7 @@ enum {
enum {
IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
+ IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
@@ -222,6 +223,8 @@ struct ib_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
+ __u64 timestamp_mask;
+ __u64 hca_core_clock; /* in KHZ */
};
struct ib_uverbs_query_port {
@@ -353,11 +356,27 @@ struct ib_uverbs_create_cq {
__u64 driver_data[0];
};
+struct ib_uverbs_ex_create_cq {
+ __u64 user_handle;
+ __u32 cqe;
+ __u32 comp_vector;
+ __s32 comp_channel;
+ __u32 comp_mask;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct ib_uverbs_create_cq_resp {
__u32 cq_handle;
__u32 cqe;
};
+struct ib_uverbs_ex_create_cq_resp {
+ struct ib_uverbs_create_cq_resp base;
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
struct ib_uverbs_resize_cq {
__u64 response;
__u32 cq_handle;
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index de69170a30ce..6e4bb4270ca2 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -37,6 +37,7 @@ enum {
RDMA_NL_IWPM_ADD_MAPPING,
RDMA_NL_IWPM_QUERY_MAPPING,
RDMA_NL_IWPM_REMOVE_MAPPING,
+ RDMA_NL_IWPM_REMOTE_INFO,
RDMA_NL_IWPM_HANDLE_ERR,
RDMA_NL_IWPM_MAPINFO,
RDMA_NL_IWPM_MAPINFO_NUM,
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index 09c8a00ea503..5a5fa4956ebd 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -22,6 +22,7 @@
#ifndef _UAPI__SOUND_ASEQUENCER_H
#define _UAPI__SOUND_ASEQUENCER_H
+#include <sound/asound.h>
/** version of the sequencer */
#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION (1, 0, 1)
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 0e88e7a0f0eb..a45be6bdcf5b 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -25,6 +25,9 @@
#include <linux/types.h>
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
/*
* protocol version
@@ -140,7 +143,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 12)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 13)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -267,10 +270,17 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */
#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */
#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 /* period wakeup can be disabled */
-#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* has audio wall clock for audio/system time sync */
+#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* (deprecated) has audio wall clock for audio/system time sync */
+#define SNDRV_PCM_INFO_HAS_LINK_ATIME 0x01000000 /* report hardware link audio time, reset on startup */
+#define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */
+#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
+#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
+
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
+
+
typedef int __bitwise snd_pcm_state_t;
#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
#define SNDRV_PCM_STATE_SETUP ((__force snd_pcm_state_t) 1) /* stream has a setup */
@@ -408,6 +418,22 @@ struct snd_pcm_channel_info {
unsigned int step; /* samples distance in bits */
};
+enum {
+ /*
+ * first definition for backwards compatibility only,
+ * maps to wallclock/link time for HDAudio playback and DEFAULT/DMA time for everything else
+ */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT = 0,
+
+ /* timestamp definitions */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT = 1, /* DMA time, reported as per hw_ptr */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK = 2, /* link time reported by sample or wallclock counter, reset on startup */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ABSOLUTE = 3, /* link time reported by sample or wallclock counter, not reset on startup */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ESTIMATED = 4, /* link time estimated indirectly */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED = 5, /* link time synchronized with system time */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LAST = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED
+};
+
struct snd_pcm_status {
snd_pcm_state_t state; /* stream state */
struct timespec trigger_tstamp; /* time when stream was started/stopped/paused */
@@ -419,9 +445,11 @@ struct snd_pcm_status {
snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */
snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */
snd_pcm_state_t suspended_state; /* suspended stream state */
- __u32 reserved_alignment; /* must be filled with zero */
- struct timespec audio_tstamp; /* from sample counter or wall clock */
- unsigned char reserved[56-sizeof(struct timespec)]; /* must be filled with zero */
+ __u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */
+ struct timespec audio_tstamp; /* sample counter, wall clock, PHC or on-demand sync'ed */
+ struct timespec driver_tstamp; /* useful in case reference system tstamp is reported with delay */
+ __u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */
+ unsigned char reserved[52-2*sizeof(struct timespec)]; /* must be filled with zero */
};
struct snd_pcm_mmap_status {
@@ -534,6 +562,7 @@ enum {
#define SNDRV_PCM_IOCTL_DELAY _IOR('A', 0x21, snd_pcm_sframes_t)
#define SNDRV_PCM_IOCTL_HWSYNC _IO('A', 0x22)
#define SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct snd_pcm_sync_ptr)
+#define SNDRV_PCM_IOCTL_STATUS_EXT _IOWR('A', 0x24, struct snd_pcm_status)
#define SNDRV_PCM_IOCTL_CHANNEL_INFO _IOR('A', 0x32, struct snd_pcm_channel_info)
#define SNDRV_PCM_IOCTL_PREPARE _IO('A', 0x40)
#define SNDRV_PCM_IOCTL_RESET _IO('A', 0x41)
@@ -835,7 +864,7 @@ struct snd_ctl_elem_id {
snd_ctl_elem_iface_t iface; /* interface identifier */
unsigned int device; /* device/client number */
unsigned int subdevice; /* subdevice (substream) number */
- unsigned char name[44]; /* ASCII name of item */
+ unsigned char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* ASCII name of item */
unsigned int index; /* index of item */
};
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 22ed8cb7800b..e00d8cbfc628 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -75,7 +75,7 @@ struct snd_compr_tstamp {
/**
* struct snd_compr_avail - avail descriptor
* @avail: Number of bytes available in ring buffer for writing/reading
- * @tstamp: timestamp infomation
+ * @tstamp: timestamp information
*/
struct snd_compr_avail {
__u64 avail;
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
index d1bbaf78457a..ec1535bb6aed 100644
--- a/include/uapi/sound/emu10k1.h
+++ b/include/uapi/sound/emu10k1.h
@@ -23,8 +23,7 @@
#define _UAPI__SOUND_EMU10K1_H
#include <linux/types.h>
-
-
+#include <sound/asound.h>
/*
* ---- FX8010 ----
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index b357f1a5e29c..5737332d38f2 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -20,6 +20,12 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
 /* Maximum channels is 64; even in 56-channel mode there are 64 playbacks to the matrix */
#define HDSPM_MAX_CHANNELS 64
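
The conditional include lets declarations in this header use fixed-width types that resolve in both builds: <linux/types.h> in the kernel and <stdint.h> in userspace. A purely hypothetical field layout, just to show the pattern:

struct hdspm_example_status {		/* hypothetical, not part of the header */
	uint32_t sample_rate;
	uint64_t frame_counter;
};
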
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 73390c120cad..85dedca3dcfb 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -39,7 +39,7 @@ struct ipu_di_signal_cfg {
struct videomode mode;
- u32 pixel_fmt;
+ u32 bus_format;
u32 v_to_h_sync;
#define IPU_DI_CLKMODE_SYNC (1 << 0)
diff --git a/include/video/neomagic.h b/include/video/neomagic.h
index bc5013e8059d..91e225a6107d 100644
--- a/include/video/neomagic.h
+++ b/include/video/neomagic.h
@@ -159,10 +159,7 @@ struct neofb_par {
unsigned char VCLK3NumeratorHigh;
unsigned char VCLK3Denominator;
unsigned char VerticalExt;
-
-#ifdef CONFIG_MTRR
- int mtrr;
-#endif
+ int wc_cookie;
u8 __iomem *mmio_vbase;
u8 cursorOff;
u8 *cursorPad; /* Must die !! */
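
The MTRR-specific field gives way to a write-combining cookie. A minimal sketch of how such a cookie is typically obtained and released with the generic arch_phys_wc_add()/arch_phys_wc_del() helpers; the function names and base/length parameters are illustrative, only the wc_cookie field comes from this change.

#include <linux/io.h>

static void neofb_example_enable_wc(struct neofb_par *par,
				    unsigned long fb_base, unsigned long fb_len)
{
	/* Cookie is consumed later by arch_phys_wc_del(); the helpers absorb
	 * failures internally, so no error handling is needed here. */
	par->wc_cookie = arch_phys_wc_add(fb_base, fb_len);
}

static void neofb_example_disable_wc(struct neofb_par *par)
{
	arch_phys_wc_del(par->wc_cookie);
}
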
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index c8ed15daad02..f001a356fd98 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -129,14 +129,13 @@ enum omap_rfbi_te_mode {
};
enum omap_dss_signal_level {
- OMAPDSS_SIG_ACTIVE_HIGH = 0,
- OMAPDSS_SIG_ACTIVE_LOW = 1,
+ OMAPDSS_SIG_ACTIVE_LOW,
+ OMAPDSS_SIG_ACTIVE_HIGH,
};
enum omap_dss_signal_edge {
- OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- OMAPDSS_DRIVE_SIG_RISING_EDGE,
OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ OMAPDSS_DRIVE_SIG_RISING_EDGE,
};
enum omap_dss_venc_type {
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index a20e4a3a8b15..0530e5a4c6b1 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -289,6 +289,11 @@
#define VIDISD14C_ALPHA1_B_LIMIT 0xf
#define VIDISD14C_ALPHA1_B(_x) ((_x) << 0)
+#define VIDW_ALPHA 0x021c
+#define VIDW_ALPHA_R(_x) ((_x) << 16)
+#define VIDW_ALPHA_G(_x) ((_x) << 8)
+#define VIDW_ALPHA_B(_x) ((_x) << 0)
+
/* Video buffer addresses */
#define VIDW_BUF_START(_buff) (0xA0 + ((_buff) * 8))
#define VIDW_BUF_START1(_buff) (0xA4 + ((_buff) * 8))
@@ -436,6 +441,12 @@
#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+/* Display port clock control */
+#define DP_MIE_CLKCON 0x27c
+#define DP_MIE_CLK_DISABLE 0x0
+#define DP_MIE_CLK_DP_ENABLE 0x2
+#define DP_MIE_CLK_MIE_ENABLE 0x3
+
/* Notes on per-window bpp settings
*
* Value Win0 Win1 Win2 Win3 Win 4
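
A hedged sketch of how a driver might use the new defines: 'regs' is assumed to be an ioremapped FIMD register base, window 0 is shown, and the per-window stride for VIDW_ALPHA is not defined by this header; only the offsets and bit helpers come from the diff above.

#include <linux/io.h>
#include <video/samsung_fimd.h>

static void fimd_example_dp_and_alpha(void __iomem *regs)
{
	/* Enable the DisplayPort path clock. */
	writel(DP_MIE_CLK_DP_ENABLE, regs + DP_MIE_CLKCON);

	/* Program fully opaque per-window alpha for window 0. */
	writel(VIDW_ALPHA_R(0xff) | VIDW_ALPHA_G(0xff) | VIDW_ALPHA_B(0xff),
	       regs + VIDW_ALPHA);
}
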
diff --git a/include/video/tdfx.h b/include/video/tdfx.h
index befbaf0a92d8..69674b94bb68 100644
--- a/include/video/tdfx.h
+++ b/include/video/tdfx.h
@@ -196,7 +196,7 @@ struct tdfx_par {
u32 palette[16];
void __iomem *regbase_virt;
unsigned long iobase;
- int mtrr_handle;
+ int wc_cookie;
#ifdef CONFIG_FB_3DFX_I2C
struct tdfxfb_i2c_chan chan[2];
#endif
diff --git a/include/xen/events.h b/include/xen/events.h
index 5321cd9636e6..7d95fdf9cf3e 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -17,7 +17,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
void *dev_id);
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
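
Illustrative caller of the extended prototype; the exact semantics of the new boolean come from the rest of this series, the assumption here is that it marks the binding as per-CPU.

#include <xen/events.h>
#include <xen/interface/xen.h>

static int example_bind_timer_virq(unsigned int cpu)
{
	return bind_virq_to_irq(VIRQ_TIMER, cpu, true);	/* irq number or -errno */
}
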
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 143ca5ffab7a..4478f4b4aae2 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,6 +191,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
/* Perform a batch of grant map/copy operations. Retry every batch slot
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index f68719f405af..a48378958062 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -67,7 +67,7 @@
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
-#define __HYPERVISOR_acm_op 27
+#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
@@ -75,7 +75,11 @@
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
+#define __HYPERVISOR_sysctl 35
+#define __HYPERVISOR_domctl 36
+#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
+#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 83338210ee04..0ce4f32017ea 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled);
void xen_timer_resume(void);
void xen_arch_resume(void);
+void xen_arch_suspend(void);
void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
@@ -27,13 +28,58 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct;
+
+/*
+ * xen_remap_domain_mfn_array() - map an array of foreign frames
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @gfn: Array of GFNs to map
+ * @nr: Number of entries in the GFN array
+ * @err_ptr: Returns per-GFN error status.
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * @gfn and @err_ptr may point to the same buffer; the GFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages);
+
+/* xen_remap_domain_mfn_range() - map a range of foreign frames
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @gfn: First GFN to map.
+ * @nr: Number frames to map
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t mfn, int nr,
+ xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages);
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages);
+int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages);
+int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages);
bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
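
A hedged sketch of a caller of the newly documented array variant: per-frame errors are collected in a separate array here (the kernel-doc above notes that err_ptr may also alias the gfn buffer); the wrapper name and error policy are illustrative.

#include <linux/mm.h>
#include <xen/xen-ops.h>

static int example_map_foreign(struct vm_area_struct *vma, unsigned long addr,
			       xen_pfn_t *gfns, int *errs, int nr,
			       unsigned int domid, struct page **pages)
{
	int mapped;

	mapped = xen_remap_domain_mfn_array(vma, addr, gfns, nr, errs,
					    vma->vm_page_prot, domid, pages);
	if (mapped < 0)
		return mapped;		/* whole-batch failure */

	return mapped == nr ? 0 : -EFAULT;	/* inspect errs[] for details */
}
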
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b0f1c9e5d687..289c0b5f08fe 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -46,6 +46,10 @@
#include <xen/interface/io/xenbus.h>
#include <xen/interface/io/xs_wire.h>
+#define XENBUS_MAX_RING_PAGE_ORDER 4
+#define XENBUS_MAX_RING_PAGES (1U << XENBUS_MAX_RING_PAGE_ORDER)
+#define INVALID_GRANT_HANDLE (~0U)
+
/* Register callback to watch this node. */
struct xenbus_watch
{
@@ -199,15 +203,19 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
const char *pathfmt, ...);
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
-int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
-int xenbus_map_ring_valloc(struct xenbus_device *dev,
- int gnt_ref, void **vaddr);
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
- grant_handle_t *handle, void *vaddr);
+int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+ unsigned int nr_pages, grant_ref_t *grefs);
+int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+ unsigned int nr_grefs, void **vaddr);
+int xenbus_map_ring(struct xenbus_device *dev,
+ grant_ref_t *gnt_refs, unsigned int nr_grefs,
+ grant_handle_t *handles, unsigned long *vaddrs,
+ bool *leaked);
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t handle, void *vaddr);
+ grant_handle_t *handles, unsigned int nr_handles,
+ unsigned long *vaddrs);
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
int xenbus_free_evtchn(struct xenbus_device *dev, int port);
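
A hypothetical frontend sketch of the reworked multi-page ring API: allocate 2^order pages and grant them in one call, with xenbus_grant_ring() filling the caller-provided grefs array. The order must not exceed XENBUS_MAX_RING_PAGE_ORDER, and a negative return is assumed to indicate failure; the helper name is illustrative.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <xen/xenbus.h>

static int example_setup_ring(struct xenbus_device *dev, unsigned int order,
			      void **ring_out, grant_ref_t *grefs)
{
	unsigned int nr_pages = 1U << order;	/* <= XENBUS_MAX_RING_PAGES */
	void *ring;
	int err;

	ring = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!ring)
		return -ENOMEM;

	err = xenbus_grant_ring(dev, ring, nr_pages, grefs);
	if (err < 0) {
		free_pages((unsigned long)ring, order);
		return err;
	}

	*ring_out = ring;
	return 0;
}
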