author    Jakub Kicinski <kuba@kernel.org>  2023-04-13 16:04:28 -0700
committer Jakub Kicinski <kuba@kernel.org>  2023-04-13 16:04:28 -0700
commit    800e68c44ffe71f9715f745b38fd1af6910b3773 (patch)
tree      898caef9a109e429776e65c1083d1503207bebed
parent    Merge branch 'net-use-read_once-write_once-for-ring-index-accesses' (diff)
parent    Merge tag 'net-6.3-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Conflicts:
	tools/testing/selftests/net/config
	  62199e3f1658 ("selftests: net: Add VXLAN MDB test")
	  3a0385be133e ("selftests: add the missing CONFIG_IP_SCTP in net config")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  .mailmap | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/loongson,cpu-interrupt-controller.yaml (renamed from Documentation/devicetree/bindings/interrupt-controller/loongarch,cpu-interrupt-controller.yaml) | 6
-rw-r--r--  Documentation/devicetree/bindings/serial/renesas,scif.yaml | 4
-rw-r--r--  Documentation/mm/zsmalloc.rst | 135
-rw-r--r--  Documentation/networking/ip-sysctl.rst | 2
-rw-r--r--  MAINTAINERS | 16
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm64/kernel/compat_alignment.c | 32
-rw-r--r--  arch/arm64/kvm/arm.c | 26
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 5
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/sys_regs.c | 7
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 1
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 1
-rw-r--r--  arch/arm64/net/bpf_jit.h | 4
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 3
-rw-r--r--  arch/loongarch/net/bpf_jit.c | 4
-rw-r--r--  arch/x86/Makefile.um | 5
-rw-r--r--  arch/x86/include/asm/intel-family.h | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 9
-rw-r--r--  arch/x86/pci/fixup.c | 21
-rw-r--r--  block/blk-mq.c | 4
-rw-r--r--  block/genhd.c | 8
-rw-r--r--  drivers/acpi/acpi_video.c | 15
-rw-r--r--  drivers/acpi/video_detect.c | 58
-rw-r--r--  drivers/block/ublk_drv.c | 26
-rw-r--r--  drivers/block/virtio_blk.c | 269
-rw-r--r--  drivers/bluetooth/btbcm.c | 2
-rw-r--r--  drivers/bluetooth/btsdio.c | 1
-rw-r--r--  drivers/bus/imx-weim.c | 6
-rw-r--r--  drivers/counter/104-quad-8.c | 31
-rw-r--r--  drivers/cxl/core/hdm.c | 126
-rw-r--r--  drivers/cxl/core/pci.c | 38
-rw-r--r--  drivers/cxl/core/pmem.c | 6
-rw-r--r--  drivers/cxl/core/port.c | 38
-rw-r--r--  drivers/cxl/core/region.c | 33
-rw-r--r--  drivers/cxl/cxl.h | 8
-rw-r--r--  drivers/cxl/cxlpci.h | 14
-rw-r--r--  drivers/cxl/port.c | 4
-rw-r--r--  drivers/dma/apple-admac.c | 20
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/dma/xilinx/xdma.c | 2
-rw-r--r--  drivers/gpio/Kconfig | 2
-rw-r--r--  drivers/gpio/gpio-davinci.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 57
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 87
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c | 1
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 11
-rw-r--r--  drivers/hid/Kconfig | 2
-rw-r--r--  drivers/hid/hid-ids.h | 4
-rw-r--r--  drivers/hid/hid-input.c | 6
-rw-r--r--  drivers/hid/hid-sensor-custom.c | 2
-rw-r--r--  drivers/hid/hid-topre.c | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-core.c | 24
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h | 20
-rw-r--r--  drivers/i2c/i2c-core-of.c | 5
-rw-r--r--  drivers/iio/accel/kionix-kx022a.c | 2
-rw-r--r--  drivers/iio/adc/ad7791.c | 2
-rw-r--r--  drivers/iio/adc/ltc2497.c | 6
-rw-r--r--  drivers/iio/adc/max11410.c | 22
-rw-r--r--  drivers/iio/adc/palmas_gpadc.c | 2
-rw-r--r--  drivers/iio/adc/qcom-spmi-adc5.c | 10
-rw-r--r--  drivers/iio/adc/ti-ads7950.c | 1
-rw-r--r--  drivers/iio/dac/cio-dac.c | 4
-rw-r--r--  drivers/iio/imu/Kconfig | 1
-rw-r--r--  drivers/iio/industrialio-buffer.c | 21
-rw-r--r--  drivers/iio/light/cm32181.c | 12
-rw-r--r--  drivers/iio/light/vcnl4000.c | 3
-rw-r--r--  drivers/mtd/mtdblock.c | 12
-rw-r--r--  drivers/mtd/nand/raw/meson_nand.c | 6
-rw-r--r--  drivers/mtd/nand/raw/stm32_fmc2_nand.c | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 20
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 44
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 63
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 8
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 3
-rw-r--r--  drivers/net/phy/nxp-c45-tja11xx.c | 14
-rw-r--r--  drivers/net/phy/sfp.c | 19
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/veth.c | 10
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.c | 3
-rw-r--r--  drivers/nvme/host/core.c | 6
-rw-r--r--  drivers/of/dynamic.c | 1
-rw-r--r--  drivers/of/platform.c | 5
-rw-r--r--  drivers/pci/doe.c | 30
-rw-r--r--  drivers/pci/remove.c | 5
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 36
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 3
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 1
-rw-r--r--  drivers/spi/spi.c | 5
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 11
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 10
-rw-r--r--  drivers/tty/serial/sh-sci.c | 10
-rw-r--r--  drivers/ufs/core/ufshcd.c | 47
-rw-r--r--  drivers/usb/cdns3/cdnsp-ep0.c | 3
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 2
-rw-r--r--  drivers/usb/host/xhci-pci.c | 7
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 6
-rw-r--r--  drivers/usb/host/xhci.c | 7
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 10
-rw-r--r--  drivers/usb/typec/altmodes/displayport.c | 6
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 8
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 13
-rw-r--r--  drivers/vhost/scsi.c | 39
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 18
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 2
-rw-r--r--  fs/9p/xattr.c | 8
-rw-r--r--  fs/btrfs/disk-io.c | 14
-rw-r--r--  fs/btrfs/super.c | 4
-rw-r--r--  fs/cifs/cifssmb.c | 2
-rw-r--r--  fs/cifs/fs_context.c | 13
-rw-r--r--  fs/cifs/fs_context.h | 3
-rw-r--r--  fs/cifs/misc.c | 2
-rw-r--r--  fs/dax.c | 52
-rw-r--r--  fs/ksmbd/connection.c | 17
-rw-r--r--  fs/ksmbd/ksmbd_work.h | 2
-rw-r--r--  fs/ksmbd/server.c | 5
-rw-r--r--  fs/ksmbd/smb2pdu.c | 36
-rw-r--r--  fs/ksmbd/smb2pdu.h | 1
-rw-r--r--  fs/ksmbd/smb_common.c | 138
-rw-r--r--  fs/ksmbd/smb_common.h | 2
-rw-r--r--  fs/ksmbd/unicode.c | 18
-rw-r--r--  fs/netfs/iterator.c | 2
-rw-r--r--  fs/nilfs2/btree.c | 1
-rw-r--r--  fs/nilfs2/direct.c | 1
-rw-r--r--  fs/nilfs2/segment.c | 3
-rw-r--r--  fs/nilfs2/super.c | 2
-rw-r--r--  fs/nilfs2/the_nilfs.c | 12
-rw-r--r--  include/acpi/video.h | 15
-rw-r--r--  include/linux/mlx5/device.h | 14
-rw-r--r--  include/linux/mm_types.h | 3
-rw-r--r--  include/linux/netdevice.h | 3
-rw-r--r--  include/linux/pci-doe.h | 8
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/rtnetlink.h | 3
-rw-r--r--  include/net/bluetooth/hci_core.h | 1
-rw-r--r--  include/net/bonding.h | 8
-rw-r--r--  include/net/xdp.h | 47
-rw-r--r--  include/uapi/linux/virtio_blk.h | 18
-rw-r--r--  include/ufs/ufshcd.h | 1
-rw-r--r--  io_uring/io_uring.c | 2
-rw-r--r--  io_uring/kbuf.c | 7
-rw-r--r--  kernel/dma/swiotlb.c | 6
-rw-r--r--  kernel/events/core.c | 14
-rw-r--r--  kernel/fork.c | 3
-rw-r--r--  kernel/rcu/tree.c | 27
-rw-r--r--  kernel/trace/ftrace.c | 15
-rw-r--r--  kernel/trace/trace_events_synth.c | 2
-rw-r--r--  lib/Kconfig.debug | 4
-rw-r--r--  lib/maple_tree.c | 287
-rw-r--r--  mm/hugetlb.c | 14
-rw-r--r--  mm/kfence/core.c | 32
-rw-r--r--  mm/memory.c | 16
-rw-r--r--  mm/mmap.c | 3
-rw-r--r--  mm/swapfile.c | 3
-rw-r--r--  mm/vmalloc.c | 8
-rw-r--r--  net/9p/trans_xen.c | 4
-rw-r--r--  net/bluetooth/hci_conn.c | 89
-rw-r--r--  net/bluetooth/hci_event.c | 18
-rw-r--r--  net/bluetooth/hci_sync.c | 13
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bluetooth/l2cap_core.c | 24
-rw-r--r--  net/bluetooth/sco.c | 85
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/core/rtnetlink.c | 11
-rw-r--r--  net/core/skbuff.c | 16
-rw-r--r--  net/core/xdp.c | 10
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 3
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv6/udp.c | 8
-rw-r--r--  net/mptcp/fastopen.c | 11
-rw-r--r--  net/mptcp/options.c | 5
-rw-r--r--  net/mptcp/protocol.c | 2
-rw-r--r--  net/mptcp/subflow.c | 18
-rw-r--r--  net/openvswitch/actions.c | 2
-rw-r--r--  net/qrtr/af_qrtr.c | 8
-rw-r--r--  net/sctp/stream_interleave.c | 3
-rw-r--r--  net/smc/af_smc.c | 11
-rw-r--r--  tools/testing/radix-tree/maple.c | 16
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c | 30
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_metadata.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/xdp_hw_metadata.c | 42
-rw-r--r--  tools/testing/selftests/bpf/progs/xdp_metadata.c | 6
-rw-r--r--  tools/testing/selftests/bpf/progs/xdp_metadata2.c | 7
-rw-r--r--  tools/testing/selftests/bpf/xdp_hw_metadata.c | 10
-rw-r--r--  tools/testing/selftests/bpf/xdp_metadata.h | 4
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/Makefile | 3
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond_options.sh | 264
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh | 143
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/option_prio.sh | 245
-rw-r--r--  tools/testing/selftests/net/config | 1
-rwxr-xr-x  tools/testing/selftests/net/mptcp/userspace_pm.sh | 2
-rw-r--r--  tools/testing/selftests/net/openvswitch/ovs-dpctl.py | 2
-rw-r--r--  tools/virtio/virtio-trace/README | 2
213 files changed, 2628 insertions, 1384 deletions
diff --git a/.mailmap b/.mailmap
index e2af78f67f7c..e42486317d18 100644
--- a/.mailmap
+++ b/.mailmap
@@ -265,7 +265,9 @@ Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
+Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
+Leonard Göhrs <l.goehrs@pengutronix.de>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongarch,cpu-interrupt-controller.yaml b/Documentation/devicetree/bindings/interrupt-controller/loongson,cpu-interrupt-controller.yaml
index 2a1cf885c99d..adf989976dcc 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/loongarch,cpu-interrupt-controller.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,cpu-interrupt-controller.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
-$id: http://devicetree.org/schemas/interrupt-controller/loongarch,cpu-interrupt-controller.yaml#
+$id: http://devicetree.org/schemas/interrupt-controller/loongson,cpu-interrupt-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: LoongArch CPU Interrupt Controller
@@ -11,7 +11,7 @@ maintainers:
properties:
compatible:
- const: loongarch,cpu-interrupt-controller
+ const: loongson,cpu-interrupt-controller
'#interrupt-cells':
const: 1
@@ -28,7 +28,7 @@ required:
examples:
- |
interrupt-controller {
- compatible = "loongarch,cpu-interrupt-controller";
+ compatible = "loongson,cpu-interrupt-controller";
#interrupt-cells = <1>;
interrupt-controller;
};
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index 1989bd67d04e..54e4f41be9b4 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -92,7 +92,7 @@ properties:
- description: Error interrupt
- description: Receive buffer full interrupt
- description: Transmit buffer empty interrupt
- - description: Transmit End interrupt
+ - description: Break interrupt
- items:
- description: Error interrupt
- description: Receive buffer full interrupt
@@ -107,7 +107,7 @@ properties:
- const: eri
- const: rxi
- const: txi
- - const: tei
+ - const: bri
- items:
- const: eri
- const: rxi
diff --git a/Documentation/mm/zsmalloc.rst b/Documentation/mm/zsmalloc.rst
index 64d127bfc221..a3c26d587752 100644
--- a/Documentation/mm/zsmalloc.rst
+++ b/Documentation/mm/zsmalloc.rst
@@ -39,13 +39,12 @@ With CONFIG_ZSMALLOC_STAT, we could see zsmalloc internal information via
# cat /sys/kernel/debug/zsmalloc/zram0/classes
- class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage
+ class size 10% 20% 30% 40% 50% 60% 70% 80% 90% 99% 100% obj_allocated obj_used pages_used pages_per_zspage freeable
...
...
- 9 176 0 1 186 129 8 4
- 10 192 1 0 2880 2872 135 3
- 11 208 0 1 819 795 42 2
- 12 224 0 1 219 159 12 4
+ 30 512 0 12 4 1 0 1 0 0 1 0 414 3464 3346 433 1 14
+ 31 528 2 7 2 2 1 0 1 0 0 2 117 4154 3793 536 4 44
+ 32 544 6 3 4 1 2 1 0 0 0 1 260 4170 3965 556 2 26
...
...
@@ -54,10 +53,28 @@ class
index
size
object size zspage stores
-almost_empty
- the number of ZS_ALMOST_EMPTY zspages(see below)
-almost_full
- the number of ZS_ALMOST_FULL zspages(see below)
+10%
+ the number of zspages with usage ratio less than 10% (see below)
+20%
+ the number of zspages with usage ratio between 10% and 20%
+30%
+ the number of zspages with usage ratio between 20% and 30%
+40%
+ the number of zspages with usage ratio between 30% and 40%
+50%
+ the number of zspages with usage ratio between 40% and 50%
+60%
+ the number of zspages with usage ratio between 50% and 60%
+70%
+ the number of zspages with usage ratio between 60% and 70%
+80%
+ the number of zspages with usage ratio between 70% and 80%
+90%
+ the number of zspages with usage ratio between 80% and 90%
+99%
+ the number of zspages with usage ratio between 90% and 99%
+100%
+ the number of zspages with usage ratio 100%
obj_allocated
the number of objects allocated
obj_used
@@ -66,19 +83,14 @@ pages_used
the number of pages allocated for the class
pages_per_zspage
the number of 0-order pages to make a zspage
+freeable
+ the approximate number of pages class compaction can free
-We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where
-
-* n = number of allocated objects
-* N = total number of objects zspage can store
-* f = fullness_threshold_frac(ie, 4 at the moment)
-
-Similarly, we assign zspage to:
-
-* ZS_ALMOST_FULL when n > N / f
-* ZS_EMPTY when n == 0
-* ZS_FULL when n == N
-
+Each zspage maintains inuse counter which keeps track of the number of
+objects stored in the zspage. The inuse counter determines the zspage's
+"fullness group" which is calculated as the ratio of the "inuse" objects to
+the total number of objects the zspage can hold (objs_per_zspage). The
+closer the inuse counter is to objs_per_zspage, the better.
Internals
=========
@@ -94,10 +106,10 @@ of objects that each zspage can store.
For instance, consider the following size classes:::
- class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+ class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
...
- 94 1536 0 0 0 0 0 3 0
- 100 1632 0 0 0 0 0 2 0
+ 94 1536 0 .... 0 0 0 0 3 0
+ 100 1632 0 .... 0 0 0 0 2 0
...
@@ -134,10 +146,11 @@ reduces memory wastage.
Let's take a closer look at the bottom of `/sys/kernel/debug/zsmalloc/zramX/classes`:::
- class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+ class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
+
...
- 202 3264 0 0 0 0 0 4 0
- 254 4096 0 0 0 0 0 1 0
+ 202 3264 0 .. 0 0 0 0 4 0
+ 254 4096 0 .. 0 0 0 0 1 0
...
Size class #202 stores objects of size 3264 bytes and has a maximum of 4 pages
@@ -151,40 +164,42 @@ efficient storage of large objects.
For zspage chain size of 8, huge class watermark becomes 3632 bytes:::
- class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+ class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
+
...
- 202 3264 0 0 0 0 0 4 0
- 211 3408 0 0 0 0 0 5 0
- 217 3504 0 0 0 0 0 6 0
- 222 3584 0 0 0 0 0 7 0
- 225 3632 0 0 0 0 0 8 0
- 254 4096 0 0 0 0 0 1 0
+ 202 3264 0 .. 0 0 0 0 4 0
+ 211 3408 0 .. 0 0 0 0 5 0
+ 217 3504 0 .. 0 0 0 0 6 0
+ 222 3584 0 .. 0 0 0 0 7 0
+ 225 3632 0 .. 0 0 0 0 8 0
+ 254 4096 0 .. 0 0 0 0 1 0
...
For zspage chain size of 16, huge class watermark becomes 3840 bytes:::
- class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+ class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
+
...
- 202 3264 0 0 0 0 0 4 0
- 206 3328 0 0 0 0 0 13 0
- 207 3344 0 0 0 0 0 9 0
- 208 3360 0 0 0 0 0 14 0
- 211 3408 0 0 0 0 0 5 0
- 212 3424 0 0 0 0 0 16 0
- 214 3456 0 0 0 0 0 11 0
- 217 3504 0 0 0 0 0 6 0
- 219 3536 0 0 0 0 0 13 0
- 222 3584 0 0 0 0 0 7 0
- 223 3600 0 0 0 0 0 15 0
- 225 3632 0 0 0 0 0 8 0
- 228 3680 0 0 0 0 0 9 0
- 230 3712 0 0 0 0 0 10 0
- 232 3744 0 0 0 0 0 11 0
- 234 3776 0 0 0 0 0 12 0
- 235 3792 0 0 0 0 0 13 0
- 236 3808 0 0 0 0 0 14 0
- 238 3840 0 0 0 0 0 15 0
- 254 4096 0 0 0 0 0 1 0
+ 202 3264 0 .. 0 0 0 0 4 0
+ 206 3328 0 .. 0 0 0 0 13 0
+ 207 3344 0 .. 0 0 0 0 9 0
+ 208 3360 0 .. 0 0 0 0 14 0
+ 211 3408 0 .. 0 0 0 0 5 0
+ 212 3424 0 .. 0 0 0 0 16 0
+ 214 3456 0 .. 0 0 0 0 11 0
+ 217 3504 0 .. 0 0 0 0 6 0
+ 219 3536 0 .. 0 0 0 0 13 0
+ 222 3584 0 .. 0 0 0 0 7 0
+ 223 3600 0 .. 0 0 0 0 15 0
+ 225 3632 0 .. 0 0 0 0 8 0
+ 228 3680 0 .. 0 0 0 0 9 0
+ 230 3712 0 .. 0 0 0 0 10 0
+ 232 3744 0 .. 0 0 0 0 11 0
+ 234 3776 0 .. 0 0 0 0 12 0
+ 235 3792 0 .. 0 0 0 0 13 0
+ 236 3808 0 .. 0 0 0 0 14 0
+ 238 3840 0 .. 0 0 0 0 15 0
+ 254 4096 0 .. 0 0 0 0 1 0
...
Overall the combined zspage chain size effect on zsmalloc pool configuration:::
@@ -214,9 +229,10 @@ zram as a build artifacts storage (Linux kernel compilation).
zsmalloc classes stats:::
- class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+ class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
+
...
- Total 13 51 413836 412973 159955 3
+ Total 13 .. 51 413836 412973 159955 3
zram mm_stat:::
@@ -227,9 +243,10 @@ zram as a build artifacts storage (Linux kernel compilation).
zsmalloc classes stats:::
- class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
+ class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
+
...
- Total 18 87 414852 412978 156666 0
+ Total 18 .. 87 414852 412978 156666 0
zram mm_stat:::
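
As a rough illustration of the usage-ratio bucketing the updated zsmalloc document describes, the column a zspage lands in can be derived as below. The helper is invented for the example, not zsmalloc's own code:

/* Invented example: map a zspage's usage ratio to the stat columns
 * described above (10%, 20%, ..., 90%, 99%, 100%). */
static int usage_ratio_bucket(unsigned int inuse, unsigned int objs_per_zspage)
{
	unsigned int ratio = inuse * 100 / objs_per_zspage;

	if (ratio == 100)
		return 10;	/* the "100%" column: fully used zspage */
	return ratio / 10;	/* 0 = "<10%", ..., 9 = the "99%" column (90-99%) */
}
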
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 87dd1c5283e6..58a78a316697 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -340,6 +340,8 @@ tcp_app_win - INTEGER
Reserve max(window/2^tcp_app_win, mss) of window for application
buffer. Value 0 is special, it means that nothing is reserved.
+ Possible values are [0, 31], inclusive.
+
Default: 31
tcp_autocorking - BOOLEAN
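
To make the tcp_app_win rule above concrete, with illustrative numbers: for a 65536-byte window and an MSS of 1460, the default tcp_app_win=31 reserves max(65536 >> 31, 1460) = 1460 bytes, while tcp_app_win=2 would reserve max(16384, 1460) = 16384. A minimal kernel-style sketch of the documented rule:

/* Illustrative sketch of the reservation rule documented above. */
static unsigned int app_win_reserve(unsigned int window, unsigned int mss,
				    unsigned int tcp_app_win)
{
	if (tcp_app_win == 0)
		return 0;	/* 0 is special: reserve nothing */
	return max(window >> tcp_app_win, mss);
}
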
diff --git a/MAINTAINERS b/MAINTAINERS
index fdcef63fa9a0..b8b275e27cdb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -224,13 +224,13 @@ S: Orphan / Obsolete
F: drivers/net/ethernet/8390/
9P FILE SYSTEM
-M: Eric Van Hensbergen <ericvh@gmail.com>
+M: Eric Van Hensbergen <ericvh@kernel.org>
M: Latchesar Ionkov <lucho@ionkov.net>
M: Dominique Martinet <asmadeus@codewreck.org>
R: Christian Schoenebeck <linux_oss@crudebyte.com>
-L: v9fs-developer@lists.sourceforge.net
+L: v9fs@lists.linux.dev
S: Maintained
-W: http://swik.net/v9fs
+W: http://github.com/v9fs
Q: http://patchwork.kernel.org/project/v9fs-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs.git
T: git git://github.com/martinetd/linux.git
@@ -4468,14 +4468,14 @@ F: Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
F: drivers/net/ieee802154/ca8210.c
CANAAN/KENDRYTE K210 SOC FPIOA DRIVER
-M: Damien Le Moal <damien.lemoal@wdc.com>
+M: Damien Le Moal <dlemoal@kernel.org>
L: linux-riscv@lists.infradead.org
L: linux-gpio@vger.kernel.org (pinctrl driver)
F: Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
F: drivers/pinctrl/pinctrl-k210.c
CANAAN/KENDRYTE K210 SOC RESET CONTROLLER DRIVER
-M: Damien Le Moal <damien.lemoal@wdc.com>
+M: Damien Le Moal <dlemoal@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-riscv@lists.infradead.org
S: Maintained
@@ -4483,7 +4483,7 @@ F: Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml
F: drivers/reset/reset-k210.c
CANAAN/KENDRYTE K210 SOC SYSTEM CONTROLLER DRIVER
-M: Damien Le Moal <damien.lemoal@wdc.com>
+M: Damien Le Moal <dlemoal@kernel.org>
L: linux-riscv@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml
@@ -11765,7 +11765,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F: drivers/ata/sata_promise.*
LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
-M: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+M: Damien Le Moal <dlemoal@kernel.org>
L: linux-ide@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git
@@ -23139,7 +23139,7 @@ S: Maintained
F: arch/x86/kernel/cpu/zhaoxin.c
ZONEFS FILESYSTEM
-M: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+M: Damien Le Moal <dlemoal@kernel.org>
M: Naohiro Aota <naohiro.aota@wdc.com>
R: Johannes Thumshirn <jth@kernel.org>
L: linux-fsdevel@vger.kernel.org
diff --git a/Makefile b/Makefile
index ef4e96b9cd5b..5aeea3d98fc0 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
index 5edec2f49ec9..deff21bfa680 100644
--- a/arch/arm64/kernel/compat_alignment.c
+++ b/arch/arm64/kernel/compat_alignment.c
@@ -314,36 +314,32 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
unsigned int type;
u32 instr = 0;
- u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
- int fault;
instrptr = instruction_pointer(regs);
if (compat_thumb_mode(regs)) {
__le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
+ u16 tinstr, tinst2;
- fault = alignment_get_thumb(regs, ptr, &tinstr);
- if (!fault) {
- if (IS_T32(tinstr)) {
- /* Thumb-2 32-bit */
- u16 tinst2;
- fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
- instr = ((u32)tinstr << 16) | tinst2;
- thumb2_32b = 1;
- } else {
- isize = 2;
- instr = thumb2arm(tinstr);
- }
+ if (alignment_get_thumb(regs, ptr, &tinstr))
+ return 1;
+
+ if (IS_T32(tinstr)) { /* Thumb-2 32-bit */
+ if (alignment_get_thumb(regs, ptr + 1, &tinst2))
+ return 1;
+ instr = ((u32)tinstr << 16) | tinst2;
+ thumb2_32b = 1;
+ } else {
+ isize = 2;
+ instr = thumb2arm(tinstr);
}
} else {
- fault = alignment_get_arm(regs, (__le32 __user *)instrptr, &instr);
+ if (alignment_get_arm(regs, (__le32 __user *)instrptr, &instr))
+ return 1;
}
- if (fault)
- return 1;
-
switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
if (LDSTHD_I_BIT(instr))
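
For the Thumb-2 path above, the two 16-bit halfwords are fetched separately and combined into a single 32-bit encoding. With illustrative values:

/* Illustrative values only: combining Thumb-2 halfwords as the code above does. */
u16 tinstr = 0xf8d0;	/* first halfword; top bits mark a 32-bit T32 encoding */
u16 tinst2 = 0x1004;	/* second halfword */
u32 instr = ((u32)tinstr << 16) | tinst2;	/* 0xf8d01004: ldr.w r1, [r0, #4] */
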
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3f6a5efdbcf0..4b2e16e696a8 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1890,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
return ret;
}
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+ /*
+ * Track whether the system isn't affected by spectre/meltdown in the
+ * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+ * Although this is per-CPU, we make it global for simplicity, e.g., not
+ * to have to worry about vcpu migration.
+ *
+ * Unlike for non-protected VMs, userspace cannot override this for
+ * protected VMs.
+ */
+ u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+ arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+ arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+ return val;
+}
+
static void kvm_hyp_init_symbols(void)
{
- kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+ kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
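
FIELD_PREP() in the new helper shifts a value into the field described by a mask; for the 4-bit CSV2 field at bits [59:56] of ID_AA64PFR0_EL1 the pattern reduces to the sketch below (values illustrative):

/* Illustrative: how FIELD_PREP() populates a 4-bit ID register field. */
u64 val = 0;

val &= ~GENMASK_ULL(59, 56);			/* clear the CSV2 field     */
val |= FIELD_PREP(GENMASK_ULL(59, 56), 1);	/* CSV2 = 1: not affected
						   by Spectre-v2           */
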
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 07edfc7524c9..37440e1dda93 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -33,11 +33,14 @@
* Allow for protected VMs:
* - Floating-point and Advanced SIMD
* - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
*/
#define PVM_ID_AA64PFR0_ALLOW (\
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
- ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+ ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
)
/*
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 08d2b004f4b7..edd969a1f36b 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
- const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
u64 set_mask = 0;
u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
- /* Spectre and Meltdown mitigation in KVM */
- set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
- (u64)kvm->arch.pfr0_csv2);
- set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
- (u64)kvm->arch.pfr0_csv3);
-
return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index c243b10f3e15..5eca0cdd961d 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -558,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
}
+ kvm_vcpu_pmu_restore_guest(vcpu);
}
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 1b2c161120be..34688918c811 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
kvm_pmu_handle_pmcr(vcpu, val);
- kvm_vcpu_pmu_restore_guest(vcpu);
} else {
/* PMCR.P & PMCR.C are RAZ */
val = __vcpu_sys_reg(vcpu, PMCR_EL0)
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index a6acb94ea3d6..c2edadb8ec6a 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -281,4 +281,8 @@
/* DMB */
#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
+/* ADR */
+#define A64_ADR(Rd, offset) \
+ aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)
+
#endif /* _BPF_JIT_H */
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 62f805f427b7..b26da8efa616 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1900,7 +1900,8 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
restore_args(ctx, args_off, nargs);
/* call original func */
emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
- emit(A64_BLR(A64_R(10)), ctx);
+ emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
+ emit(A64_RET(A64_R(10)), ctx);
/* store return value */
emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
/* reserve a nop for bpf_tramp_image_put */
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index e70c846efaa1..db9342b2d0e6 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1022,6 +1022,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
emit_atomic(insn, ctx);
break;
+ /* Speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
default:
pr_err("bpf_jit: unknown opcode %02x\n", code);
return -EINVAL;
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index b70559b821df..2106a2bd152b 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -3,9 +3,14 @@ core-y += arch/x86/crypto/
#
# Disable SSE and other FP/SIMD instructions to match normal x86
+# This is required to work around issues in older LLVM versions, but breaks
+# GCC versions < 11. See:
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
#
+ifeq ($(CONFIG_CC_IS_CLANG),y)
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
+endif
ifeq ($(CONFIG_X86_32),y)
START := 0x8048000
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index cbaf174d8efd..b3af2d45bbbb 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -125,6 +125,8 @@
#define INTEL_FAM6_LUNARLAKE_M 0xBD
+#define INTEL_FAM6_ARROWLAKE 0xC6
+
/* "Small Core" Processors (Atom/E-Core) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 1c38174b5f01..0dac4ab5b55b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
pr_debug("Local APIC address 0x%08x\n", madt->address);
}
- if (madt->header.revision >= 5)
+
+ /* ACPI 6.3 and newer support the online capable bit. */
+ if (acpi_gbl_FADT.header.revision > 6 ||
+ (acpi_gbl_FADT.header.revision == 6 &&
+ acpi_gbl_FADT.minor_revision >= 3))
acpi_support_online_capable = true;
default_acpi_madt_oem_check(madt->header.oem_id,
@@ -193,7 +197,8 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
if (lapic_flags & ACPI_MADT_ENABLED)
return true;
- if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
+ if (!acpi_support_online_capable ||
+ (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
return true;
return false;
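
The revision check added above can be read as a standalone predicate: per the comment, the online-capable MADT flag is defined from ACPI 6.3 onward, i.e. FADT major > 6, or major == 6 with minor >= 3. The helper name below is made up for the illustration:

/* Hypothetical helper mirroring the version check added above. */
static bool fadt_is_acpi_6_3_or_newer(u8 major, u8 minor)
{
	return major > 6 || (major == 6 && minor >= 3);
}

/* e.g. true for 6.3, 6.4 and 7.0; false for 6.2 and 5.1 */
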
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 615a76d70019..bf5161dcf89e 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -7,6 +7,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>
@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
#endif
+
+#ifdef CONFIG_AMD_NB
+
+#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
+#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L
+
+static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
+{
+ u32 data;
+
+ if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
+ data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
+ if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
+ pci_err(dev, "Failed to write data 0x%x\n", data);
+ } else {
+ pci_err(dev, "Failed to read data\n");
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
+#endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cf1a39adf9a5..f0ea9dcfb966 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1359,8 +1359,6 @@ bool blk_rq_is_poll(struct request *rq)
return false;
if (rq->mq_hctx->type != HCTX_TYPE_POLL)
return false;
- if (WARN_ON_ONCE(!rq->bio))
- return false;
return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);
@@ -1368,7 +1366,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
do {
- bio_poll(rq->bio, NULL, 0);
+ blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
cond_resched();
} while (!completion_done(wait));
}
diff --git a/block/genhd.c b/block/genhd.c
index 02d9cfb9e077..7f874737af68 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
if (disk->open_partitions)
return -EBUSY;
- set_bit(GD_NEED_PART_SCAN, &disk->state);
/*
* If the device is opened exclusively by current thread already, it's
* safe to scan partitions, otherwise, use bd_prepare_to_claim() to
@@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
return ret;
}
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
if (IS_ERR(bdev))
ret = PTR_ERR(bdev);
else
blkdev_put(bdev, mode & ~FMODE_EXCL);
+ /*
+ * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
+ * and re-assembling a partitioned raid device would then create
+ * partitions for the underlying disk.
+ */
+ clear_bit(GD_NEED_PART_SCAN, &disk->state);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(disk->part0, disk_scan_partitions);
return ret;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 97b711e57bff..c7a6d0b69dab 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1984,6 +1984,7 @@ static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
+ bool auto_detect;
int error;
acpi_status status;
@@ -2045,10 +2046,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
mutex_unlock(&video_list_lock);
/*
- * The userspace visible backlight_device gets registered separately
- * from acpi_video_register_backlight().
+ * If backlight-type auto-detection is used then a native backlight may
+ * show up later and this may change the result from video to native.
+ * Therefore, normally the userspace-visible /sys/class/backlight device
+ * gets registered separately by the GPU driver calling
+ * acpi_video_register_backlight() when an internal panel is detected.
+ * Register the backlight now when not using auto-detection, so that
+ * when the kernel cmdline or DMI-quirks are used the backlight will
+ * get registered even if acpi_video_register_backlight() is not called.
*/
acpi_video_run_bcl_for_osi(video);
+ if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
+ !auto_detect)
+ acpi_video_bus_register_backlight(video);
+
acpi_video_bus_add_notify_handler(video);
return 0;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fd7cbce8076e..e85729fc481f 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -277,6 +277,43 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
/*
+ * Models which need acpi_video backlight control where the GPU drivers
+ * do not call acpi_video_register_backlight() because no internal panel
+ * is detected. Typically these are all-in-ones (monitors with builtin
+ * PC) where the panel connection shows up as regular DP instead of eDP.
+ */
+ {
+ .callback = video_detect_force_video,
+ /* Apple iMac14,1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
+ },
+ },
+ {
+ .callback = video_detect_force_video,
+ /* Apple iMac14,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
+ },
+ },
+
+ /*
+ * Older models with nvidia GPU which need acpi_video backlight
+ * control and where the old nvidia binary driver series does not
+ * call acpi_video_register_backlight().
+ */
+ {
+ .callback = video_detect_force_video,
+ /* ThinkPad W530 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
+ },
+ },
+
+ /*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work
* when userspace is not handling brightness key events. Disable
@@ -782,7 +819,7 @@ static bool prefer_native_over_acpi_video(void)
* Determine which type of backlight interface to use on this system,
* First check cmdline, then dmi quirks, then do autodetect.
*/
-static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
{
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;
@@ -807,6 +844,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
native_available = true;
mutex_unlock(&init_mutex);
+ if (auto_detect)
+ *auto_detect = false;
+
/*
* The below heuristics / detection steps are in order of descending
* precedence. The commandline takes precedence over anything else.
@@ -818,6 +858,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi;
+ if (auto_detect)
+ *auto_detect = true;
+
/* Special cases such as nvidia_wmi_ec and apple gmux. */
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
@@ -837,15 +880,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
/* No ACPI video/native (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor;
}
-
-enum acpi_backlight_type acpi_video_get_backlight_type(void)
-{
- return __acpi_video_get_backlight_type(false);
-}
-EXPORT_SYMBOL(acpi_video_get_backlight_type);
-
-bool acpi_video_backlight_use_native(void)
-{
- return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
-}
-EXPORT_SYMBOL(acpi_video_backlight_use_native);
+EXPORT_SYMBOL(__acpi_video_get_backlight_type);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c73cc57ec547..604c1a13c76e 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -246,7 +246,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
const struct ublk_param_basic *p = &ub->params.basic;
- if (p->logical_bs_shift > PAGE_SHIFT)
+ if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL;
if (p->logical_bs_shift > p->physical_bs_shift)
@@ -1261,9 +1261,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
ublk_queue_cmd(ubq, req);
}
-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+ unsigned int issue_flags,
+ struct ublksrv_io_cmd *ub_cmd)
{
- struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
@@ -1362,6 +1363,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
}
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
+ struct ublksrv_io_cmd ub_cmd;
+
+ /*
+ * Not necessary for async retry, but let's keep it simple and always
+ * copy the values to avoid any potential reuse.
+ */
+ ub_cmd.q_id = READ_ONCE(ub_src->q_id);
+ ub_cmd.tag = READ_ONCE(ub_src->tag);
+ ub_cmd.result = READ_ONCE(ub_src->result);
+ ub_cmd.addr = READ_ONCE(ub_src->addr);
+
+ return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
+}
+
static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
@@ -1952,6 +1970,8 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
/* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub);
+ if (ret)
+ ub->params.types = 0;
}
mutex_unlock(&ub->mutex);
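
The new ublk_ch_uring_cmd() wrapper above snapshots the userspace-shared command fields once with READ_ONCE() and passes a private copy down, closing the classic double-fetch window where a value is validated and then re-read after userspace has changed it. Schematically, with types and helpers invented for the example:

/* Invented example of the double-fetch hazard the copy avoids. */
int dispatch(u16 q_id, u16 tag);		/* hypothetical consumer */

#define NR_QUEUES 4

struct shared_cmd { u16 q_id; u16 tag; };	/* mapped into userspace */

static int handle_racy(struct shared_cmd *shared)
{
	if (shared->q_id >= NR_QUEUES)		/* validate ...                 */
		return -EINVAL;
	return dispatch(shared->q_id, shared->tag); /* ... then fetch again:
						       userspace may have
						       rewritten q_id by now   */
}

static int handle_safe(struct shared_cmd *shared)
{
	u16 q_id = READ_ONCE(shared->q_id);	/* single snapshot */
	u16 tag = READ_ONCE(shared->tag);

	if (q_id >= NR_QUEUES)
		return -EINVAL;
	return dispatch(q_id, tag);		/* only the snapshot is used */
}
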
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2723eede6f21..2b918e28acaa 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -96,16 +96,14 @@ struct virtblk_req {
/*
* The zone append command has an extended in header.
- * The status field in zone_append_in_hdr must have
- * the same offset in virtblk_req as the non-zoned
- * status field above.
+ * The status field in zone_append_in_hdr must always
+ * be the last byte.
*/
struct {
+ __virtio64 sector;
u8 status;
- u8 reserved[7];
- __le64 append_sector;
- } zone_append_in_hdr;
- };
+ } zone_append;
+ } in_hdr;
size_t in_hdr_len;
@@ -154,7 +152,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
sgs[num_out + num_in++] = vbr->sg_table.sgl;
}
- sg_init_one(&in_hdr, &vbr->status, vbr->in_hdr_len);
+ sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
sgs[num_out + num_in++] = &in_hdr;
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
@@ -242,11 +240,14 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
struct request *req,
struct virtblk_req *vbr)
{
- size_t in_hdr_len = sizeof(vbr->status);
+ size_t in_hdr_len = sizeof(vbr->in_hdr.status);
bool unmap = false;
u32 type;
u64 sector = 0;
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
+ return BLK_STS_NOTSUPP;
+
/* Set fields for all request types */
vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
@@ -287,7 +288,7 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
case REQ_OP_ZONE_APPEND:
type = VIRTIO_BLK_T_ZONE_APPEND;
sector = blk_rq_pos(req);
- in_hdr_len = sizeof(vbr->zone_append_in_hdr);
+ in_hdr_len = sizeof(vbr->in_hdr.zone_append);
break;
case REQ_OP_ZONE_RESET:
type = VIRTIO_BLK_T_ZONE_RESET;
@@ -297,7 +298,10 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
type = VIRTIO_BLK_T_ZONE_RESET_ALL;
break;
case REQ_OP_DRV_IN:
- /* Out header already filled in, nothing to do */
+ /*
+ * Out header has already been prepared by the caller (virtblk_get_id()
+ * or virtblk_submit_zone_report()), nothing to do here.
+ */
return 0;
default:
WARN_ON_ONCE(1);
@@ -318,16 +322,28 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
return 0;
}
+/*
+ * The status byte is always the last byte of the virtblk request
+ * in-header. This helper fetches its value for all in-header formats
+ * that are currently defined.
+ */
+static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
+{
+ return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
+}
+
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
- blk_status_t status = virtblk_result(vbr->status);
+ blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
+ struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = le64_to_cpu(vbr->zone_append_in_hdr.append_sector);
+ req->__sector = virtio64_to_cpu(vblk->vdev,
+ vbr->in_hdr.zone_append.sector);
blk_mq_end_request(req, status);
}
@@ -355,7 +371,7 @@ static int virtblk_handle_req(struct virtio_blk_vq *vq,
if (likely(!blk_should_fake_timeout(req->q)) &&
!blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, vbr->status,
+ !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
virtblk_complete_batch))
virtblk_request_done(req);
req_done++;
@@ -550,7 +566,6 @@ static void virtio_queue_rqs(struct request **rqlist)
#ifdef CONFIG_BLK_DEV_ZONED
static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
unsigned int nr_zones,
- unsigned int zone_sectors,
size_t *buflen)
{
struct request_queue *q = vblk->disk->queue;
@@ -558,7 +573,7 @@ static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
void *buf;
nr_zones = min_t(unsigned int, nr_zones,
- get_capacity(vblk->disk) >> ilog2(zone_sectors));
+ get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
bufsize = sizeof(struct virtio_blk_zone_report) +
nr_zones * sizeof(struct virtio_blk_zone_descriptor);
@@ -592,7 +607,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
return PTR_ERR(req);
vbr = blk_mq_rq_to_pdu(req);
- vbr->in_hdr_len = sizeof(vbr->status);
+ vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
@@ -601,7 +616,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
goto out;
blk_execute_rq(req, false);
- err = blk_status_to_errno(virtblk_result(vbr->status));
+ err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;
@@ -609,29 +624,72 @@ out:
static int virtblk_parse_zone(struct virtio_blk *vblk,
struct virtio_blk_zone_descriptor *entry,
- unsigned int idx, unsigned int zone_sectors,
- report_zones_cb cb, void *data)
+ unsigned int idx, report_zones_cb cb, void *data)
{
struct blk_zone zone = { };
- if (entry->z_type != VIRTIO_BLK_ZT_SWR &&
- entry->z_type != VIRTIO_BLK_ZT_SWP &&
- entry->z_type != VIRTIO_BLK_ZT_CONV) {
- dev_err(&vblk->vdev->dev, "invalid zone type %#x\n",
- entry->z_type);
- return -EINVAL;
+ zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
+ if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
+ zone.len = vblk->zone_sectors;
+ else
+ zone.len = get_capacity(vblk->disk) - zone.start;
+ zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
+ zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
+
+ switch (entry->z_type) {
+ case VIRTIO_BLK_ZT_SWR:
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ break;
+ case VIRTIO_BLK_ZT_SWP:
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
+ break;
+ case VIRTIO_BLK_ZT_CONV:
+ zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ break;
+ default:
+ dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
+ zone.start, entry->z_type);
+ return -EIO;
}
- zone.type = entry->z_type;
- zone.cond = entry->z_state;
- zone.len = zone_sectors;
- zone.capacity = le64_to_cpu(entry->z_cap);
- zone.start = le64_to_cpu(entry->z_start);
- if (zone.cond == BLK_ZONE_COND_FULL)
+ switch (entry->z_state) {
+ case VIRTIO_BLK_ZS_EMPTY:
+ zone.cond = BLK_ZONE_COND_EMPTY;
+ break;
+ case VIRTIO_BLK_ZS_CLOSED:
+ zone.cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case VIRTIO_BLK_ZS_FULL:
+ zone.cond = BLK_ZONE_COND_FULL;
zone.wp = zone.start + zone.len;
- else
- zone.wp = le64_to_cpu(entry->z_wp);
+ break;
+ case VIRTIO_BLK_ZS_EOPEN:
+ zone.cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case VIRTIO_BLK_ZS_IOPEN:
+ zone.cond = BLK_ZONE_COND_IMP_OPEN;
+ break;
+ case VIRTIO_BLK_ZS_NOT_WP:
+ zone.cond = BLK_ZONE_COND_NOT_WP;
+ break;
+ case VIRTIO_BLK_ZS_RDONLY:
+ zone.cond = BLK_ZONE_COND_READONLY;
+ zone.wp = ULONG_MAX;
+ break;
+ case VIRTIO_BLK_ZS_OFFLINE:
+ zone.cond = BLK_ZONE_COND_OFFLINE;
+ zone.wp = ULONG_MAX;
+ break;
+ default:
+ dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
+ zone.start, entry->z_state);
+ return -EIO;
+ }
+ /*
+ * The callback below checks the validity of the reported
+ * entry data; there is no need to validate it further here.
+ */
return cb(&zone, idx, data);
}
@@ -641,39 +699,47 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
{
struct virtio_blk *vblk = disk->private_data;
struct virtio_blk_zone_report *report;
- unsigned int zone_sectors = vblk->zone_sectors;
- unsigned int nz, i;
- int ret, zone_idx = 0;
+ unsigned long long nz, i;
size_t buflen;
+ unsigned int zone_idx = 0;
+ int ret;
if (WARN_ON_ONCE(!vblk->zone_sectors))
return -EOPNOTSUPP;
- report = virtblk_alloc_report_buffer(vblk, nr_zones,
- zone_sectors, &buflen);
+ report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
if (!report)
return -ENOMEM;
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (!vblk->vdev) {
+ ret = -ENXIO;
+ goto fail_report;
+ }
+
while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
memset(report, 0, buflen);
ret = virtblk_submit_zone_report(vblk, (char *)report,
buflen, sector);
- if (ret) {
- if (ret > 0)
- ret = -EIO;
- goto out_free;
- }
- nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+ if (ret)
+ goto fail_report;
+
+ nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
+ nr_zones);
if (!nz)
break;
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = virtblk_parse_zone(vblk, &report->zones[i],
- zone_idx, zone_sectors, cb, data);
+ zone_idx, cb, data);
if (ret)
- goto out_free;
- sector = le64_to_cpu(report->zones[i].z_start) + zone_sectors;
+ goto fail_report;
+
+ sector = virtio64_to_cpu(vblk->vdev,
+ report->zones[i].z_start) +
+ vblk->zone_sectors;
zone_idx++;
}
}
@@ -682,7 +748,8 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
ret = zone_idx;
else
ret = -EINVAL;
-out_free:
+fail_report:
+ mutex_unlock(&vblk->vdev_mutex);
kvfree(report);
return ret;
}
@@ -691,20 +758,28 @@ static void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
u8 model;
- if (!vblk->zone_sectors)
- return;
-
virtio_cread(vblk->vdev, struct virtio_blk_config,
zoned.model, &model);
- if (!blk_revalidate_disk_zones(vblk->disk, NULL))
- set_capacity_and_notify(vblk->disk, 0);
+ switch (model) {
+ default:
+ dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
+ fallthrough;
+ case VIRTIO_BLK_Z_NONE:
+ case VIRTIO_BLK_Z_HA:
+ disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
+ return;
+ case VIRTIO_BLK_Z_HM:
+ WARN_ON_ONCE(!vblk->zone_sectors);
+ if (!blk_revalidate_disk_zones(vblk->disk, NULL))
+ set_capacity_and_notify(vblk->disk, 0);
+ }
}
static int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk,
struct request_queue *q)
{
- u32 v;
+ u32 v, wg;
u8 model;
int ret;
@@ -713,16 +788,11 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
switch (model) {
case VIRTIO_BLK_Z_NONE:
+ case VIRTIO_BLK_Z_HA:
+ /* Present the host-aware device as non-zoned */
return 0;
case VIRTIO_BLK_Z_HM:
break;
- case VIRTIO_BLK_Z_HA:
- /*
- * Present the host-aware device as a regular drive.
- * TODO It is possible to add an option to make it appear
- * in the system as a zoned drive.
- */
- return 0;
default:
dev_err(&vdev->dev, "unsupported zone model %d\n", model);
return -EINVAL;
@@ -735,32 +805,31 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
- disk_set_max_open_zones(vblk->disk, le32_to_cpu(v));
-
- dev_dbg(&vdev->dev, "max open zones = %u\n", le32_to_cpu(v));
+ disk_set_max_open_zones(vblk->disk, v);
+ dev_dbg(&vdev->dev, "max open zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_active_zones, &v);
- disk_set_max_active_zones(vblk->disk, le32_to_cpu(v));
- dev_dbg(&vdev->dev, "max active zones = %u\n", le32_to_cpu(v));
+ disk_set_max_active_zones(vblk->disk, v);
+ dev_dbg(&vdev->dev, "max active zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
- zoned.write_granularity, &v);
- if (!v) {
+ zoned.write_granularity, &wg);
+ if (!wg) {
dev_warn(&vdev->dev, "zero write granularity reported\n");
return -ENODEV;
}
- blk_queue_physical_block_size(q, le32_to_cpu(v));
- blk_queue_io_min(q, le32_to_cpu(v));
+ blk_queue_physical_block_size(q, wg);
+ blk_queue_io_min(q, wg);
- dev_dbg(&vdev->dev, "write granularity = %u\n", le32_to_cpu(v));
+ dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
/*
* virtio ZBD specification doesn't require zones to be a power of
* two sectors in size, but the code in this driver expects that.
*/
- virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors, &v);
- vblk->zone_sectors = le32_to_cpu(v);
+ virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
+ &vblk->zone_sectors);
if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
dev_err(&vdev->dev,
"zoned device with non power of two zone size %u\n",
@@ -783,36 +852,46 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
return -ENODEV;
}
- blk_queue_max_zone_append_sectors(q, le32_to_cpu(v));
- dev_dbg(&vdev->dev, "max append sectors = %u\n", le32_to_cpu(v));
+ if ((v << SECTOR_SHIFT) < wg) {
+ dev_err(&vdev->dev,
+ "write granularity %u exceeds max_append_sectors %u limit\n",
+ wg, v);
+ return -ENODEV;
+ }
+
+ blk_queue_max_zone_append_sectors(q, v);
+ dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
}
return ret;
}
-static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
-{
- return virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED);
-}
#else
/*
* Zoned block device support is not configured in this kernel.
- * We only need to define a few symbols to avoid compilation errors.
+ * Host-managed zoned devices can't be supported, but others are
+ * good to go as regular block devices.
*/
#define virtblk_report_zones NULL
+
static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
}
+
static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk, struct request_queue *q)
{
- return -EOPNOTSUPP;
-}
+ u8 model;
-static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
-{
- return false;
+ virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
+ if (model == VIRTIO_BLK_Z_HM) {
+ dev_err(&vdev->dev,
+ "virtio_blk: zoned devices are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -831,7 +910,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
return PTR_ERR(req);
vbr = blk_mq_rq_to_pdu(req);
- vbr->in_hdr_len = sizeof(vbr->status);
+ vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
@@ -840,7 +919,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
goto out;
blk_execute_rq(req, false);
- err = blk_status_to_errno(virtblk_result(vbr->status));
+ err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;
@@ -1498,15 +1577,16 @@ static int virtblk_probe(struct virtio_device *vdev)
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
- if (virtblk_has_zoned_feature(vdev)) {
+ /*
+ * All steps that follow use the VQs therefore they need to be
+ * placed after the virtio_device_ready() call above.
+ */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
err = virtblk_probe_zoned_device(vdev, vblk, q);
if (err)
goto out_cleanup_disk;
}
- dev_info(&vdev->dev, "blk config size: %zu\n",
- sizeof(struct virtio_blk_config));
-
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
if (err)
goto out_cleanup_disk;
@@ -1607,10 +1687,7 @@ static unsigned int features[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
- VIRTIO_BLK_F_SECURE_ERASE,
-#ifdef CONFIG_BLK_DEV_ZONED
- VIRTIO_BLK_F_ZONED,
-#endif /* CONFIG_BLK_DEV_ZONED */
+ VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
};
static struct virtio_driver virtio_blk = {
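
Pulling the scattered virtio_blk hunks together, the reworked in-header has roughly the shape sketched below (reconstructed from the hunks above; unrelated members elided). Because both variants end in a status byte, virtblk_vbr_status() can fetch it generically via in_hdr_len:

/* Reconstruction for illustration; see the hunks above for the real code. */
struct virtblk_req_sketch {
	/* ... out header, sg table, etc. elided ... */
	union {
		u8 status;			/* ordinary requests         */
		struct {
			__virtio64 sector;	/* zone-append result sector */
			u8 status;		/* still the last byte       */
		} zone_append;
	} in_hdr;
	size_t in_hdr_len;			/* sizeof the variant in use */
};

/* The status byte is always the last byte of the in-header: */
static inline u8 vbr_status_sketch(struct virtblk_req_sketch *vbr)
{
	return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
}
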
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 3006e2a0f37e..43e98a598bd9 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -511,7 +511,7 @@ static const char *btbcm_get_board_name(struct device *dev)
len = strlen(tmp) + 1;
board_type = devm_kzalloc(dev, len, GFP_KERNEL);
strscpy(board_type, tmp, len);
- for (i = 0; i < board_type[i]; i++) {
+ for (i = 0; i < len; i++) {
if (board_type[i] == '/')
board_type[i] = '-';
}
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 02893600db39..51000320e1ea 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -358,6 +358,7 @@ static void btsdio_remove(struct sdio_func *func)
if (!data)
return;
+ cancel_work_sync(&data->work);
hdev = data->hdev;
sdio_set_drvdata(func, NULL);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 36d42484142a..cf463c1d2102 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -329,6 +329,12 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
"Failed to setup timing for '%pOF'\n", rd->dn);
if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
+ /*
+ * Clear the flag before adding the device so that
+ * fw_devlink doesn't skip adding consumers to this
+ * device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
dev_err(&pdev->dev,
"Failed to create child device '%pOF'\n",
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index deed4afadb29..d9cb937665cf 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -97,10 +97,6 @@ struct quad8 {
struct quad8_reg __iomem *reg;
};
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
/* Error flag */
#define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */
@@ -133,6 +129,9 @@ struct quad8 {
#define QUAD8_CMR_QUADRATURE_X2 0x10
#define QUAD8_CMR_QUADRATURE_X4 0x18
+/* Each Counter is 24 bits wide */
+#define LS7267_CNTR_MAX GENMASK(23, 0)
+
static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
@@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
unsigned long irqflags;
int i;
- flags = ioread8(&chan->control);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- *val = (unsigned long)(borrow ^ carry) << 24;
+ *val = 0;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
unsigned long irqflags;
int i;
- /* Only 24-bit values are supported */
- if (val > 0xFFFFFF)
+ if (val > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,
/* Handle Index signals */
if (synapse->signal->id >= 16) {
- if (priv->preset_enable[count->id])
+ if (!priv->preset_enable[count->id])
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
*action = COUNTER_SYNAPSE_ACTION_NONE;
@@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
- /* Only 24-bit values are supported */
- if (preset > 0xFFFFFF)
+ if (preset > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
*ceiling = priv->preset[count->id];
break;
default:
- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
- *ceiling = 0x1FFFFFF;
+ *ceiling = LS7267_CNTR_MAX;
break;
}
@@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
- /* Only 24-bit values are supported */
- if (ceiling > 0xFFFFFF)
+ if (ceiling > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
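The 104-quad-8 cleanup replaces bare 0xFFFFFF/0x1FFFFFF literals with a named GENMASK(23, 0) limit. A runnable sketch of the same range-check idiom; GENMASK is re-derived here so the example builds in userspace:

        #include <errno.h>
        #include <stdio.h>

        /* Userspace stand-in for the kernel's GENMASK(h, l) */
        #define GENMASK(h, l) \
                (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))

        /* Each LS7267 counter is 24 bits wide */
        #define LS7267_CNTR_MAX GENMASK(23, 0)

        static int count_write(unsigned long val)
        {
                if (val > LS7267_CNTR_MAX)
                        return -ERANGE; /* value won't fit in the hardware */
                /* ... program the counter ... */
                return 0;
        }

        int main(void)
        {
                printf("max=%#lx ok=%d bad=%d\n",
                       (unsigned long)LS7267_CNTR_MAX,
                       count_write(0xFFFFFF), count_write(0x1000000));
                return 0;
        }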
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 45deda18ed32..02cc2c38b44b 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -101,25 +101,40 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
BIT(CXL_CM_CAP_CAP_ID_HDM));
}
-static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
+static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
- struct device *dev = &port->dev;
struct cxl_hdm *cxlhdm;
+ void __iomem *hdm;
+ u32 ctrl;
+ int i;
- if (!info->mem_enabled)
- return ERR_PTR(-ENODEV);
+ if (!info)
+ return false;
- cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
- if (!cxlhdm)
- return ERR_PTR(-ENOMEM);
+ cxlhdm = dev_get_drvdata(&info->port->dev);
+ hdm = cxlhdm->regs.hdm_decoder;
- cxlhdm->port = port;
- cxlhdm->decoder_count = info->ranges;
- cxlhdm->target_count = info->ranges;
- dev_set_drvdata(&port->dev, cxlhdm);
+ if (!hdm)
+ return true;
- return cxlhdm;
+ /*
+ * If HDM decoders are present and the driver is in control of
+ * Mem_Enable, skip DVSEC-based emulation
+ */
+ if (!info->mem_enabled)
+ return false;
+
+ /*
+ * If any decoders are committed already, there should not be any
+ * emulated DVSEC decoders.
+ */
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+ return false;
+ }
+
+ return true;
}
/**
@@ -138,13 +153,14 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
if (!cxlhdm)
return ERR_PTR(-ENOMEM);
-
cxlhdm->port = port;
- crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
- if (!crb) {
- if (info && info->mem_enabled)
- return devm_cxl_setup_emulated_hdm(port, info);
+ dev_set_drvdata(dev, cxlhdm);
+ crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
+ if (!crb && info && info->mem_enabled) {
+ cxlhdm->decoder_count = info->ranges;
+ return cxlhdm;
+ } else if (!crb) {
dev_err(dev, "No component registers mapped\n");
return ERR_PTR(-ENXIO);
}
@@ -160,7 +176,15 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
return ERR_PTR(-ENXIO);
}
- dev_set_drvdata(dev, cxlhdm);
+ /*
+ * Now that the hdm capability is parsed, decide if range
+ * register emulation is needed and fixup cxlhdm accordingly.
+ */
+ if (should_emulate_decoders(info)) {
+ dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
+ info->ranges > 1 ? "s" : "");
+ cxlhdm->decoder_count = info->ranges;
+ }
return cxlhdm;
}
@@ -714,14 +738,20 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
return 0;
}
-static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
- struct cxl_decoder *cxld, int which,
- struct cxl_endpoint_dvsec_info *info)
+static int cxl_setup_hdm_decoder_from_dvsec(
+ struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
+ int which, struct cxl_endpoint_dvsec_info *info)
{
+ struct cxl_endpoint_decoder *cxled;
+ u64 len;
+ int rc;
+
if (!is_cxl_endpoint(port))
return -EOPNOTSUPP;
- if (!range_len(&info->dvsec_range[which]))
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ len = range_len(&info->dvsec_range[which]);
+ if (!len)
return -ENOENT;
cxld->target_type = CXL_DECODER_EXPANDER;
@@ -736,40 +766,24 @@ static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
port->commit_end = cxld->id;
- return 0;
-}
-
-static bool should_emulate_decoders(struct cxl_port *port)
-{
- struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- u32 ctrl;
- int i;
-
- if (!is_cxl_endpoint(cxlhdm->port))
- return false;
-
- if (!hdm)
- return true;
-
- /*
- * If any decoders are committed already, there should not be any
- * emulated DVSEC decoders.
- */
- for (i = 0; i < cxlhdm->decoder_count; i++) {
- ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
- return false;
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
+ if (rc) {
+ dev_err(&port->dev,
+ "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+ port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
+ return rc;
}
+ *dpa_base += len;
+ cxled->state = CXL_DECODER_STATE_AUTO;
- return true;
+ return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map, void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
- struct cxl_endpoint_decoder *cxled = NULL;
+ struct cxl_endpoint_decoder *cxled;
u64 size, base, skip, dpa_size;
bool committed;
u32 remainder;
@@ -780,11 +794,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
unsigned char target_id[8];
} target_list;
- if (should_emulate_decoders(port))
- return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
- if (is_endpoint_decoder(&cxld->dev))
- cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ if (should_emulate_decoders(info))
+ return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
+ which, info);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
@@ -806,9 +818,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
.end = base + size - 1,
};
- if (cxled && !committed && range_len(&info->dvsec_range[which]))
- return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
/* decoders are enabled if committed */
if (committed) {
cxld->flags |= CXL_DECODER_F_ENABLE;
@@ -846,7 +855,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- if (!cxled) {
+ if (!info) {
target_list.value =
ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
for (i = 0; i < cxld->interleave_ways; i++)
@@ -866,6 +875,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
if (rc) {
dev_err(&port->dev,
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 7328a2552411..523d5b9fd7fc 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -462,7 +462,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
return NULL;
}
-#define CDAT_DOE_REQ(entry_handle) \
+#define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \
(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
@@ -475,8 +475,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
}
struct cdat_doe_task {
- u32 request_pl;
- u32 response_pl[32];
+ __le32 request_pl;
+ __le32 response_pl[32];
struct completion c;
struct pci_doe_task task;
};
@@ -510,10 +510,10 @@ static int cxl_cdat_get_length(struct device *dev,
return rc;
}
wait_for_completion(&t.c);
- if (t.task.rv < sizeof(u32))
+ if (t.task.rv < 2 * sizeof(__le32))
return -EIO;
- *length = t.response_pl[1];
+ *length = le32_to_cpu(t.response_pl[1]);
dev_dbg(dev, "CDAT length %zu\n", *length);
return 0;
@@ -524,13 +524,13 @@ static int cxl_cdat_read_table(struct device *dev,
struct cxl_cdat *cdat)
{
size_t length = cdat->length;
- u32 *data = cdat->table;
+ __le32 *data = cdat->table;
int entry_handle = 0;
do {
DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+ struct cdat_entry_header *entry;
size_t entry_dw;
- u32 *entry;
int rc;
rc = pci_doe_submit_task(cdat_doe, &t.task);
@@ -539,26 +539,34 @@ static int cxl_cdat_read_table(struct device *dev,
return rc;
}
wait_for_completion(&t.c);
- /* 1 DW header + 1 DW data min */
- if (t.task.rv < (2 * sizeof(u32)))
+
+ /* 1 DW Table Access Response Header + CDAT entry */
+ entry = (struct cdat_entry_header *)(t.response_pl + 1);
+ if ((entry_handle == 0 &&
+ t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
+ (entry_handle > 0 &&
+ (t.task.rv < sizeof(__le32) + sizeof(*entry) ||
+ t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
return -EIO;
/* Get the CXL table access header entry handle */
entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
- t.response_pl[0]);
- entry = t.response_pl + 1;
- entry_dw = t.task.rv / sizeof(u32);
+ le32_to_cpu(t.response_pl[0]));
+ entry_dw = t.task.rv / sizeof(__le32);
/* Skip Header */
entry_dw -= 1;
- entry_dw = min(length / sizeof(u32), entry_dw);
+ entry_dw = min(length / sizeof(__le32), entry_dw);
/* Prevent length < 1 DW from causing a buffer overflow */
if (entry_dw) {
- memcpy(data, entry, entry_dw * sizeof(u32));
- length -= entry_dw * sizeof(u32);
+ memcpy(data, entry, entry_dw * sizeof(__le32));
+ length -= entry_dw * sizeof(__le32);
data += entry_dw;
}
} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+ /* Length in CDAT header may exceed concatenation of CDAT entries */
+ cdat->length -= length;
+
return 0;
}
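The core of this pci.c change is typing the DOE request/response payloads as __le32 and converting on every access, since DOE payloads are little-endian on the wire regardless of host byte order. A minimal sketch of the access pattern, assuming kernel context (linux/bitfield.h for FIELD_GET, byte-order helpers from linux/types.h):

        /* Sketch: response dwords arrive little-endian; store them as
         * __le32 so sparse flags unconverted reads, and convert each
         * field at the point of use. */
        static u32 cdat_entry_handle(const __le32 *response_pl)
        {
                u32 hdr = le32_to_cpu(response_pl[0]); /* dword 0: access header */

                return FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, hdr);
        }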
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index c2e4b1093788..f8c38d997252 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
- struct cxl_port *port = find_cxl_root(start);
+ struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
struct device *dev;
if (!port)
@@ -253,7 +253,7 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
struct device *dev;
int rc;
- cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+ cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb)
return -ENODEV;
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 8ee6b6e2e2a4..4d1f9c5b5029 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -823,41 +823,17 @@ static bool dev_is_cxl_root_child(struct device *dev)
return false;
}
-/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
-static int match_root_child(struct device *dev, const void *match)
+struct cxl_port *find_cxl_root(struct cxl_port *port)
{
- const struct device *iter = NULL;
- struct cxl_dport *dport;
- struct cxl_port *port;
-
- if (!dev_is_cxl_root_child(dev))
- return 0;
-
- port = to_cxl_port(dev);
- iter = match;
- while (iter) {
- dport = cxl_find_dport_by_dev(port, iter);
- if (dport)
- break;
- iter = iter->parent;
- }
-
- return !!iter;
-}
+ struct cxl_port *iter = port;
-struct cxl_port *find_cxl_root(struct device *dev)
-{
- struct device *port_dev;
- struct cxl_port *root;
+ while (iter && !is_cxl_root(iter))
+ iter = to_cxl_port(iter->dev.parent);
- port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
- if (!port_dev)
+ if (!iter)
return NULL;
-
- root = to_cxl_port(port_dev->parent);
- get_device(&root->dev);
- put_device(port_dev);
- return root;
+ get_device(&iter->dev);
+ return iter;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index f29028148806..b2fd67fcebfb 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -134,9 +134,13 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_endpoint_decoder *cxled = p->targets[i];
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_ep *ep;
int rc = 0;
+ if (cxlds->rcd)
+ goto endpoint_reset;
+
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent);
@@ -153,6 +157,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
return rc;
}
+endpoint_reset:
rc = cxled->cxld.reset(&cxled->cxld);
if (rc)
return rc;
@@ -1199,6 +1204,7 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled;
+ struct cxl_dev_state *cxlds;
struct cxl_memdev *cxlmd;
struct cxl_port *iter;
struct cxl_ep *ep;
@@ -1214,6 +1220,10 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
for (i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i];
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+
+ if (cxlds->rcd)
+ continue;
iter = cxled_to_port(cxled);
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@@ -1229,14 +1239,24 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled;
+ struct cxl_dev_state *cxlds;
+ int i, rc, rch = 0, vh = 0;
struct cxl_memdev *cxlmd;
struct cxl_port *iter;
struct cxl_ep *ep;
- int i, rc;
for (i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i];
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+
+ /* validate that all targets agree on topology */
+ if (!cxlds->rcd) {
+ vh++;
+ } else {
+ rch++;
+ continue;
+ }
iter = cxled_to_port(cxled);
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@@ -1256,6 +1276,12 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
}
}
+ if (rch && vh) {
+ dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
+ cxl_region_teardown_targets(cxlr);
+ return -ENXIO;
+ }
+
return 0;
}
@@ -1648,6 +1674,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (rc)
goto err_decrement;
p->state = CXL_CONFIG_ACTIVE;
+ set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
}
cxled->cxld.interleave_ways = p->interleave_ways;
@@ -1749,8 +1776,6 @@ static int attach_target(struct cxl_region *cxlr,
down_read(&cxl_dpa_rwsem);
rc = cxl_region_attach(cxlr, cxled, pos);
- if (rc == 0)
- set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
return rc;
@@ -2251,7 +2276,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
* bridge for one device is the same for all.
*/
if (i == 0) {
- cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+ cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb) {
cxlr_pmem = ERR_PTR(-ENODEV);
goto out;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index f2b0962a552d..044a92d9813e 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -658,7 +658,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport);
-struct cxl_port *find_cxl_root(struct device *dev);
+struct cxl_port *find_cxl_root(struct cxl_port *port);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -695,13 +695,15 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
/**
* struct cxl_endpoint_dvsec_info - Cached DVSEC info
- * @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE
+ * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
* @ranges: Number of active HDM ranges this device uses.
+ * @port: endpoint port associated with this info instance
* @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
*/
struct cxl_endpoint_dvsec_info {
bool mem_enabled;
int ranges;
+ struct cxl_port *port;
struct range dvsec_range[2];
};
@@ -758,7 +760,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index be6a2ef3cce3..0465ef963cd6 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -68,6 +68,20 @@ enum cxl_regloc_type {
CXL_REGLOC_RBI_TYPES
};
+struct cdat_header {
+ __le32 length;
+ u8 revision;
+ u8 checksum;
+ u8 reserved[6];
+ __le32 sequence;
+} __packed;
+
+struct cdat_entry_header {
+ u8 type;
+ u8 reserved;
+ __le16 length;
+} __packed;
+
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
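The new cdat_header/cdat_entry_header definitions formalize the parsing: __packed layouts with __le16/__le32 fields let the raw response buffer be overlaid directly, with each field converted explicitly. A hedged sketch of validating one entry against its declared length (kernel context; helper name is illustrative):

        /* Sketch: overlay the packed header on the raw payload and use
         * the little-endian length field to bound-check the record. */
        static int check_cdat_entry(const void *buf, size_t avail)
        {
                const struct cdat_entry_header *entry = buf;
                size_t len;

                if (avail < sizeof(*entry))
                        return -EIO;

                len = le16_to_cpu(entry->length);
                if (len < sizeof(*entry) || len > avail)
                        return -EIO;    /* record claims more bytes than received */

                return 0;
        }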
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 1049bb5ea496..22a7ab2bae7c 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -78,8 +78,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
+ struct cxl_endpoint_dvsec_info info = { .port = port };
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
- struct cxl_endpoint_dvsec_info info = { 0 };
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_hdm *cxlhdm;
struct cxl_port *root;
@@ -119,7 +119,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
* This can't fail in practice as CXL root exit unregisters all
* descendant ports and that in turn synchronizes with cxl_port_probe()
*/
- root = find_cxl_root(&cxlmd->dev);
+ root = find_cxl_root(port);
/*
* Now that all endpoint decoders are successfully enumerated, try to
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 90f28bda29c8..4cf8da77bdd9 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -75,6 +75,7 @@
#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
+#define REG_GLOBAL_INTSTATE(idx) (0x0050 + (idx) * 4)
#define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
#define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)
@@ -511,7 +512,10 @@ static int admac_terminate_all(struct dma_chan *chan)
admac_stop_chan(adchan);
admac_reset_rings(adchan);
- adchan->current_tx = NULL;
+ if (adchan->current_tx) {
+ list_add_tail(&adchan->current_tx->node, &adchan->to_free);
+ adchan->current_tx = NULL;
+ }
/*
* Descriptors can only be freed after the tasklet
* has been killed (in admac_synchronize).
@@ -672,13 +676,14 @@ static void admac_handle_chan_int(struct admac_data *ad, int no)
static irqreturn_t admac_interrupt(int irq, void *devid)
{
struct admac_data *ad = devid;
- u32 rx_intstate, tx_intstate;
+ u32 rx_intstate, tx_intstate, global_intstate;
int i;
rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+ global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
- if (!tx_intstate && !rx_intstate)
+ if (!tx_intstate && !rx_intstate && !global_intstate)
return IRQ_NONE;
for (i = 0; i < ad->nchannels; i += 2) {
@@ -693,6 +698,12 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
rx_intstate >>= 1;
}
+ if (global_intstate) {
+ dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
+ global_intstate);
+ writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
+ }
+
return IRQ_HANDLED;
}
@@ -850,6 +861,9 @@ static int admac_probe(struct platform_device *pdev)
dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c24bca210104..826b98284fa1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1342,7 +1342,7 @@ int dmaenginem_async_device_register(struct dma_device *device)
if (ret)
return ret;
- return devm_add_action(device->dev, dmaenginem_async_device_unregister, device);
+ return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
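The dmaengine one-liner is worth spelling out: devm_add_action() returns an error without running the action when devres registration fails, leaving the device registered with no unwinder, while devm_add_action_or_reset() invokes the action immediately on that failure. A sketch of why the _or_reset form is the safe default (the do/undo helpers are hypothetical):

        /* Sketch: pair a manual "do" with a devres "undo". If the devres
         * registration itself fails, the _or_reset variant calls
         * teardown(thing) right away, so no path leaks registered state. */
        static void teardown(void *data)
        {
                my_unregister(data);    /* hypothetical undo helper */
        }

        static int setup(struct device *dev, struct my_thing *thing)
        {
                int ret = my_register(thing);   /* hypothetical do helper */

                if (ret)
                        return ret;

                return devm_add_action_or_reset(dev, teardown, thing);
        }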
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 462109c61653..93ee298d52b8 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -277,7 +277,7 @@ failed:
/**
* xdma_xfer_start - Start DMA transfer
- * @xdma_chan: DMA channel pointer
+ * @xchan: DMA channel pointer
*/
static int xdma_xfer_start(struct xdma_chan *xchan)
{
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 13be729710f2..badbe0582318 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -100,7 +100,7 @@ config GPIO_GENERIC
tristate
config GPIO_REGMAP
- depends on REGMAP
+ select REGMAP
tristate
# put drivers in the right section, in alphabetical order
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 26b1f7465e09..43b2dc8821e6 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -324,7 +324,7 @@ static struct irq_chip gpio_irqchip = {
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};
static void gpio_irq_handler(struct irq_desc *desc)
@@ -641,9 +641,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
context->set_falling = readl_relaxed(&g->set_falling);
}
- /* Clear Bank interrupt enable bit */
- writel_relaxed(0, base + BINTEN);
-
/* Clear all interrupt status registers */
writel_relaxed(GENMASK(31, 0), &g->intstat);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 1583157da355..efd025d8961e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -177,6 +177,40 @@ void dm_helpers_dp_update_branch_info(
const struct dc_link *link)
{}
+static void dm_helpers_construct_old_payload(
+ struct dc_link *link,
+ int pbn_per_slot,
+ struct drm_dp_mst_atomic_payload *new_payload,
+ struct drm_dp_mst_atomic_payload *old_payload)
+{
+ struct link_mst_stream_allocation_table current_link_table =
+ link->mst_stream_alloc_table;
+ struct link_mst_stream_allocation *dc_alloc;
+ int i;
+
+ *old_payload = *new_payload;
+
+ /* Set the correct time_slots/PBN of the old payload.
+ * The other fields (delete & dsc_enabled) in
+ * struct drm_dp_mst_atomic_payload are don't-care fields
+ * when calling drm_dp_remove_payload().
+ */
+ for (i = 0; i < current_link_table.stream_count; i++) {
+ dc_alloc =
+ &current_link_table.stream_allocations[i];
+
+ if (dc_alloc->vcp_id == new_payload->vcpi) {
+ old_payload->time_slots = dc_alloc->slot_count;
+ old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+ break;
+ }
+ }
+
+ /* Make sure there is an old payload */
+ ASSERT(i != current_link_table.stream_count);
+
+}
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -188,7 +222,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
{
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
struct drm_dp_mst_topology_mgr *mst_mgr;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -204,17 +238,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
- if (enable)
- drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
- else
- drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+ new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+ if (enable) {
+ target_payload = new_payload;
+
+ drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+ } else {
+ /* Construct the old payload by VCPI */
+ dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+ new_payload, &old_payload);
+ target_payload = &old_payload;
+
+ drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+ }
/* mst_mgr->payloads are VC payloads that notify the MST branch using
* DPCD or AUX messages. The sequence is the slot 1-63 allocation
* sequence for each stream. AMD ASIC stream slot allocation should
* follow the same sequence. Copy the DRM MST allocation to dc. */
- fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index f085cb97a620..85a090b9e3d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -61,6 +61,12 @@
#define CTF_OFFSET_HOTSPOT 5
#define CTF_OFFSET_MEM 5
+static const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
+static const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+
+#define DECODE_GEN_SPEED(gen_speed_idx) (pmfw_decoded_link_speed[gen_speed_idx])
+#define DECODE_LANE_WIDTH(lane_width_idx) (pmfw_decoded_link_width[lane_width_idx])
+
struct smu_13_0_max_sustainable_clocks {
uint32_t display_clock;
uint32_t phy_clock;
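The two lookup tables above make explicit that PMFW reports PCIe gen speed and lane width as indices rather than raw values (index 3 means x4, not x3), which is the mismatch the ppt.c hunks below fix. A tiny runnable check of the decode:

        #include <stdio.h>

        static const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
        static const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};

        #define DECODE_GEN_SPEED(i)  (pmfw_decoded_link_speed[i])
        #define DECODE_LANE_WIDTH(i) (pmfw_decoded_link_width[i])

        int main(void)
        {
                /* Index 3 decodes to Gen4 / x4; comparing raw indices
                 * against decoded values is the bug being fixed. */
                printf("gen=%d width=x%d\n",
                       DECODE_GEN_SPEED(3), DECODE_LANE_WIDTH(3));
                return 0;
        }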
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 27448ffe60a4..a5c97d61e92a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1144,8 +1144,8 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 5) ? "x12" :
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
- ((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
- (lane_width == link_width[pcie_table->pcie_lane[i]]) ?
+ (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+ (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
"*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9e1967d8049e..4399416dd9b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -575,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table);
if (ret)
return ret;
+
+ if (skutable->DriverReportedClocks.GameClockAc &&
+ (dpm_table->dpm_levels[dpm_table->count - 1].value >
+ skutable->DriverReportedClocks.GameClockAc)) {
+ dpm_table->dpm_levels[dpm_table->count - 1].value =
+ skutable->DriverReportedClocks.GameClockAc;
+ dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+ }
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
@@ -828,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ /* uclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ /* gfxclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ /* socclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ /* fclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ /* vclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ /* dclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ dev_err(smu->adev->dev, "Unsupported clock type!\n");
+ return -EINVAL;
+ }
+
+ if (min)
+ *min = dpm_table->min;
+ if (max)
+ *max = dpm_table->max;
+
+ return 0;
+}
+
static int smu_v13_0_7_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data,
@@ -1074,8 +1133,8 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 5) ? "x12" :
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
- (gen_speed == pcie_table->pcie_gen[i]) &&
- (lane_width == pcie_table->pcie_lane[i]) ?
+ (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+ (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
"*" : "");
break;
@@ -1329,9 +1388,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ DriverReportedClocks_t driver_clocks =
+ pptable->SkuTable.DriverReportedClocks;
pstate_table->gfxclk_pstate.min = gfx_table->min;
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ if (driver_clocks.GameClockAc &&
+ (driver_clocks.GameClockAc < gfx_table->max))
+ pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+ else
+ pstate_table->gfxclk_pstate.peak = gfx_table->max;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
@@ -1348,12 +1415,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
pstate_table->fclk_pstate.min = fclk_table->min;
pstate_table->fclk_pstate.peak = fclk_table->max;
- /*
- * For now, just use the mininum clock frequency.
- * TODO: update them when the real pstate settings available
- */
- pstate_table->gfxclk_pstate.standard = gfx_table->min;
- pstate_table->uclk_pstate.standard = mem_table->min;
+ if (driver_clocks.BaseClockAc &&
+ driver_clocks.BaseClockAc < gfx_table->max)
+ pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
+ else
+ pstate_table->gfxclk_pstate.standard = gfx_table->max;
+ pstate_table->uclk_pstate.standard = mem_table->max;
pstate_table->socclk_pstate.standard = soc_table->min;
pstate_table->vclk_pstate.standard = vclk_table->min;
pstate_table->dclk_pstate.standard = dclk_table->min;
@@ -1676,7 +1743,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.read_sensor = smu_v13_0_7_read_sensor,
.feature_is_enabled = smu_cmn_feature_is_enabled,
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 0643887800b4..142668cd6d7c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev)
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
- kfree(priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 468a792e6a40..fc0eaf40dc94 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -300,9 +300,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1;
- dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ /* FIXME: Move all DSS handling to intel_vdsc.c */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+ } else {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ }
+
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -323,16 +335,16 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
+ dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg);
dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2);
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
}
/* aka DSI 8X clock */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
index 76678dd60f93..c4c6f67af7cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -31,6 +31,7 @@ gf108_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gf108_ram_new,
.default_bigpage = 17,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index f73442ccb424..433fa966ba23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -77,6 +77,7 @@ gk104_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
.clkgate_pack = gk104_fb_clkgate_pack,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
index 45d6cdffafee..4dc283dedf8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
@@ -59,6 +59,7 @@ gk110_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
.clkgate_pack = gk110_fb_clkgate_pack,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index de52462a92bf..90bfff616d35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -31,6 +31,7 @@ gm107_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gm107_ram_new,
.default_bigpage = 17,
};
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 15d04a0ec623..e0a8890a62e2 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -507,12 +507,19 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
struct drm_sched_entity *entity = sched_job->entity;
bool first;
+ ktime_t submit_ts;
trace_drm_sched_job(sched_job, entity);
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
+
+ /*
+ * After the sched_job is pushed into the entity queue, it may be
+ * completed and freed up at any time. We can no longer access it.
+ * Make sure to set the submit_ts first, to avoid a race.
+ */
+ sched_job->submit_ts = submit_ts = ktime_get();
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
- sched_job->submit_ts = ktime_get();
/* first job wakes up scheduler */
if (first) {
@@ -529,7 +536,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
spin_unlock(&entity->rq_lock);
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
- drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
+ drm_sched_rq_update_fifo(entity, submit_ts);
drm_sched_wakeup(entity->rq->sched);
}
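The scheduler fix encodes a lifetime rule: once spsc_queue_push() makes the job visible, a concurrent consumer may complete and free it, so any field the producer still needs afterwards (submit_ts here) must be written, and cached locally, before the push. A generic sketch of the write-then-publish pattern; the queue, clock, and accounting helpers are hypothetical:

        /* Sketch: after publish(), the object may be consumed and freed
         * at any time, so keep local copies of anything needed later. */
        struct job {
                long submit_ts;
                /* ... */
        };

        static void push_job(struct queue *q, struct job *job)
        {
                long ts = now();        /* hypothetical clock helper */

                job->submit_ts = ts;    /* last store before publication */
                publish(q, job);        /* hypothetical lock-free push */

                /* From here on use only the local ts; job may be gone. */
                account_submit(ts);
        }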
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 82f64fb31fda..4ce012f83253 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1122,7 +1122,7 @@ config HID_TOPRE
tristate "Topre REALFORCE keyboards"
depends on HID
help
- Say Y for N-key rollover support on Topre REALFORCE R2 108 key keyboards.
+ Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key keyboards.
config HID_THINGM
tristate "ThingM blink(1) USB RGB LED"
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 63545cd307e5..c2e9b6d1fd7d 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -420,6 +420,9 @@
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100 0x29F5
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -1249,6 +1252,7 @@
#define USB_VENDOR_ID_TOPRE 0x0853
#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108 0x0148
+#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87 0x0146
#define USB_VENDOR_ID_TOPSEED 0x0766
#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 7fc967964dd8..5c65a584b3fa 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -398,6 +398,12 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 3e3f89e01d81..d85398721659 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -940,7 +940,7 @@ hid_sensor_register_platform_device(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
const struct hid_sensor_custom_match *match)
{
- char real_usage[HID_SENSOR_USAGE_LENGTH];
+ char real_usage[HID_SENSOR_USAGE_LENGTH] = { 0 };
struct platform_device *custom_pdev;
const char *dev_name;
char *c;
diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c
index 88a91cdad5f8..d1d5ca310ead 100644
--- a/drivers/hid/hid-topre.c
+++ b/drivers/hid/hid-topre.c
@@ -36,6 +36,8 @@ static __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id topre_id_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+ USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) },
{ }
};
MODULE_DEVICE_TABLE(hid, topre_id_table);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 81385ab37fa9..7fc738a22375 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -241,8 +241,8 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
- return guid_equal(&driver->id[0].guid,
- &device->fw_client->props.protocol_name);
+ return device->fw_client ? guid_equal(&driver->id[0].guid,
+ &device->fw_client->props.protocol_name) : 0;
}
/**
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 1ea8f173cca0..4c15fae534f3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -472,7 +472,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
if (etm4x_sspcicrn_present(drvdata, i))
etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
}
- for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
}
@@ -1070,25 +1070,21 @@ static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
struct csdev_access *csa)
{
u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
- u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
/*
* All ETMs must implement TRCDEVARCH to indicate that
- * the component is an ETMv4. To support any broken
- * implementations we fall back to TRCIDR1 check, which
- * is not really reliable.
+ * the component is an ETMv4. Even though TRCIDR1 also
+ * contains the information, it is part of the "Trace"
+ * registers and must be accessed over MMIO with the OSLK
+ * cleared. But we cannot touch the OSLK until we are
+ * sure this is an ETM. So rely only on the TRCDEVARCH.
*/
- if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
- drvdata->arch = etm_devarch_to_arch(devarch);
- } else {
- pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
- smp_processor_id(), devarch);
-
- if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
- return false;
- drvdata->arch = etm_trcidr_to_arch(idr1);
+ if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
+ pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
+ return false;
}
+ drvdata->arch = etm_devarch_to_arch(devarch);
*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
return true;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 434f4e95ee17..27c8a9901868 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -753,14 +753,12 @@
* TRCDEVARCH - CoreSight architected register
* - Bits[15:12] - Major version
* - Bits[19:16] - Minor version
- * TRCIDR1 - ETM architected register
- * - Bits[11:8] - Major version
- * - Bits[7:4] - Minor version
- * We must rely on TRCDEVARCH for the version information,
- * however we don't want to break the support for potential
- * old implementations which might not implement it. Thus
- * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
- * for memory mapped components.
+ *
+ * We must rely only on TRCDEVARCH for the version information. Even though
+ * TRCIDR1 also provides the architecture version, it is a "Trace" register
+ * and as such must be accessed only with Trace power domain ON. This may
+ * not be available at probe time.
+ *
* Now, to make certain decisions easier based on the version,
* we use an internal representation of the version in the
* driver, as follows:
@@ -786,12 +784,6 @@ static inline u8 etm_devarch_to_arch(u32 devarch)
ETM_DEVARCH_REVISION(devarch));
}
-static inline u8 etm_trcidr_to_arch(u32 trcidr1)
-{
- return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
- ETM_TRCIDR1_ARCH_MINOR(trcidr1));
-}
-
enum etm_impdef_type {
ETM4_IMPDEF_HISI_CORE_COMMIT,
ETM4_IMPDEF_FEATURE_MAX,
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index bce6b796e04c..545436b7dd53 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -178,6 +178,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
+ /*
+ * Clear the flag before adding the device so that fw_devlink
+ * doesn't skip adding consumers to this device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
client = of_i2c_register_device(adap, rd->dn);
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%pOF'\n",
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index f866859855cd..1c3a72380fb8 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -864,7 +864,7 @@ static irqreturn_t kx022a_trigger_handler(int irq, void *p)
if (ret < 0)
goto err_read;
- iio_push_to_buffers_with_timestamp(idev, data->buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(idev, data->buffer, data->timestamp);
err_read:
iio_trigger_notify_done(idev->trig);
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index fee8d129a5f0..86effe8501b4 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -253,7 +253,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
.has_registers = true,
.addr_shift = 4,
.read_mask = BIT(3),
- .irq_flags = IRQF_TRIGGER_LOW,
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
static int ad7791_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 17370c5eb6fe..ec198c6f13d6 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -28,7 +28,6 @@ struct ltc2497_driverdata {
struct ltc2497core_driverdata common_ddata;
struct i2c_client *client;
u32 recv_size;
- u32 sub_lsb;
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
@@ -65,10 +64,10 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
* equivalent to a sign extension.
*/
if (st->recv_size == 3) {
- *val = (get_unaligned_be24(st->data.d8) >> st->sub_lsb)
+ *val = (get_unaligned_be24(st->data.d8) >> 6)
- BIT(ddata->chip_info->resolution + 1);
} else {
- *val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
+ *val = (be32_to_cpu(st->data.d32) >> 6)
- BIT(ddata->chip_info->resolution + 1);
}
@@ -122,7 +121,6 @@ static int ltc2497_probe(struct i2c_client *client)
st->common_ddata.chip_info = chip_info;
resolution = chip_info->resolution;
- st->sub_lsb = 31 - (resolution + 1);
st->recv_size = BITS_TO_BYTES(resolution) + 1;
return ltc2497core_probe(dev, indio_dev);
diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
index b74b689ee7de..f6895bc9fc4b 100644
--- a/drivers/iio/adc/max11410.c
+++ b/drivers/iio/adc/max11410.c
@@ -414,13 +414,17 @@ static int max11410_sample(struct max11410_state *st, int *sample_raw,
if (!ret)
return -ETIMEDOUT;
} else {
+ int ret2;
+
/* Wait for status register Conversion Ready flag */
- ret = read_poll_timeout(max11410_read_reg, ret,
- ret || (val & MAX11410_STATUS_CONV_READY_BIT),
+ ret = read_poll_timeout(max11410_read_reg, ret2,
+ ret2 || (val & MAX11410_STATUS_CONV_READY_BIT),
5000, MAX11410_CONVERSION_TIMEOUT_MS * 1000,
true, st, MAX11410_REG_STATUS, &val);
if (ret)
return ret;
+ if (ret2)
+ return ret2;
}
/* Read ADC Data */
@@ -851,17 +855,21 @@ static int max11410_init_vref(struct device *dev,
static int max11410_calibrate(struct max11410_state *st, u32 cal_type)
{
- int ret, val;
+ int ret, ret2, val;
ret = max11410_write_reg(st, MAX11410_REG_CAL_START, cal_type);
if (ret)
return ret;
/* Wait for status register Calibration Ready flag */
- return read_poll_timeout(max11410_read_reg, ret,
- ret || (val & MAX11410_STATUS_CAL_READY_BIT),
- 50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
- st, MAX11410_REG_STATUS, &val);
+ ret = read_poll_timeout(max11410_read_reg, ret2,
+ ret2 || (val & MAX11410_STATUS_CAL_READY_BIT),
+ 50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
+ st, MAX11410_REG_STATUS, &val);
+ if (ret)
+ return ret;
+
+ return ret2;
}
static int max11410_self_calibrate(struct max11410_state *st)
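The max11410 fix separates read_poll_timeout()'s two failure channels: the macro's own return value reports a timeout, while the polled op's return value lands in the val/err argument (ret2 here); checking only one can mask the other failure. A hedged sketch of the corrected shape, assuming kernel context (linux/iopoll.h) with hypothetical MY_* register names and read helper:

        /* Sketch: poll a status register until a READY bit sets. ret is
         * the timeout verdict; ret2 is whatever the last my_read_reg()
         * call returned. Both must be checked. */
        static int wait_ready(struct my_state *st)
        {
                int ret, ret2;
                u32 val;

                ret = read_poll_timeout(my_read_reg, ret2,
                                        ret2 || (val & MY_READY_BIT),
                                        5000, 1000000, true,
                                        st, MY_REG_STATUS, &val);
                if (ret)
                        return ret;     /* timed out */
                return ret2;            /* I/O error from the op, or 0 */
        }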
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index fd000345ec5c..849a697a467e 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -639,7 +639,7 @@ out:
static int palmas_gpadc_remove(struct platform_device *pdev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
if (adc->wakeup1_enable || adc->wakeup2_enable)
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index e90c299c913a..c2d5e06f137a 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -628,12 +628,20 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
struct fwnode_handle *fwnode,
const struct adc5_data *data)
{
- const char *name = fwnode_get_name(fwnode), *channel_name;
+ const char *channel_name;
+ char *name;
u32 chan, value, varr[2];
u32 sid = 0;
int ret;
struct device *dev = adc->dev;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", fwnode);
+ if (!name)
+ return -ENOMEM;
+
+ /* Cut the address part */
+ name[strchrnul(name, '@') - name] = '\0';
+
ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 2cc9a9bd9db6..263fc3a1b87e 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -634,6 +634,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.label = dev_name(&st->spi->dev);
st->chip.parent = &st->spi->dev;
st->chip.owner = THIS_MODULE;
+ st->chip.can_sleep = true;
st->chip.base = -1;
st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
st->chip.get_direction = ti_ads7950_get_direction;
diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
index 791dd999cf29..18a64f72fc18 100644
--- a/drivers/iio/dac/cio-dac.c
+++ b/drivers/iio/dac/cio-dac.c
@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
- /* DAC can only accept up to a 16-bit value */
- if ((unsigned int)val > 65535)
+ /* DAC can only accept up to a 12-bit value */
+ if ((unsigned int)val > 4095)
return -EINVAL;
priv->chan_out_states[chan->channel] = val;
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index f1d7d4b5e222..c2f97629e9cd 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -47,6 +47,7 @@ config ADIS16480
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ select CRC32
help
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 80c78bd6bbef..a7a080bed180 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -203,24 +203,27 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
break;
}
+ if (filp->f_flags & O_NONBLOCK) {
+ if (!written)
+ ret = -EAGAIN;
+ break;
+ }
+
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
continue;
}
ret = rb->access->write(rb, n - written, buf + written);
- if (ret == 0 && (filp->f_flags & O_NONBLOCK))
- ret = -EAGAIN;
+ if (ret < 0)
+ break;
- if (ret > 0) {
- written += ret;
- if (written != n && !(filp->f_flags & O_NONBLOCK))
- continue;
- }
- } while (ret == 0);
+ written += ret;
+
+ } while (written != n);
remove_wait_queue(&rb->pollq, &wait);
- return ret < 0 ? ret : n;
+ return ret < 0 ? ret : written;
}
/**
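The rewritten iio_buffer_write loop implements standard write(2) semantics: with O_NONBLOCK, return -EAGAIN only when nothing has been written yet, otherwise report the short count; on a blocking fd, keep waiting until all n bytes are queued or an error occurs. A condensed sketch of the control flow, with the IIO internals replaced by hypothetical helpers:

        /* Sketch of the loop shape: try_write stands in for
         * rb->access->write and returns bytes accepted or -errno. */
        static ssize_t buffered_write(const char *buf, size_t n, bool nonblock)
        {
                ssize_t ret = 0;
                size_t written = 0;

                do {
                        if (no_space()) {               /* hypothetical */
                                if (nonblock) {
                                        if (!written)
                                                ret = -EAGAIN;
                                        break;  /* else report short write */
                                }
                                wait_for_space();       /* hypothetical */
                                continue;
                        }

                        ret = try_write(buf + written, n - written);
                        if (ret < 0)
                                break;
                        written += ret;
                } while (written != n);

                return ret < 0 ? ret : (ssize_t)written;
        }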
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index b1674a5bfa36..d4a34a3bf00d 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -429,6 +429,14 @@ static const struct iio_info cm32181_info = {
.attrs = &cm32181_attribute_group,
};
+static void cm32181_unregister_dummy_client(void *data)
+{
+ struct i2c_client *client = data;
+
+ /* Unregister the dummy client */
+ i2c_unregister_device(client);
+}
+
static int cm32181_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -460,6 +468,10 @@ static int cm32181_probe(struct i2c_client *client)
client = i2c_acpi_new_device(dev, 1, &board_info);
if (IS_ERR(client))
return PTR_ERR(client);
+
+ ret = devm_add_action_or_reset(dev, cm32181_unregister_dummy_client, client);
+ if (ret)
+ return ret;
}
cm32181 = iio_priv(indio_dev);
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 6bdfce9747f9..5c44a36ab5b3 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -208,7 +208,6 @@ static int vcnl4000_init(struct vcnl4000_data *data)
data->rev = ret & 0xf;
data->al_scale = 250000;
- mutex_init(&data->vcnl4000_lock);
return data->chip_spec->set_power_state(data, true);
};
@@ -1367,6 +1366,8 @@ static int vcnl4000_probe(struct i2c_client *client)
data->id = id->driver_data;
data->chip_spec = &vcnl4000_chip_spec_cfg[data->id];
+ mutex_init(&data->vcnl4000_lock);
+
ret = data->chip_spec->init(data);
if (ret < 0)
return ret;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 1e94e7d10b8b..a0a1194dc1d9 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data);
- if (ret)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != sect_size)
return -EIO;
@@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
- if (!sect_size)
- return mtd_read(mtd, pos, len, &retlen, buf);
+ if (!sect_size) {
+ ret = mtd_read(mtd, pos, len, &retlen, buf);
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+ return 0;
+ }
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf);
- if (ret)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != size)
return -EIO;
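The three mtdblock hunks share one idea: mtd_read() returns -EUCLEAN when bitflips were detected and corrected, but the buffer still holds valid data, so that return value must not fail the read. A sketch of the tolerant pattern (kernel context, hypothetical read_sector()):

    #include <linux/mtd/mtd.h>

    static int read_sector(struct mtd_info *mtd, loff_t pos, size_t len,
                           u_char *buf)
    {
            size_t retlen;
            int ret;

            ret = mtd_read(mtd, pos, len, &retlen, buf);
            if (ret && !mtd_is_bitflip(ret))
                    return ret;     /* hard error */
            if (retlen != len)
                    return -EIO;    /* short read */
            return 0;               /* ok, possibly with corrected bitflips */
    }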
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index a28574c00900..074e14225c06 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -280,7 +280,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
if (raw) {
len = mtd->writesize + mtd->oobsize;
- cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
+ cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
return;
}
@@ -544,7 +544,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
goto out;
- cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
+ cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
@@ -568,7 +568,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
return ret;
- cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
+ cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
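All three hunks widen the same length mask: the NFC command word reserves bits 13:0 for the transfer length, so GENMASK(5, 0) silently truncated anything longer than 63 bytes. A small sketch of the masking (kernel context; the LEN_MASK_* names are illustrative):

    #include <linux/bits.h>
    #include <linux/types.h>

    #define LEN_MASK_OLD    GENMASK(5, 0)   /* 0x003f: truncates len > 63 */
    #define LEN_MASK_NEW    GENMASK(13, 0)  /* 0x3fff: full 14-bit length */

    /* Illustrative: compose a command word with the full-width length */
    static u32 build_cmd(u32 opcode, u32 len)
    {
            return opcode | (len & LEN_MASK_NEW);
    }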
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 5d627048c420..9e74bcd90aaa 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1531,6 +1531,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
if (IS_ERR(sdrt))
return PTR_ERR(sdrt);
+ if (conf->timings.mode > 3)
+ return -EOPNOTSUPP;
+
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 236e5219c811..8cc9a74789b7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3269,7 +3269,8 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
- combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+ (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
+ combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
goto out;
saddr = &combined->ip6.saddr;
@@ -3291,7 +3292,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
- bond_validate_na(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, daddr, saddr);
else if (curr_arp_slave &&
bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_na(bond, slave, saddr, daddr);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index f77bd1223c8f..541e4dda7950 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1063,6 +1063,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ addr &= ~GEM_BIT(DMA_RXVALID);
+#endif
return addr;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index da9d4b310fcd..838750a03cf6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -989,6 +989,20 @@ static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
return 0;
}
+/* FIXME: Workaround for the link partner's verification failing if ENETC
+ * previously received too much express traffic. The documentation doesn't
+ * suggest this is needed.
+ */
+static void enetc_restart_emac_rx(struct enetc_si *si)
+{
+ u32 val = enetc_port_rd(&si->hw, ENETC_PM0_CMD_CFG);
+
+ enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val & ~ENETC_PM0_RX_EN);
+
+ if (val & ENETC_PM0_RX_EN)
+ enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val);
+}
+
static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
@@ -1040,6 +1054,8 @@ static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
enetc_port_wr(hw, ENETC_MMCSR, val);
+ enetc_restart_emac_rx(priv->si);
+
mutex_unlock(&priv->mm_lock);
return 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 2cdce251472c..9abaff1f2aff 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -58,8 +58,6 @@ enum iavf_vsi_state_t {
struct iavf_vsi {
struct iavf_adapter *back;
struct net_device *netdev;
- unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
- unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
@@ -157,15 +155,20 @@ struct iavf_vlan {
u16 tpid;
};
+enum iavf_vlan_state_t {
+ IAVF_VLAN_INVALID,
+ IAVF_VLAN_ADD, /* filter needs to be added */
+ IAVF_VLAN_IS_NEW, /* filter is new, wait for PF answer */
+ IAVF_VLAN_ACTIVE, /* filter is accepted by PF */
+ IAVF_VLAN_DISABLE, /* filter needs to be deleted by PF, then marked INACTIVE */
+ IAVF_VLAN_INACTIVE, /* filter is inactive, we are in IFF_DOWN */
+ IAVF_VLAN_REMOVE, /* filter needs to be removed from list */
+};
+
struct iavf_vlan_filter {
struct list_head list;
struct iavf_vlan vlan;
- struct {
- u8 is_new_vlan:1; /* filter is new, wait for PF answer */
- u8 remove:1; /* filter needs to be removed */
- u8 add:1; /* filter needs to be added */
- u8 padding:5;
- };
+ enum iavf_vlan_state_t state;
};
#define IAVF_MAX_TRAFFIC_CLASS 4
@@ -257,6 +260,7 @@ struct iavf_adapter {
wait_queue_head_t vc_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
+ int num_vlan_filters;
struct list_head mac_filter_list;
struct mutex crit_lock;
struct mutex client_lock;
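Replacing the three one-bit flags with a single enum makes the filter lifecycle explicit and adds the DISABLE/INACTIVE pair needed across ifdown/ifup. Reconstructed from the enum comments above, the intended transitions are roughly (a sketch, not authoritative):

    /*
     *   ADD ---> IS_NEW ---> ACTIVE ---> DISABLE ---> INACTIVE
     *              |                    (delete sent      |
     *              v (PF rejects)        to the PF)       v (ifup)
     *            freed                                   ADD
     *
     * REMOVE can be entered at any point: the delete is sent to the
     * PF and the filter is then freed from the list.
     */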
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 095201e83c9d..2de4baff4c20 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -791,7 +791,8 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->vlan = vlan;
list_add_tail(&f->list, &adapter->vlan_filter_list);
- f->add = true;
+ f->state = IAVF_VLAN_ADD;
+ adapter->num_vlan_filters++;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
@@ -813,7 +814,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
- f->remove = true;
+ f->state = IAVF_VLAN_REMOVE;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
}
@@ -828,14 +829,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
**/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
- u16 vid;
+ struct iavf_vlan_filter *f;
/* re-add all VLAN filters */
- for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
- iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
- for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
- iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_INACTIVE)
+ f->state = IAVF_VLAN_ADD;
+ }
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
/**
@@ -844,8 +849,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
*/
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
- return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
- bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
+ return adapter->num_vlan_filters;
}
/**
@@ -928,11 +932,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
- if (proto == cpu_to_be16(ETH_P_8021Q))
- clear_bit(vid, adapter->vsi.active_cvlans);
- else
- clear_bit(vid, adapter->vsi.active_svlans);
-
return 0;
}
@@ -1293,16 +1292,11 @@ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
}
}
- /* remove all VLAN filters */
+ /* disable all VLAN filters */
list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
- list) {
- if (vlf->add) {
- list_del(&vlf->list);
- kfree(vlf);
- } else {
- vlf->remove = true;
- }
- }
+ list)
+ vlf->state = IAVF_VLAN_DISABLE;
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
@@ -2914,6 +2908,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
list_del(&fv->list);
kfree(fv);
}
+ adapter->num_vlan_filters = 0;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -3131,9 +3126,6 @@ continue_reset:
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
- bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
- bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
-
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 4e17d006c52d..9afbbdac3590 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -642,16 +642,10 @@ static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->is_new_vlan) {
- if (f->vlan.tpid == ETH_P_8021Q)
- clear_bit(f->vlan.vid,
- adapter->vsi.active_cvlans);
- else
- clear_bit(f->vlan.vid,
- adapter->vsi.active_svlans);
-
+ if (f->state == IAVF_VLAN_IS_NEW) {
list_del(&f->list);
kfree(f);
+ adapter->num_vlan_filters--;
}
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -679,7 +673,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add)
+ if (f->state == IAVF_VLAN_ADD)
count++;
}
if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
@@ -710,11 +704,10 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add) {
+ if (f->state == IAVF_VLAN_ADD) {
vvfl->vlan_id[i] = f->vlan.vid;
i++;
- f->add = false;
- f->is_new_vlan = true;
+ f->state = IAVF_VLAN_IS_NEW;
if (i == count)
break;
}
@@ -760,7 +753,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add) {
+ if (f->state == IAVF_VLAN_ADD) {
struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
@@ -778,8 +771,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vlan->tpid = f->vlan.tpid;
i++;
- f->add = false;
- f->is_new_vlan = true;
+ f->state = IAVF_VLAN_IS_NEW;
}
}
@@ -822,10 +814,16 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
* filters marked for removal to enable bailing out before
* sending a virtchnl message
*/
- if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
+ if (f->state == IAVF_VLAN_REMOVE &&
+ !VLAN_FILTERING_ALLOWED(adapter)) {
list_del(&f->list);
kfree(f);
- } else if (f->remove) {
+ adapter->num_vlan_filters--;
+ } else if (f->state == IAVF_VLAN_DISABLE &&
+ !VLAN_FILTERING_ALLOWED(adapter)) {
+ f->state = IAVF_VLAN_INACTIVE;
+ } else if (f->state == IAVF_VLAN_REMOVE ||
+ f->state == IAVF_VLAN_DISABLE) {
count++;
}
}
@@ -857,11 +855,18 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->remove) {
+ if (f->state == IAVF_VLAN_DISABLE) {
vvfl->vlan_id[i] = f->vlan.vid;
+ f->state = IAVF_VLAN_INACTIVE;
i++;
+ if (i == count)
+ break;
+ } else if (f->state == IAVF_VLAN_REMOVE) {
+ vvfl->vlan_id[i] = f->vlan.vid;
list_del(&f->list);
kfree(f);
+ adapter->num_vlan_filters--;
+ i++;
if (i == count)
break;
}
@@ -901,7 +906,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->remove) {
+ if (f->state == IAVF_VLAN_DISABLE ||
+ f->state == IAVF_VLAN_REMOVE) {
struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
@@ -915,8 +921,13 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vlan->tci = f->vlan.vid;
vlan->tpid = f->vlan.tpid;
- list_del(&f->list);
- kfree(f);
+ if (f->state == IAVF_VLAN_DISABLE) {
+ f->state = IAVF_VLAN_INACTIVE;
+ } else {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ }
i++;
if (i == count)
break;
@@ -2192,7 +2203,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
- vlf->add = true;
+ vlf->state = IAVF_VLAN_ADD;
adapter->aq_required |=
IAVF_FLAG_AQ_ADD_VLAN_FILTER;
@@ -2260,7 +2271,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
- vlf->add = true;
+ vlf->state = IAVF_VLAN_ADD;
aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
@@ -2444,15 +2455,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->is_new_vlan) {
- f->is_new_vlan = false;
- if (f->vlan.tpid == ETH_P_8021Q)
- set_bit(f->vlan.vid,
- adapter->vsi.active_cvlans);
- else
- set_bit(f->vlan.vid,
- adapter->vsi.active_svlans);
- }
+ if (f->state == IAVF_VLAN_IS_NEW)
+ f->state = IAVF_VLAN_ACTIVE;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4b5e459b6d49..332472fe4990 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -681,14 +681,32 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}
-int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
{
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+ struct mlx4_cqe *cqe = _ctx->cqe;
+ enum xdp_rss_hash_type xht = 0;
+ __be16 status;
if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
return -ENODATA;
- *hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
+ *hash = be32_to_cpu(cqe->immed_rss_invalid);
+ status = cqe->status;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP))
+ xht = XDP_RSS_L4_TCP;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP))
+ xht = XDP_RSS_L4_UDP;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F))
+ xht |= XDP_RSS_L3_IPV4;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) {
+ xht |= XDP_RSS_L3_IPV6;
+ if (cqe->ipv6_ext_mask)
+ xht |= XDP_RSS_L3_DYNHDR;
+ }
+ *rss_type = xht;
+
return 0;
}
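The new out-parameter is delivered to BPF programs through the bpf_xdp_metadata_rx_hash() kfunc. A minimal consumer sketch, built against vmlinux.h and libbpf as in the XDP metadata selftests (program and section names are illustrative, and metadata kfuncs are only available to programs loaded in device-bound mode):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
                                        enum xdp_rss_hash_type *rss_type) __ksym;

    SEC("xdp")
    int rx_hash_example(struct xdp_md *ctx)
    {
            enum xdp_rss_hash_type rss_type;
            __u32 hash;

            /* -ENODATA when the device has no hash for this frame */
            if (bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type) < 0)
                    return XDP_PASS;

            /* XDP_RSS_L4 set means the hash also covers L4 ports */
            if (rss_type & XDP_RSS_L4)
                    bpf_printk("L4 hash 0x%x", hash);

            return XDP_PASS;
    }

    char LICENSE[] SEC("license") = "GPL";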
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 034733b13b1a..321f801c1d7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -798,7 +798,8 @@ int mlx4_en_netdev_event(struct notifier_block *this,
struct xdp_md;
int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp);
-int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash);
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type);
/*
* Functions for time stamping
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 15d15d0e5ef9..c8b532cea7d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -34,6 +34,7 @@
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
+#include <linux/bitfield.h>
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
@@ -169,14 +170,72 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}
-static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+/* Mapping HW RSS Type bits CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 into 4 bits */
+#define RSS_TYPE_MAX_TABLE 16 /* 4 bits => a maximum of 16 entries */
+#define RSS_L4 GENMASK(1, 0)
+#define RSS_L3 GENMASK(3, 2) /* Same as CQE_RSS_HTYPE_IP */
+
+/* Valid combinations of CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4, sorted numerically */
+enum mlx5_rss_hash_type {
+ RSS_TYPE_NO_HASH = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IP_NONE) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+ RSS_TYPE_L3_IPV4 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+ RSS_TYPE_L4_IPV4_TCP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
+ RSS_TYPE_L4_IPV4_UDP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
+ RSS_TYPE_L4_IPV4_IPSEC = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
+ RSS_TYPE_L3_IPV6 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+ RSS_TYPE_L4_IPV6_TCP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
+ RSS_TYPE_L4_IPV6_UDP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
+ RSS_TYPE_L4_IPV6_IPSEC = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
+};
+
+/* Invalid combinations simply map to zero, so no bounds check is needed */
+static const enum xdp_rss_hash_type mlx5_xdp_rss_type[RSS_TYPE_MAX_TABLE] = {
+ [RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_NONE,
+ [1] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [2] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [3] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [RSS_TYPE_L3_IPV4] = XDP_RSS_TYPE_L3_IPV4,
+ [RSS_TYPE_L4_IPV4_TCP] = XDP_RSS_TYPE_L4_IPV4_TCP,
+ [RSS_TYPE_L4_IPV4_UDP] = XDP_RSS_TYPE_L4_IPV4_UDP,
+ [RSS_TYPE_L4_IPV4_IPSEC] = XDP_RSS_TYPE_L4_IPV4_IPSEC,
+ [RSS_TYPE_L3_IPV6] = XDP_RSS_TYPE_L3_IPV6,
+ [RSS_TYPE_L4_IPV6_TCP] = XDP_RSS_TYPE_L4_IPV6_TCP,
+ [RSS_TYPE_L4_IPV6_UDP] = XDP_RSS_TYPE_L4_IPV6_UDP,
+ [RSS_TYPE_L4_IPV6_IPSEC] = XDP_RSS_TYPE_L4_IPV6_IPSEC,
+ [12] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [13] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [14] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [15] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+};
+
+static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
{
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+ const struct mlx5_cqe64 *cqe = _ctx->cqe;
+ u32 hash_type, l4_type, ip_type, lookup;
if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
return -ENODATA;
- *hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
+ *hash = be32_to_cpu(cqe->rss_hash_result);
+
+ hash_type = cqe->rss_hash_type;
+ BUILD_BUG_ON(CQE_RSS_HTYPE_IP != RSS_L3); /* same mask */
+ ip_type = hash_type & CQE_RSS_HTYPE_IP;
+ l4_type = FIELD_GET(CQE_RSS_HTYPE_L4, hash_type);
+ lookup = ip_type | l4_type;
+ *rss_type = mlx5_xdp_rss_type[lookup];
+
return 0;
}
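The table works without a bounds check because the two CQE subfields are packed into a 4-bit index that cannot exceed 15. A generic sketch of the technique (kernel context; HW_SUB_A, IDX_SUB_* and the table values are hypothetical):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define HW_SUB_A        GENMASK(7, 6)   /* raw position of subfield A */
    #define IDX_SUB_A       GENMASK(1, 0)   /* A's slot in the table index */
    #define IDX_SUB_B       GENMASK(3, 2)   /* B keeps its raw position */

    static const u8 lookup[16] = {
            [FIELD_PREP_CONST(IDX_SUB_B, 1) |
             FIELD_PREP_CONST(IDX_SUB_A, 2)] = 42,
            /* unlisted combinations are implicitly zero */
    };

    static u8 classify(u32 hw_word)
    {
            /* index <= GENMASK(3, 0) == 15 by construction */
            u32 idx = (hw_word & IDX_SUB_B) | FIELD_GET(HW_SUB_A, hw_word);

            return lookup[idx];
    }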
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 87f76bac2e46..eb827b86ecae 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -628,7 +628,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
int i, err, ring;
if (dev->flags & QLCNIC_NEED_FLR) {
- pci_reset_function(dev->pdev);
+ err = pci_reset_function(dev->pdev);
+ if (err) {
+ dev_err(&dev->pdev->dev,
+ "Adapter reset failed (%d). Please reboot\n",
+ err);
+ return err;
+ }
dev->flags &= ~QLCNIC_NEED_FLR;
}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ab8b09a9ef61..7a2e76776297 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -4522,7 +4522,7 @@ static int niu_alloc_channels(struct niu *np)
err = niu_rbr_fill(np, rp, GFP_KERNEL);
if (err)
- return err;
+ goto out_err;
}
tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 37f0b62ec5d6..f9cd566d1c9b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -27,7 +27,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 35128dd45ffc..c61e4e44a78f 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/irqreturn.h>
@@ -23,7 +24,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 5813b07242ce..029875a59ff8 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -191,7 +191,7 @@
#define MAX_ID_PS 2260U
#define DEFAULT_ID_PS 2000U
-#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK(31, 0) * (ppb) * \
+#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK_ULL(31, 0) * (ppb) * \
PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
@@ -1337,6 +1337,17 @@ no_ptp_support:
return ret;
}
+static void nxp_c45_remove(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+
+ if (priv->ptp_clock)
+ ptp_clock_unregister(priv->ptp_clock);
+
+ skb_queue_purge(&priv->tx_queue);
+ skb_queue_purge(&priv->rx_queue);
+}
+
static struct phy_driver nxp_c45_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
@@ -1359,6 +1370,7 @@ static struct phy_driver nxp_c45_driver[] = {
.set_loopback = genphy_c45_loopback,
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
},
};
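The PPM_TO_SUBNS_INC fix targets 32-bit builds, where GENMASK(31, 0) has type unsigned long: the multiplication wraps before div_u64() ever runs, while GENMASK_ULL keeps the intermediate in 64 bits. A hosted-C illustration of the same overflow (the values are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ppb = 100;

            /* 32-bit intermediate: wraps modulo 2^32 */
            uint32_t bad = (uint32_t)(0xffffffffUL * ppb);

            /* 64-bit intermediate: the full product survives */
            uint64_t good = 0xffffffffULL * ppb;

            printf("bad=%u good=%llu\n", bad, (unsigned long long)good);
            return 0;
    }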
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index cc9d29b71098..89636dc71e48 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -210,6 +210,12 @@ static const enum gpiod_flags gpio_flags[] = {
#define SFP_PHY_ADDR 22
#define SFP_PHY_ADDR_ROLLBALL 17
+/* SFP_EEPROM_BLOCK_SIZE is the size of the data chunk read from the
+ * EEPROM at a time. Some SFP modules, and also some Linux I2C drivers,
+ * do not like reads longer than 16 bytes.
+ */
+#define SFP_EEPROM_BLOCK_SIZE 16
+
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -1951,11 +1957,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
- /* Some SFP modules and also some Linux I2C drivers do not like reads
- * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
- * a time.
- */
- sfp->i2c_block_size = 16;
+ sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@@ -2513,6 +2515,9 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
unsigned int first, last, len;
int ret;
+ if (!(sfp->state & SFP_F_PRESENT))
+ return -ENODEV;
+
if (ee->len == 0)
return -EINVAL;
@@ -2545,6 +2550,9 @@ static int sfp_module_eeprom_by_page(struct sfp *sfp,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack)
{
+ if (!(sfp->state & SFP_F_PRESENT))
+ return -ENODEV;
+
if (page->bank) {
NL_SET_ERR_MSG(extack, "Banks not supported");
return -EOPNOTSUPP;
@@ -2649,6 +2657,7 @@ static struct sfp *sfp_alloc(struct device *dev)
return ERR_PTR(-ENOMEM);
sfp->dev = dev;
+ sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
mutex_init(&sfp->sm_mutex);
mutex_init(&sfp->st_mutex);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index decb5ba56a25..0fc4b959edc1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1943,7 +1943,7 @@ static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
if (!rx_agg)
return NULL;
- rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
+ rx_agg->page = alloc_pages(mflags | __GFP_COMP | __GFP_NOWARN, order);
if (!rx_agg->page)
goto free_rx;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c1178915496d..e1b38fbf1dd9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1648,14 +1648,18 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}
-static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
{
struct veth_xdp_buff *_ctx = (void *)ctx;
+ struct sk_buff *skb = _ctx->skb;
- if (!_ctx->skb)
+ if (!skb)
return -ENODATA;
- *hash = skb_get_hash(_ctx->skb);
+ *hash = skb_get_hash(skb);
+ *rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE;
+
return 0;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 5bf5a93937c9..04517bd3325a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -295,7 +295,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
- return ret;
+ goto set_mask_fail;
}
ipc_pcie_config_aspm(ipc_pcie);
@@ -323,6 +323,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
imem_init_fail:
ipc_pcie_resources_release(ipc_pcie);
resources_req_fail:
+set_mask_fail:
pci_disable_device(pci);
pci_enable_fail:
kfree(ipc_pcie);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 53ef028596c6..d6a9bac91a4c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1674,6 +1674,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
struct request_queue *queue = disk->queue;
u32 size = queue_logical_block_size(queue);
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+ ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
if (ctrl->max_discard_sectors == 0) {
blk_queue_max_discard_sectors(queue, 0);
return;
@@ -1688,9 +1691,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
if (queue->limits.max_discard_sectors)
return;
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
- ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
-
blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 07d93753b12f..e311d406b170 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -226,6 +226,7 @@ static void __of_attach_node(struct device_node *np)
np->sibling = np->parent->child;
np->parent->child = np;
of_node_clear_flag(np, OF_DETACHED);
+ np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE;
}
/**
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b2bd2e783445..78ae84187449 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -737,6 +737,11 @@ static int of_platform_notify(struct notifier_block *nb,
if (of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK;
+ /*
+ * Clear the flag before adding the device so that fw_devlink
+ * doesn't skip adding consumers to this device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index 66d9ab288646..e5e9b287b976 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -128,7 +128,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
return -EIO;
/* Length is 2 DW of header + length of payload in DW */
- length = 2 + task->request_pl_sz / sizeof(u32);
+ length = 2 + task->request_pl_sz / sizeof(__le32);
if (length > PCI_DOE_MAX_LENGTH)
return -EIO;
if (length == PCI_DOE_MAX_LENGTH)
@@ -141,9 +141,9 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
length));
- for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
+ for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
- task->request_pl[i]);
+ le32_to_cpu(task->request_pl[i]));
pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
@@ -195,11 +195,11 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
/* First 2 dwords have already been read */
length -= 2;
- payload_length = min(length, task->response_pl_sz / sizeof(u32));
+ payload_length = min(length, task->response_pl_sz / sizeof(__le32));
/* Read the rest of the response payload */
for (i = 0; i < payload_length; i++) {
- pci_read_config_dword(pdev, offset + PCI_DOE_READ,
- &task->response_pl[i]);
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ task->response_pl[i] = cpu_to_le32(val);
/* Prior to the last ack, ensure Data Object Ready */
if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
return -EIO;
@@ -217,13 +217,14 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
return -EIO;
- return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
+ return min(length, task->response_pl_sz / sizeof(__le32)) * sizeof(__le32);
}
static void signal_task_complete(struct pci_doe_task *task, int rv)
{
task->rv = rv;
task->complete(task);
+ destroy_work_on_stack(&task->work);
}
static void signal_task_abort(struct pci_doe_task *task, int rv)
@@ -317,14 +318,16 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
*index);
+ __le32 request_pl_le = cpu_to_le32(request_pl);
+ __le32 response_pl_le;
u32 response_pl;
DECLARE_COMPLETION_ONSTACK(c);
struct pci_doe_task task = {
.prot.vid = PCI_VENDOR_ID_PCI_SIG,
.prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
- .request_pl = &request_pl,
+ .request_pl = &request_pl_le,
.request_pl_sz = sizeof(request_pl),
- .response_pl = &response_pl,
+ .response_pl = &response_pl_le,
.response_pl_sz = sizeof(response_pl),
.complete = pci_doe_task_complete,
.private = &c,
@@ -340,6 +343,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
if (task.rv != sizeof(response_pl))
return -EIO;
+ response_pl = le32_to_cpu(response_pl_le);
*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
response_pl);
@@ -520,6 +524,8 @@ EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
* task->complete will be called when the state machine is done processing this
* task.
*
+ * @task must be allocated on the stack.
+ *
* Excess data will be discarded.
*
* RETURNS: 0 when task has been successfully queued, -ERRNO on error
@@ -533,15 +539,15 @@ int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
* DOE requests must be a whole number of DW and the response needs to
* be big enough for at least 1 DW
*/
- if (task->request_pl_sz % sizeof(u32) ||
- task->response_pl_sz < sizeof(u32))
+ if (task->request_pl_sz % sizeof(__le32) ||
+ task->response_pl_sz < sizeof(__le32))
return -EINVAL;
if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
return -EIO;
task->doe_mb = doe_mb;
- INIT_WORK(&task->work, doe_statemachine_work);
+ INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
queue_work(doe_mb->work_queue, &task->work);
return 0;
}
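Typing the DOE payloads as __le32 lets sparse flag missing endianness conversions; the rule is that config-space accessors take and return CPU-order values, so the wire-endian buffer is converted at each access. A compact sketch of the round trip (kernel context; send_payload()/recv_payload() are hypothetical):

    #include <linux/pci.h>

    static void send_payload(struct pci_dev *pdev, int off,
                             const __le32 *pl, size_t ndw)
    {
            size_t i;

            /* buffer is little-endian; the accessor wants CPU order */
            for (i = 0; i < ndw; i++)
                    pci_write_config_dword(pdev, off, le32_to_cpu(pl[i]));
    }

    static void recv_payload(struct pci_dev *pdev, int off,
                             __le32 *pl, size_t ndw)
    {
            size_t i;
            u32 val;

            for (i = 0; i < ndw; i++) {
                    pci_read_config_dword(pdev, off, &val);
                    pl[i] = cpu_to_le32(val);
            }
    }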
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 0145aef1b930..22d39e12b236 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -157,8 +157,6 @@ void pci_remove_root_bus(struct pci_bus *bus)
list_for_each_entry_safe(child, tmp,
&bus->devices, bus_list)
pci_remove_bus_device(child);
- pci_remove_bus(bus);
- host_bridge->bus = NULL;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Release domain_nr if it was dynamically allocated */
@@ -166,6 +164,9 @@ void pci_remove_root_bus(struct pci_bus *bus)
pci_bus_release_domain_nr(bus, host_bridge->dev.parent);
#endif
+ pci_remove_bus(bus);
+ host_bridge->bus = NULL;
+
/* remove the host bridge */
device_del(&host_bridge->dev);
}
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 609821b756c2..9236a132c7ba 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -872,34 +872,32 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
-static void amd_gpio_irq_init_pin(struct amd_gpio *gpio_dev, int pin)
+static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
- const struct pin_desc *pd;
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
u32 pin_reg, mask;
+ int i;
mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
BIT(WAKE_CNTRL_OFF_S4);
- pd = pin_desc_get(gpio_dev->pctrl, pin);
- if (!pd)
- return;
+ for (i = 0; i < desc->npins; i++) {
+ int pin = desc->pins[i].number;
+ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
- raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + pin * 4);
- pin_reg &= ~mask;
- writel(pin_reg, gpio_dev->base + pin * 4);
- raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
-}
+ if (!pd)
+ continue;
-static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
-{
- struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
- int i;
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- for (i = 0; i < desc->npins; i++)
- amd_gpio_irq_init_pin(gpio_dev, i);
+ pin_reg = readl(gpio_dev->base + i * 4);
+ pin_reg &= ~mask;
+ writel(pin_reg, gpio_dev->base + i * 4);
+
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
}
#ifdef CONFIG_PM_SLEEP
@@ -952,10 +950,8 @@ static int amd_gpio_resume(struct device *dev)
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
- if (!amd_gpio_should_save(gpio_dev, pin)) {
- amd_gpio_irq_init_pin(gpio_dev, pin);
+ if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- }
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c76f82fb8b63..15f452908926 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -771,13 +771,12 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
iscsi_set_param(cls_conn, param, buf, buflen);
break;
case ISCSI_PARAM_DATADGST_EN:
- iscsi_set_param(cls_conn, param, buf, buflen);
-
mutex_lock(&tcp_sw_conn->sock_lock);
if (!tcp_sw_conn->sock) {
mutex_unlock(&tcp_sw_conn->sock_lock);
return -ENOTCONN;
}
+ iscsi_set_param(cls_conn, param, buf, buflen);
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
mutex_unlock(&tcp_sw_conn->sock_lock);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index a565817aa56d..d109a4ceb72b 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2526,7 +2526,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
mrioc->unrecoverable = 1;
goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
- return;
+ goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
break;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bee1b8a82020..d0cdbfe771a9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3617,6 +3617,7 @@ skip_dpc:
probe_failed:
qla_enode_stop(base_vha);
qla_edb_stop(base_vha);
+ vfree(base_vha->scan.l);
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 44b85a8d47f1..7bc14fb309a6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -4456,6 +4456,11 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
+ /*
+ * Clear the flag before adding the device so that fw_devlink
+ * doesn't skip adding consumers to this device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
spi = of_register_spi_device(ctlr, rd->dn);
put_device(&ctlr->dev);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fa43df05342b..3ba9c8b93ae6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1903,6 +1903,17 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
{
switch (iir & 0x3f) {
+ case UART_IIR_THRI:
+ /*
+		 * Postpone the decision whether to use DMA until IIR_RDI or
+		 * IIR_RX_TIMEOUT, because it is impossible to make an
+		 * informed decision with IIR_THRI alone.
+ *
+ * This also fixes one known DMA Rx corruption issue where
+ * DR is asserted but DMA Rx only gets a corrupted zero byte
+ * (too early DR?).
+ */
+ return false;
case UART_IIR_RDI:
if (!up->dma->rx_running)
break;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 56e6ba3250cd..074bfed57fc9 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -858,11 +858,17 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
struct lpuart_port, port);
unsigned long stat = lpuart32_read(port, UARTSTAT);
unsigned long sfifo = lpuart32_read(port, UARTFIFO);
+ unsigned long ctrl = lpuart32_read(port, UARTCTRL);
if (sport->dma_tx_in_progress)
return 0;
- if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
+ /*
+ * LPUART Transmission Complete Flag may never be set while queuing a break
+ * character, so avoid checking for transmission complete when UARTCTRL_SBK
+ * is asserted.
+ */
+ if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK)
return TIOCSER_TEMT;
return 0;
@@ -2942,7 +2948,7 @@ static bool lpuart_uport_is_active(struct lpuart_port *sport)
tty = tty_port_tty_get(port);
if (tty) {
tty_dev = tty->dev;
- may_wake = device_may_wakeup(tty_dev);
+ may_wake = tty_dev && device_may_wakeup(tty_dev);
tty_kref_put(tty);
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7bd080720929..caa09a0c48f4 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -31,6 +31,7 @@
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/major.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
@@ -2864,6 +2865,13 @@ static int sci_init_single(struct platform_device *dev,
sci_port->irqs[i] = platform_get_irq(dev, i);
}
+ /*
+	 * The fourth interrupt on an SCI port is the transmit end
+	 * interrupt, so shuffle the interrupts accordingly.
+ */
+ if (p->type == PORT_SCI)
+ swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]);
+
/* The SCI generates several interrupts. They can be muxed together or
* connected to different interrupt lines. In the muxed case only one
* interrupt resource is specified as there is only one interrupt ID.
@@ -2929,7 +2937,7 @@ static int sci_init_single(struct platform_device *dev,
port->flags = UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
port->fifosize = sci_port->params->fifosize;
- if (port->type == PORT_SCI) {
+ if (port->type == PORT_SCI && !dev->dev.of_node) {
if (sci_port->reg_size >= 0x20)
port->regshift = 2;
else
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 37e178a9ac47..70b112038792 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1409,13 +1409,6 @@ static int ufshcd_devfreq_target(struct device *dev,
struct ufs_clk_info *clki;
unsigned long irq_flags;
- /*
- * Skip devfreq if UFS initialization is not finished.
- * Otherwise ufs could be in a inconsistent state.
- */
- if (!smp_load_acquire(&hba->logical_unit_scan_finished))
- return 0;
-
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -8399,6 +8392,22 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ hba->clk_scaling.is_allowed = true;
+
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+
+ hba->clk_scaling.is_enabled = true;
+ ufshcd_init_clk_scaling_sysfs(hba);
+ }
+
ufs_bsg_probe(hba);
ufshpb_init(hba);
scsi_scan_host(hba->host);
@@ -8670,12 +8679,6 @@ out:
if (ret) {
pm_runtime_put_sync(hba->dev);
ufshcd_hba_exit(hba);
- } else {
- /*
- * Make sure that when reader code sees UFS initialization has finished,
- * all initialization steps have really been executed.
- */
- smp_store_release(&hba->logical_unit_scan_finished, true);
}
}
@@ -10316,30 +10319,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
ufshcd_set_ufs_dev_active(hba);
- /* Initialize devfreq */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- hba->clk_scaling.is_allowed = true;
-
- err = ufshcd_devfreq_init(hba);
- if (err)
- goto rpm_put_sync;
-
- hba->clk_scaling.is_enabled = true;
- ufshcd_init_clk_scaling_sysfs(hba);
- }
-
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
device_enable_async_suspend(dev);
return 0;
-rpm_put_sync:
- pm_runtime_put_sync(dev);
free_tmf_queue:
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
index d63d5d92f255..f317d3c84781 100644
--- a/drivers/usb/cdns3/cdnsp-ep0.c
+++ b/drivers/usb/cdns3/cdnsp-ep0.c
@@ -414,7 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
void cdnsp_setup_analyze(struct cdnsp_device *pdev)
{
struct usb_ctrlrequest *ctrl = &pdev->setup;
- int ret = 0;
+ int ret = -EINVAL;
u16 len;
trace_cdnsp_ctrl_req(ctrl);
@@ -424,7 +424,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
- ret = -EINVAL;
goto out;
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index a23ddbb81979..560793545362 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -49,6 +49,7 @@
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_MTLM 0x7eb1
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
+#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -474,6 +475,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLS),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index ddfc537c7526..56cdfb2e4211 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1251,7 +1251,7 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
p->kiocb = kiocb;
if (p->aio) {
p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
- if (!p->to_free) {
+ if (!iter_is_ubuf(&p->data) && !p->to_free) {
kfree(p);
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index d605bc2e7e8f..28249d0bf062 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -614,7 +614,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (!priv)
goto fail;
priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
- if (!priv->to_free) {
+ if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
kfree(priv);
goto fail;
}
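Both gadget hunks fix the same assumption: dup_iter() returns the duplicated iovec array, but an ITER_UBUF iterator has no array to duplicate, so NULL is a legitimate non-error result there. A sketch of the corrected check (kernel context, hypothetical dup_user_iter()):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    static int dup_user_iter(struct iov_iter *dst, struct iov_iter *src,
                             void **to_free)
    {
            *to_free = dup_iter(dst, src, GFP_KERNEL);

            /* NULL is an allocation failure only for non-ubuf iterators */
            if (!iter_is_ubuf(dst) && !*to_free)
                    return -ENOMEM;
            return 0;
    }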
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fb988e4ea924..6db07ca419c3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -771,12 +771,11 @@ static struct pci_driver xhci_pci_driver = {
/* suspend and resume implemented later */
.shutdown = usb_hcd_pci_shutdown,
- .driver = {
#ifdef CONFIG_PM
- .pm = &usb_hcd_pci_pm_ops,
-#endif
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
},
+#endif
};
static int __init xhci_pci_init(void)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 1ff22f675930..a88c39e525c2 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1360,6 +1360,9 @@ static void tegra_xhci_id_work(struct work_struct *work)
mutex_unlock(&tegra->lock);
+ tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
+ tegra->otg_usb2_port);
+
if (tegra->host_mode) {
/* switch to host mode */
if (tegra->otg_usb3_port >= 0) {
@@ -1474,9 +1477,6 @@ static int tegra_xhci_id_notify(struct notifier_block *nb,
}
tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy);
- tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(
- tegra->padctl,
- tegra->otg_usb2_port);
tegra->host_mode = (usbphy->last_event == USB_EVENT_ID) ? true : false;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6183ce8574b1..6307bae9cddf 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -9,6 +9,7 @@
*/
#include <linux/pci.h>
+#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
@@ -228,6 +229,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct iommu_domain *domain;
int err, i;
u64 val;
u32 intrs;
@@ -246,7 +248,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
* an iommu. Doing anything when there is no iommu is definitely
* unsafe...
*/
- if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
+ domain = iommu_get_domain_for_dev(dev);
+ if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
+ domain->type == IOMMU_DOMAIN_IDENTITY)
return;
xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
@@ -4438,6 +4442,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, command);
return 0;
}
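The xhci_zero_64b_regs() change is needed because device_iommu_mapped() is also true for identity-mapped (passthrough) domains, where zeroing the registers would still fault on real memory. A sketch of the refined test (kernel context, hypothetical helper name):

    #include <linux/iommu.h>

    static bool dev_is_dma_translated(struct device *dev)
    {
            struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

            /* passthrough counts as untranslated for this purpose */
            return domain && domain->type != IOMMU_DOMAIN_IDENTITY;
    }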
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 832ad592b7ef..cdea1bff3b70 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */
{ USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
{ USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
{ USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e6d8d9b35ad0..f31cc3c76329 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1198,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
+ .driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
@@ -1300,6 +1302,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
.driver_info = RSVD(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 662cd043b50e..8f3e884222ad 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -112,8 +112,12 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
- else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
+ else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) {
pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
+ /* Default to pin assign C if available */
+ if (pin_assign & BIT(DP_PIN_ASSIGN_C))
+ pin_assign = BIT(DP_PIN_ASSIGN_C);
+ }
if (!pin_assign)
return -EINVAL;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 520646ae7fa0..195963b82b63 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2467,10 +2467,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
err = 0;
goto out;
}
+ mlx5_vdpa_add_debugfs(ndev);
err = setup_virtqueues(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
- goto out;
+ goto err_setup;
}
err = create_rqt(ndev);
@@ -2500,6 +2501,8 @@ err_tir:
destroy_rqt(ndev);
err_rqt:
teardown_virtqueues(ndev);
+err_setup:
+ mlx5_vdpa_remove_debugfs(ndev->debugfs);
out:
return err;
}
@@ -2513,6 +2516,8 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
if (!ndev->setup)
return;
+ mlx5_vdpa_remove_debugfs(ndev->debugfs);
+ ndev->debugfs = NULL;
teardown_steering(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
@@ -3261,7 +3266,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
if (err)
goto err_reg;
- mlx5_vdpa_add_debugfs(ndev);
mgtdev->ndev = ndev;
return 0;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 862f405362de..dfe2ce341803 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -466,16 +466,21 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
vdpasim_net_setup_config(simdev, config);
- ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
- if (ret)
- goto reg_err;
-
net = sim_to_net(simdev);
u64_stats_init(&net->tx_stats.syncp);
u64_stats_init(&net->rx_stats.syncp);
u64_stats_init(&net->cq_stats.syncp);
+ /*
+	 * Initialization must be completed before this call, since it
+	 * connects the device to the vDPA bus: requests can start
+	 * arriving as soon as it returns.
+ */
+ ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
+ if (ret)
+ goto reg_err;
+
return 0;
reg_err:
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index b244e7c0f514..32d0be968103 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -125,7 +125,6 @@ struct vhost_scsi_tpg {
struct se_portal_group se_tpg;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi *vhost_scsi;
- struct list_head tmf_queue;
};
struct vhost_scsi_tport {
@@ -206,10 +205,8 @@ struct vhost_scsi {
struct vhost_scsi_tmf {
struct vhost_work vwork;
- struct vhost_scsi_tpg *tpg;
struct vhost_scsi *vhost;
struct vhost_scsi_virtqueue *svq;
- struct list_head queue_entry;
struct se_cmd se_cmd;
u8 scsi_resp;
@@ -352,12 +349,9 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
- struct vhost_scsi_tpg *tpg = tmf->tpg;
struct vhost_scsi_inflight *inflight = tmf->inflight;
- mutex_lock(&tpg->tv_tpg_mutex);
- list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
- mutex_unlock(&tpg->tv_tpg_mutex);
+ kfree(tmf);
vhost_scsi_put_inflight(inflight);
}
@@ -1194,19 +1188,11 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
goto send_reject;
}
- mutex_lock(&tpg->tv_tpg_mutex);
- if (list_empty(&tpg->tmf_queue)) {
- pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
- mutex_unlock(&tpg->tv_tpg_mutex);
+ tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+ if (!tmf)
goto send_reject;
- }
-
- tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
- queue_entry);
- list_del_init(&tmf->queue_entry);
- mutex_unlock(&tpg->tv_tpg_mutex);
- tmf->tpg = tpg;
+ vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
tmf->vhost = vs;
tmf->svq = svq;
tmf->resp_iov = vq->iov[vc->out];
@@ -1658,7 +1644,10 @@ undepend:
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
tpg = vs_tpg[i];
if (tpg) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->vhost_scsi = NULL;
tpg->tv_tpg_vhost_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
}
}
@@ -2032,19 +2021,11 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
- struct vhost_scsi_tmf *tmf;
-
- tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
- if (!tmf)
- return -ENOMEM;
- INIT_LIST_HEAD(&tmf->queue_entry);
- vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count++;
- list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
mutex_unlock(&tpg->tv_tpg_mutex);
vhost_scsi_hotplug(tpg, lun);
@@ -2059,16 +2040,11 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
- struct vhost_scsi_tmf *tmf;
mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count--;
- tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
- queue_entry);
- list_del(&tmf->queue_entry);
- kfree(tmf);
mutex_unlock(&tpg->tv_tpg_mutex);
vhost_scsi_hotunplug(tpg, lun);
@@ -2329,7 +2305,6 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
}
mutex_init(&tpg->tv_tpg_mutex);
INIT_LIST_HEAD(&tpg->tv_tpg_list);
- INIT_LIST_HEAD(&tpg->tmf_queue);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 0a2c47df01f4..eb565a10e5cd 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -823,7 +823,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
int oldidx = con2fb_map[unit];
struct fb_info *info = fbcon_registered_fb[newidx];
struct fb_info *oldinfo = NULL;
- int found, err = 0, show_logo;
+ int err = 0, show_logo;
WARN_CONSOLE_UNLOCKED();
@@ -841,26 +841,26 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (oldidx != -1)
oldinfo = fbcon_registered_fb[oldidx];
- found = search_fb_in_map(newidx);
-
- if (!err && !found) {
+ if (!search_fb_in_map(newidx)) {
err = con2fb_acquire_newinfo(vc, info, unit);
- if (!err)
- con2fb_map[unit] = newidx;
+ if (err)
+ return err;
+
+ fbcon_add_cursor_work(info);
}
+ con2fb_map[unit] = newidx;
+
/*
* If old fb is not mapped to any of the consoles,
* fbcon should release it.
*/
- if (!err && oldinfo && !search_fb_in_map(oldidx))
+ if (oldinfo && !search_fb_in_map(oldidx))
con2fb_release_oldinfo(vc, oldinfo, info);
show_logo = (fg_console == 0 && !user &&
logo_shown != FBCON_LOGO_DONTSHOW);
- if (!found)
- fbcon_add_cursor_work(info);
con2fb_map_boot[unit] = newidx;
con2fb_init_display(vc, info, unit, show_logo);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 875541ff185b..3fd95a79e4c3 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1116,6 +1116,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOPUT_VSCREENINFO:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
+ /* only for kernel-internal use */
+ var.activate &= ~FB_ACTIVATE_KD_TEXT;
console_lock();
lock_fb_info(info);
ret = fbcon_modechange_possible(info, &var);
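
The added mask keeps userspace from smuggling in FB_ACTIVATE_KD_TEXT, which only kernel-internal callers may set. A small userspace illustration of the same mask-before-use idea (the flag names come from <linux/fb.h>; the sanitize helper is invented for this sketch):

#include <stdio.h>
#include <linux/fb.h>

/* Strip flags that userspace must never be able to set, before the
 * value is acted upon. */
static __u32 sanitize_activate(__u32 activate)
{
    return activate & ~FB_ACTIVATE_KD_TEXT;
}

int main(void)
{
    __u32 in = FB_ACTIVATE_ALL | FB_ACTIVATE_KD_TEXT;

    printf("0x%x -> 0x%x\n", in, sanitize_activate(in));
    return 0;
}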
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 50f7f3f6b55e..1974a38bce20 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -35,10 +35,12 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
return retval;
}
if (attr_size > buffer_size) {
- if (!buffer_size) /* request to get the attr_size */
- retval = attr_size;
- else
+ if (buffer_size)
retval = -ERANGE;
+ else if (attr_size > SSIZE_MAX)
+ retval = -EOVERFLOW;
+ else /* request to get the attr_size */
+ retval = attr_size;
} else {
iov_iter_truncate(&to, attr_size);
retval = p9_client_read(attr_fid, 0, &to, &err);
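
The 9p hunk refines the usual xattr size-probe convention: a zero-size buffer asks for the attribute size, a too-small real buffer gets -ERANGE, and a size that cannot be represented in the ssize_t return now gets -EOVERFLOW instead of being mangled. From userspace, the same probe-then-read convention looks like this with the standard getxattr(2) call (the path and attribute name are placeholders):

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(void)
{
    const char *path = "somefile";   /* assumed to exist */
    ssize_t size = getxattr(path, "user.demo", NULL, 0); /* size probe */
    char *buf;

    if (size < 0) {
        perror("getxattr probe");
        return 1;
    }
    buf = malloc(size ? size : 1);
    if (!buf)
        return 1;
    if (getxattr(path, "user.demo", buf, size) < 0)
        perror("getxattr read");     /* -ERANGE if the attr grew meanwhile */
    free(buf);
    return 0;
}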
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b53f0e30ce2b..9e1596bb208d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2250,6 +2250,20 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
fs_info->csum_shash = csum_shash;
+ /*
+ * Check if the checksum implementation is a fast accelerated one.
+ * As-is, this is a bit of a hack and should be replaced once the csum
+ * implementations provide that information themselves.
+ */
+ switch (csum_type) {
+ case BTRFS_CSUM_TYPE_CRC32:
+ if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
+ set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
+ break;
+ default:
+ break;
+ }
+
btrfs_info(fs_info, "using %s (%s) checksum algorithm",
btrfs_super_csum_name(csum_type),
crypto_shash_driver_name(csum_shash));
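
The BTRFS_FS_CSUM_IMPL_FAST heuristic above keys off the crypto driver name: the kernel's software fallbacks carry a "generic" suffix (e.g. crc32c-generic), while accelerated implementations do not (e.g. crc32c-intel). A self-contained demo of that string check, with the two driver names as assumed examples:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* The software fallback drivers in the kernel crypto API carry a
 * "generic" suffix, so anything else is assumed accelerated. */
static bool csum_impl_is_fast(const char *driver_name)
{
    return strstr(driver_name, "generic") == NULL;
}

int main(void)
{
    const char *impls[] = { "crc32c-generic", "crc32c-intel" };

    for (int i = 0; i < 2; i++)
        printf("%s: %s\n", impls[i],
               csum_impl_is_fast(impls[i]) ? "fast" : "slow");
    return 0;
}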
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 581845bc206a..366fb4cde145 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1516,8 +1516,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
s->s_id);
btrfs_sb(s)->bdev_holder = fs_type;
- if (!strstr(crc32c_impl(), "generic"))
- set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
error = btrfs_fill_super(s, fs_devices, data);
}
if (!error)
@@ -1631,6 +1629,8 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
+ workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
+ workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 0d30b17494e4..9d963caec35c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -120,7 +120,7 @@ again:
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&server->srv_lock);
- mutex_lock(&ses->session_mutex);
+ mutex_unlock(&ses->session_mutex);
if (tcon->retry)
goto again;
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 6d13f8207e96..ace11a1a7c8a 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -441,13 +441,14 @@ out:
* but there are some bugs that prevent rename from working if there are
* multiple delimiters.
*
- * Returns a sanitized duplicate of @path. The caller is responsible for
- * cleaning up the original.
+ * Returns a sanitized duplicate of @path. @gfp indicates the GFP_* flags
+ * for kstrdup.
+ * The caller is responsible for freeing the original.
*/
#define IS_DELIM(c) ((c) == '/' || (c) == '\\')
-static char *sanitize_path(char *path)
+char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
{
- char *cursor1 = path, *cursor2 = path;
+ char *cursor1 = prepath, *cursor2 = prepath;
/* skip all prepended delimiters */
while (IS_DELIM(*cursor1))
@@ -469,7 +470,7 @@ static char *sanitize_path(char *path)
cursor2--;
*(cursor2) = '\0';
- return kstrdup(path, GFP_KERNEL);
+ return kstrdup(prepath, gfp);
}
/*
@@ -531,7 +532,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
if (!*pos)
return 0;
- ctx->prepath = sanitize_path(pos);
+ ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL);
if (!ctx->prepath)
return -ENOMEM;
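
cifs_sanitize_prepath() collapses runs of '/' and '\\', trims leading and trailing delimiters with a two-cursor in-place rewrite, and only then duplicates the result. A runnable userspace rendering of the same pass, with strdup() standing in for kstrdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IS_DELIM(c) ((c) == '/' || (c) == '\\')

static char *sanitize_prepath(char *prepath)
{
    char *cursor1 = prepath, *cursor2 = prepath;

    /* skip all prepended delimiters */
    while (IS_DELIM(*cursor1))
        cursor1++;

    while (*cursor1) {
        /* keep one delimiter per run, copy everything else */
        if (IS_DELIM(*cursor1) && IS_DELIM(cursor1[1]))
            cursor1++;
        else
            *cursor2++ = *cursor1++;
    }

    /* drop a trailing delimiter, then terminate */
    if (cursor2 > prepath && IS_DELIM(cursor2[-1]))
        cursor2--;
    *cursor2 = '\0';

    return strdup(prepath);
}

int main(void)
{
    char in[] = "//share\\\\dir///file/";
    char *out = sanitize_prepath(in);

    printf("%s\n", out);   /* prints: share\dir/file */
    free(out);
    return 0;
}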
diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
index 3de00e7127ec..f4eaf8558902 100644
--- a/fs/cifs/fs_context.h
+++ b/fs/cifs/fs_context.h
@@ -287,4 +287,7 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
*/
#define SMB3_MAX_DCLOSETIMEO (1 << 30)
#define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
+
+extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+
#endif
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index b44fb51968bf..7f085ed2d866 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1195,7 +1195,7 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
kfree(cifs_sb->prepath);
if (prefix && *prefix) {
- cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
+ cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
if (!cifs_sb->prepath)
return -ENOMEM;
diff --git a/fs/dax.c b/fs/dax.c
index 3e457a16c7d1..2ababb89918d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -781,6 +781,33 @@ out:
return ret;
}
+static int __dax_clear_dirty_range(struct address_space *mapping,
+ pgoff_t start, pgoff_t end)
+{
+ XA_STATE(xas, &mapping->i_pages, start);
+ unsigned int scanned = 0;
+ void *entry;
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, end) {
+ entry = get_unlocked_entry(&xas, 0);
+ xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
+ xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
+ put_unlocked_entry(&xas, entry, WAKE_NEXT);
+
+ if (++scanned % XA_CHECK_SCHED)
+ continue;
+
+ xas_pause(&xas);
+ xas_unlock_irq(&xas);
+ cond_resched();
+ xas_lock_irq(&xas);
+ }
+ xas_unlock_irq(&xas);
+
+ return 0;
+}
+
/*
* Delete DAX entry at @index from @mapping. Wait for it
* to be unlocked before deleting it.
@@ -1258,15 +1285,20 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
/* don't bother with blocks that are not shared to start with */
if (!(iomap->flags & IOMAP_F_SHARED))
return length;
- /* don't bother with holes or unwritten extents */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
- return length;
id = dax_read_lock();
ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
if (ret < 0)
goto out_unlock;
+ /* zero the destination if srcmap is HOLE or UNWRITTEN */
+ if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
+ memset(daddr, 0, length);
+ dax_flush(iomap->dax_dev, daddr, length);
+ ret = length;
+ goto out_unlock;
+ }
+
ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
if (ret < 0)
goto out_unlock;
@@ -1435,6 +1467,16 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
* written by write(2) is visible in mmap.
*/
if (iomap->flags & IOMAP_F_NEW || cow) {
+ /*
+ * The filesystem allows CoW on non-shared extents. The source
+ * extents may have been mmapped and dirtied earlier. To be able
+ * to invalidate their DAX entries, the dirty marks must be
+ * cleared in advance.
+ */
+ if (cow)
+ __dax_clear_dirty_range(iomi->inode->i_mapping,
+ pos >> PAGE_SHIFT,
+ (end - 1) >> PAGE_SHIFT);
invalidate_inode_pages2_range(iomi->inode->i_mapping,
pos >> PAGE_SHIFT,
(end - 1) >> PAGE_SHIFT);
@@ -2022,8 +2064,8 @@ int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
(ret = iomap_iter(&dst_iter, ops)) > 0) {
- compared = dax_range_compare_iter(&src_iter, &dst_iter, len,
- same);
+ compared = dax_range_compare_iter(&src_iter, &dst_iter,
+ min(src_iter.len, dst_iter.len), same);
if (compared < 0)
return ret;
src_iter.processed = dst_iter.processed = compared;
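
The __dax_clear_dirty_range() helper added above uses the standard long-scan shape for xarray walks: process a batch of entries, then xas_pause(), drop the lock, cond_resched(), and relock. A generic userspace rendering of that batched scan-with-yield pattern (names and sizes are invented; a pthread mutex stands in for the irq-safe xarray lock):

#include <stdio.h>
#include <pthread.h>
#include <sched.h>

#define CHECK_SCHED 4096   /* stand-in for XA_CHECK_SCHED */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int table[100000];

/* Scan a shared table, dropping the lock every CHECK_SCHED entries
 * so other threads get a chance to run. */
static void clear_flags(void)
{
    unsigned int scanned = 0;

    pthread_mutex_lock(&lock);
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        table[i] = 0;
        if (++scanned % CHECK_SCHED)
            continue;
        pthread_mutex_unlock(&lock);
        sched_yield();             /* cond_resched() analogue */
        pthread_mutex_lock(&lock);
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    clear_flags();
    printf("done\n");
    return 0;
}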
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index 115a67d2cf78..365ac32af505 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -112,10 +112,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
struct ksmbd_conn *conn = work->conn;
struct list_head *requests_queue = NULL;
- if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
+ if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
requests_queue = &conn->requests;
- work->synchronous = true;
- }
if (requests_queue) {
atomic_inc(&conn->req_running);
@@ -136,14 +134,14 @@ int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
if (!work->multiRsp)
atomic_dec(&conn->req_running);
- spin_lock(&conn->request_lock);
if (!work->multiRsp) {
+ spin_lock(&conn->request_lock);
list_del_init(&work->request_entry);
- if (!work->synchronous)
- list_del_init(&work->async_request_entry);
+ spin_unlock(&conn->request_lock);
+ if (work->asynchronous)
+ release_async_work(work);
ret = 0;
}
- spin_unlock(&conn->request_lock);
wake_up_all(&conn->req_running_q);
return ret;
@@ -326,10 +324,7 @@ int ksmbd_conn_handler_loop(void *p)
/* 4 for rfc1002 length field */
size = pdu_size + 4;
- conn->request_buf = kvmalloc(size,
- GFP_KERNEL |
- __GFP_NOWARN |
- __GFP_NORETRY);
+ conn->request_buf = kvmalloc(size, GFP_KERNEL);
if (!conn->request_buf)
break;
diff --git a/fs/ksmbd/ksmbd_work.h b/fs/ksmbd/ksmbd_work.h
index 3234f2cf6327..f8ae6144c0ae 100644
--- a/fs/ksmbd/ksmbd_work.h
+++ b/fs/ksmbd/ksmbd_work.h
@@ -68,7 +68,7 @@ struct ksmbd_work {
/* Request is encrypted */
bool encrypted:1;
/* Is this SYNC or ASYNC ksmbd_work */
- bool synchronous:1;
+ bool asynchronous:1;
bool need_invalidate_rkey:1;
unsigned int remote_key;
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 394b6ceac431..0d8242789dc8 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -289,10 +289,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
work->request_buf = conn->request_buf;
conn->request_buf = NULL;
- if (ksmbd_init_smb_server(work)) {
- ksmbd_free_work_struct(work);
- return -EINVAL;
- }
+ ksmbd_init_smb_server(work);
ksmbd_conn_enqueue_request(work);
atomic_inc(&conn->r_count);
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 97c9d1b5bcc0..8af939a181be 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -229,9 +229,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
struct smb2_negotiate_rsp *rsp;
struct ksmbd_conn *conn = work->conn;
- if (conn->need_neg == false)
- return -EINVAL;
-
*(__be32 *)work->response_buf =
cpu_to_be32(conn->vals->header_size);
@@ -498,12 +495,6 @@ int init_smb2_rsp_hdr(struct ksmbd_work *work)
rsp_hdr->SessionId = rcv_hdr->SessionId;
memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
- work->synchronous = true;
- if (work->async_id) {
- ksmbd_release_id(&conn->async_ida, work->async_id);
- work->async_id = 0;
- }
-
return 0;
}
@@ -644,7 +635,7 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
pr_err("Failed to alloc async message id\n");
return id;
}
- work->synchronous = false;
+ work->asynchronous = true;
work->async_id = id;
rsp_hdr->Id.AsyncId = cpu_to_le64(id);
@@ -664,6 +655,24 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
return 0;
}
+void release_async_work(struct ksmbd_work *work)
+{
+ struct ksmbd_conn *conn = work->conn;
+
+ spin_lock(&conn->request_lock);
+ list_del_init(&work->async_request_entry);
+ spin_unlock(&conn->request_lock);
+
+ work->asynchronous = 0;
+ work->cancel_fn = NULL;
+ kfree(work->cancel_argv);
+ work->cancel_argv = NULL;
+ if (work->async_id) {
+ ksmbd_release_id(&conn->async_ida, work->async_id);
+ work->async_id = 0;
+ }
+}
+
void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
{
struct smb2_hdr *rsp_hdr;
@@ -7045,13 +7054,9 @@ skip:
ksmbd_vfs_posix_lock_wait(flock);
- spin_lock(&work->conn->request_lock);
spin_lock(&fp->f_lock);
list_del(&work->fp_entry);
- work->cancel_fn = NULL;
- kfree(argv);
spin_unlock(&fp->f_lock);
- spin_unlock(&work->conn->request_lock);
if (work->state != KSMBD_WORK_ACTIVE) {
list_del(&smb_lock->llist);
@@ -7069,6 +7074,7 @@ skip:
work->send_no_response = 1;
goto out;
}
+
init_smb2_rsp_hdr(work);
smb2_set_err_rsp(work);
rsp->hdr.Status =
@@ -7081,7 +7087,7 @@ skip:
spin_lock(&work->conn->llist_lock);
list_del(&smb_lock->clist);
spin_unlock(&work->conn->llist_lock);
-
+ release_async_work(work);
goto retry;
} else if (!rc) {
spin_lock(&work->conn->llist_lock);
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index 0c8a770fe318..9420dd2813fb 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -486,6 +486,7 @@ int find_matching_smb2_dialect(int start_index, __le16 *cli_dialects,
struct file_lock *smb_flock_init(struct file *f);
int setup_async_work(struct ksmbd_work *work, void (*fn)(void **),
void **arg);
+void release_async_work(struct ksmbd_work *work);
void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
struct channel *lookup_chann_list(struct ksmbd_session *sess,
struct ksmbd_conn *conn);
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index 9c1ce6d199ce..af0c2a9b8529 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -283,20 +283,121 @@ err_out:
return BAD_PROT_ID;
}
-int ksmbd_init_smb_server(struct ksmbd_work *work)
+#define SMB_COM_NEGOTIATE_EX 0x0
+
+/**
+ * get_smb1_cmd_val() - get smb command value from smb header
+ * @work: smb work containing smb header
+ *
+ * Return: smb command value
+ */
+static u16 get_smb1_cmd_val(struct ksmbd_work *work)
{
- struct ksmbd_conn *conn = work->conn;
+ return SMB_COM_NEGOTIATE_EX;
+}
- if (conn->need_neg == false)
+/**
+ * init_smb1_rsp_hdr() - initialize smb negotiate response header
+ * @work: smb work containing smb request
+ *
+ * Return: always 0
+ */
+static int init_smb1_rsp_hdr(struct ksmbd_work *work)
+{
+ struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+ struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+
+ /*
+ * Remove 4 byte direct TCP header.
+ */
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(sizeof(struct smb_hdr) - 4);
+
+ rsp_hdr->Command = SMB_COM_NEGOTIATE;
+ *(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
+ rsp_hdr->Flags = SMBFLG_RESPONSE;
+ rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+ SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+ rsp_hdr->Pid = rcv_hdr->Pid;
+ rsp_hdr->Mid = rcv_hdr->Mid;
+ return 0;
+}
+
+/**
+ * smb1_check_user_session() - check for valid session for a user
+ * @work: smb work containing smb request buffer
+ *
+ * Return: 0 on success, otherwise error
+ */
+static int smb1_check_user_session(struct ksmbd_work *work)
+{
+ unsigned int cmd = work->conn->ops->get_cmd_val(work);
+
+ if (cmd == SMB_COM_NEGOTIATE_EX)
return 0;
- init_smb3_11_server(conn);
+ return -EINVAL;
+}
+
+/**
+ * smb1_allocate_rsp_buf() - allocate response buffer for a command
+ * @work: smb work containing smb request
+ *
+ * Return: 0 on success, otherwise -ENOMEM
+ */
+static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+{
+ work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+ GFP_KERNEL | __GFP_ZERO);
+ work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+
+ if (!work->response_buf) {
+ pr_err("Failed to allocate %u bytes buffer\n",
+ MAX_CIFS_SMALL_BUFFER_SIZE);
+ return -ENOMEM;
+ }
- if (conn->ops->get_cmd_val(work) != SMB_COM_NEGOTIATE)
- conn->need_neg = false;
return 0;
}
+static struct smb_version_ops smb1_server_ops = {
+ .get_cmd_val = get_smb1_cmd_val,
+ .init_rsp_hdr = init_smb1_rsp_hdr,
+ .allocate_rsp_buf = smb1_allocate_rsp_buf,
+ .check_user_session = smb1_check_user_session,
+};
+
+static int smb1_negotiate(struct ksmbd_work *work)
+{
+ return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
+}
+
+static struct smb_version_cmds smb1_server_cmds[1] = {
+ [SMB_COM_NEGOTIATE_EX] = { .proc = smb1_negotiate, },
+};
+
+static void init_smb1_server(struct ksmbd_conn *conn)
+{
+ conn->ops = &smb1_server_ops;
+ conn->cmds = smb1_server_cmds;
+ conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
+}
+
+void ksmbd_init_smb_server(struct ksmbd_work *work)
+{
+ struct ksmbd_conn *conn = work->conn;
+ __le32 proto;
+
+ if (conn->need_neg == false)
+ return;
+
+ proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
+ if (proto == SMB1_PROTO_NUMBER)
+ init_smb1_server(conn);
+ else
+ init_smb3_11_server(conn);
+}
+
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
struct ksmbd_file *dir,
struct ksmbd_dir_info *d_info,
@@ -444,20 +545,10 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
- /*
- * Remove 4 byte direct TCP header, add 2 byte bcc and
- * 2 byte DialectIndex.
- */
- *(__be32 *)work->response_buf =
- cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
+ /* Add 2 byte bcc and 2 byte DialectIndex. */
+ inc_rfc1001_len(work->response_buf, 4);
neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
- neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
- *(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
- neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
- neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
- SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
-
neg_rsp->hdr.WordCount = 1;
neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
neg_rsp->ByteCount = 0;
@@ -474,23 +565,12 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
if (command == SMB2_NEGOTIATE_HE) {
- struct smb2_hdr *smb2_hdr = smb2_get_msg(work->request_buf);
-
- if (smb2_hdr->ProtocolId != SMB2_PROTO_NUMBER) {
- ksmbd_debug(SMB, "Downgrade to SMB1 negotiation\n");
- command = SMB_COM_NEGOTIATE;
- }
- }
-
- if (command == SMB2_NEGOTIATE_HE) {
ret = smb2_handle_negotiate(work);
- init_smb2_neg_rsp(work);
return ret;
}
if (command == SMB_COM_NEGOTIATE) {
if (__smb2_negotiate(conn)) {
- conn->need_neg = true;
init_smb3_11_server(conn);
init_smb2_neg_rsp(work);
ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n");
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index d30ce4c1a151..9130d2e3cd78 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -427,7 +427,7 @@ bool ksmbd_smb_request(struct ksmbd_conn *conn);
int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
-int ksmbd_init_smb_server(struct ksmbd_work *work);
+void ksmbd_init_smb_server(struct ksmbd_work *work);
struct ksmbd_kstat;
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
diff --git a/fs/ksmbd/unicode.c b/fs/ksmbd/unicode.c
index a0db699ddafd..9ae676906ed3 100644
--- a/fs/ksmbd/unicode.c
+++ b/fs/ksmbd/unicode.c
@@ -114,24 +114,6 @@ cp_convert:
}
/*
- * is_char_allowed() - check for valid character
- * @ch: input character to be checked
- *
- * Return: 1 if char is allowed, otherwise 0
- */
-static inline int is_char_allowed(char *ch)
-{
- /* check for control chars, wildcards etc. */
- if (!(*ch & 0x80) &&
- (*ch <= 0x1f ||
- *ch == '?' || *ch == '"' || *ch == '<' ||
- *ch == '>' || *ch == '|'))
- return 0;
-
- return 1;
-}
-
-/*
* smb_from_utf16() - convert utf16le string to local charset
* @to: destination buffer
* @from: source buffer
diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
index e9a45dea748a..8a4c86687429 100644
--- a/fs/netfs/iterator.c
+++ b/fs/netfs/iterator.c
@@ -139,7 +139,7 @@ static ssize_t netfs_extract_user_to_sg(struct iov_iter *iter,
size_t seg = min_t(size_t, PAGE_SIZE - off, len);
*pages++ = NULL;
- sg_set_page(sg, page, len, off);
+ sg_set_page(sg, page, seg, off);
sgtable->nents++;
sg++;
len -= seg;
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 2681a449edc1..13592e82eaf6 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -2219,6 +2219,7 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
/* on-disk format */
binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
binfo->bi_dat.bi_level = level;
+ memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));
return 0;
}
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index a35f2795b242..4c85914f2abc 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -314,6 +314,7 @@ static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
binfo->bi_dat.bi_level = 0;
+ memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));
return 0;
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 19446a8243d7..6ad41390fa74 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2609,11 +2609,10 @@ static int nilfs_segctor_thread(void *arg)
goto loop;
end_thread:
- spin_unlock(&sci->sc_state_lock);
-
/* end sync. */
sci->sc_task = NULL;
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
+ spin_unlock(&sci->sc_state_lock);
return 0;
}
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 1422b8ba24ed..77f1e5778d1c 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -482,6 +482,7 @@ static void nilfs_put_super(struct super_block *sb)
up_write(&nilfs->ns_sem);
}
+ nilfs_sysfs_delete_device_group(nilfs);
iput(nilfs->ns_sufile);
iput(nilfs->ns_cpfile);
iput(nilfs->ns_dat);
@@ -1105,6 +1106,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
nilfs_put_root(fsroot);
failed_unload:
+ nilfs_sysfs_delete_device_group(nilfs);
iput(nilfs->ns_sufile);
iput(nilfs->ns_cpfile);
iput(nilfs->ns_dat);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 3a4c9c150cbf..2894152a6b25 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -87,7 +87,6 @@ void destroy_nilfs(struct the_nilfs *nilfs)
{
might_sleep();
if (nilfs_init(nilfs)) {
- nilfs_sysfs_delete_device_group(nilfs);
brelse(nilfs->ns_sbh[0]);
brelse(nilfs->ns_sbh[1]);
}
@@ -305,6 +304,10 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
goto failed;
}
+ err = nilfs_sysfs_create_device_group(sb);
+ if (unlikely(err))
+ goto sysfs_error;
+
if (valid_fs)
goto skip_recovery;
@@ -366,6 +369,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
goto failed;
failed_unload:
+ nilfs_sysfs_delete_device_group(nilfs);
+
+ sysfs_error:
iput(nilfs->ns_cpfile);
iput(nilfs->ns_sufile);
iput(nilfs->ns_dat);
@@ -697,10 +703,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
if (err)
goto failed_sbh;
- err = nilfs_sysfs_create_device_group(sb);
- if (err)
- goto failed_sbh;
-
set_nilfs_init(nilfs);
err = 0;
out:
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 8ed9bec03e53..ff5a8da5d883 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -59,8 +59,6 @@ extern void acpi_video_unregister(void);
extern void acpi_video_register_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
-extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
-extern bool acpi_video_backlight_use_native(void);
/*
* Note: The value returned by acpi_video_handles_brightness_key_presses()
* may change over time and should not be cached.
@@ -69,6 +67,19 @@ extern bool acpi_video_handles_brightness_key_presses(void);
extern int acpi_video_get_levels(struct acpi_device *device,
struct acpi_video_device_brightness **dev_br,
int *pmax_level);
+
+extern enum acpi_backlight_type __acpi_video_get_backlight_type(bool native,
+ bool *auto_detect);
+
+static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
+{
+ return __acpi_video_get_backlight_type(false, NULL);
+}
+
+static inline bool acpi_video_backlight_use_native(void)
+{
+ return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
+}
#else
static inline void acpi_video_report_nolcd(void) { return; };
static inline int acpi_video_register(void) { return -ENODEV; }
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index af4dd536a52c..10ec6dcf40bb 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/bitfield.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -980,14 +981,23 @@ enum {
};
enum {
- CQE_RSS_HTYPE_IP = 0x3 << 2,
+ CQE_RSS_HTYPE_IP = GENMASK(3, 2),
/* cqe->rss_hash_type[3:2] - IP destination selected for hash
* (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
*/
- CQE_RSS_HTYPE_L4 = 0x3 << 6,
+ CQE_RSS_IP_NONE = 0x0,
+ CQE_RSS_IPV4 = 0x1,
+ CQE_RSS_IPV6 = 0x2,
+ CQE_RSS_RESERVED = 0x3,
+
+ CQE_RSS_HTYPE_L4 = GENMASK(7, 6),
/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
* (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
*/
+ CQE_RSS_L4_NONE = 0x0,
+ CQE_RSS_L4_TCP = 0x1,
+ CQE_RSS_L4_UDP = 0x2,
+ CQE_RSS_L4_IPSEC = 0x3,
};
enum {
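
With the two HTYPE fields expressed as GENMASK()s and <linux/bitfield.h> now included, consumers can extract the 2-bit selections from cqe->rss_hash_type with FIELD_GET(). A standalone sketch using minimal stand-ins for the kernel macros (the real ones live in linux/bits.h and linux/bitfield.h; 0x44 is an invented sample value):

#include <stdio.h>

/* Userspace stand-ins for the kernel's GENMASK()/FIELD_GET(). */
#define GENMASK(h, l) (((1u << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & ~((mask) << 1)))

#define CQE_RSS_HTYPE_IP GENMASK(3, 2)
#define CQE_RSS_HTYPE_L4 GENMASK(7, 6)

int main(void)
{
    unsigned int hash_type = 0x44;   /* sample CQE field */

    /* 0x44 = 0b0100_0100: bits[3:2] = 01 (IPv4), bits[7:6] = 01 (TCP) */
    printf("ip=%u l4=%u\n",
           FIELD_GET(CQE_RSS_HTYPE_IP, hash_type),
           FIELD_GET(CQE_RSS_HTYPE_L4, hash_type));
    return 0;
}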
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0722859c3647..a57e6ae78e65 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -774,7 +774,8 @@ struct mm_struct {
unsigned long cpu_bitmap[];
};
-#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN)
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
+ MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fe355592dfde..96d27d558b0c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1650,7 +1650,8 @@ struct net_device_ops {
struct xdp_metadata_ops {
int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
- int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash);
+ int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type);
};
/**
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
index ed9b4df792b8..43765eaf2342 100644
--- a/include/linux/pci-doe.h
+++ b/include/linux/pci-doe.h
@@ -34,6 +34,10 @@ struct pci_doe_mb;
* @work: Used internally by the mailbox
* @doe_mb: Used internally by the mailbox
*
+ * Payloads are treated as opaque byte streams which are transmitted verbatim,
+ * without byte-swapping. If payloads contain little-endian register values,
+ * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
+ *
* The payload sizes and rv are specified in bytes with the following
* restrictions concerning the protocol.
*
@@ -45,9 +49,9 @@ struct pci_doe_mb;
*/
struct pci_doe_task {
struct pci_doe_protocol prot;
- u32 *request_pl;
+ __le32 *request_pl;
size_t request_pl_sz;
- u32 *response_pl;
+ __le32 *response_pl;
size_t response_pl_sz;
int rv;
void (*complete)(struct pci_doe_task *task);
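
Switching the task payloads to __le32 makes the contract explicit: the mailbox moves opaque bytes, so a caller holding CPU-endian register values must convert them itself. In userspace, the equivalent of cpu_to_le32()/le32_to_cpu() is htole32()/le32toh() from <endian.h>; a sketch under that assumption (the payload values are arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <endian.h>

int main(void)
{
    /* Caller-side conversion: store each dword little-endian before
     * handing the buffer to the (opaque, verbatim) mailbox. */
    uint32_t request_pl[2];

    request_pl[0] = htole32(0x00000001);   /* e.g. an index field */
    request_pl[1] = htole32(0xdeadbeef);

    /* And convert back when reading a response. */
    printf("0x%08" PRIx32 "\n", le32toh(request_pl[1]));
    return 0;
}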
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b50e5c79f7e3..a5dda515fcd1 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1624,6 +1624,8 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
flags, NULL);
}
+static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
+{ return false; }
static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
const struct irq_affinity_desc *affdesc)
{
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index f0c87baaf6c0..3d6cf306cd55 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -25,7 +25,8 @@ void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned change, u32 event,
gfp_t flags, int *new_nsid,
- int new_ifindex, u32 portid, u32 seq);
+ int new_ifindex, u32 portid,
+ const struct nlmsghdr *nlh);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
gfp_t flags, u32 portid, const struct nlmsghdr *nlh);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6ed9b4d546a7..d5311ceb21c6 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -954,6 +954,7 @@ enum {
HCI_CONN_STK_ENCRYPT,
HCI_CONN_AUTH_INITIATOR,
HCI_CONN_DROP,
+ HCI_CONN_CANCEL,
HCI_CONN_PARAM_REMOVAL_PEND,
HCI_CONN_NEW_LINK_KEY,
HCI_CONN_SCANNING,
diff --git a/include/net/bonding.h b/include/net/bonding.h
index ea36ab7f9e72..c3843239517d 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -761,13 +761,17 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
#if IS_ENABLED(CONFIG_IPV6)
static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
{
+ struct in6_addr mcaddr;
int i;
- for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
- if (ipv6_addr_equal(&targets[i], ip))
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if ((ipv6_addr_equal(&targets[i], ip)) ||
+ (ipv6_addr_equal(&mcaddr, ip)))
return i;
else if (ipv6_addr_any(&targets[i]))
break;
+ }
return -1;
}
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 41c57b8b1671..76aa748e7923 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -8,6 +8,7 @@
#include <linux/skbuff.h> /* skb_shared_info */
#include <uapi/linux/netdev.h>
+#include <linux/bitfield.h>
/**
* DOC: XDP RX-queue information
@@ -425,6 +426,52 @@ XDP_METADATA_KFUNC_xxx
MAX_XDP_METADATA_KFUNC,
};
+enum xdp_rss_hash_type {
+ /* First part: Individual bits for L3/L4 types */
+ XDP_RSS_L3_IPV4 = BIT(0),
+ XDP_RSS_L3_IPV6 = BIT(1),
+
+ /* The fixed (L3) IPv4 and IPv6 headers can both be followed by
+ * variable/dynamic headers, called Options in IPv4 and Extension
+ * Headers in IPv6. The HW RSS type can contain this info.
+ */
+ XDP_RSS_L3_DYNHDR = BIT(2),
+
+ /* When the RSS hash covers L4, drivers MUST set the XDP_RSS_L4 bit in
+ * addition to the protocol-specific bit. This eases interaction with
+ * SKBs and avoids reserving a fixed mask for future L4 protocol bits.
+ */
+ XDP_RSS_L4 = BIT(3), /* L4 based hash, proto can be unknown */
+ XDP_RSS_L4_TCP = BIT(4),
+ XDP_RSS_L4_UDP = BIT(5),
+ XDP_RSS_L4_SCTP = BIT(6),
+ XDP_RSS_L4_IPSEC = BIT(7), /* L4 based hash includes IPSEC SPI */
+
+ /* Second part: RSS hash type combinations used for driver HW mapping */
+ XDP_RSS_TYPE_NONE = 0,
+ XDP_RSS_TYPE_L2 = XDP_RSS_TYPE_NONE,
+
+ XDP_RSS_TYPE_L3_IPV4 = XDP_RSS_L3_IPV4,
+ XDP_RSS_TYPE_L3_IPV6 = XDP_RSS_L3_IPV6,
+ XDP_RSS_TYPE_L3_IPV4_OPT = XDP_RSS_L3_IPV4 | XDP_RSS_L3_DYNHDR,
+ XDP_RSS_TYPE_L3_IPV6_EX = XDP_RSS_L3_IPV6 | XDP_RSS_L3_DYNHDR,
+
+ XDP_RSS_TYPE_L4_ANY = XDP_RSS_L4,
+ XDP_RSS_TYPE_L4_IPV4_TCP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
+ XDP_RSS_TYPE_L4_IPV4_UDP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
+ XDP_RSS_TYPE_L4_IPV4_SCTP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
+ XDP_RSS_TYPE_L4_IPV4_IPSEC = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+
+ XDP_RSS_TYPE_L4_IPV6_TCP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
+ XDP_RSS_TYPE_L4_IPV6_UDP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
+ XDP_RSS_TYPE_L4_IPV6_SCTP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
+ XDP_RSS_TYPE_L4_IPV6_IPSEC = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+
+ XDP_RSS_TYPE_L4_IPV6_TCP_EX = XDP_RSS_TYPE_L4_IPV6_TCP | XDP_RSS_L3_DYNHDR,
+ XDP_RSS_TYPE_L4_IPV6_UDP_EX = XDP_RSS_TYPE_L4_IPV6_UDP | XDP_RSS_L3_DYNHDR,
+ XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR,
+};
+
#ifdef CONFIG_NET
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
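
The widened xmo_rx_hash() callback lets a driver report what the hardware hash covers by composing the bits above. A hedged sketch of the kind of table a driver might use to translate a 2-bit hardware L4 code (the 00=none/01=TCP/10=UDP encoding quoted in the mlx5 hunk earlier) into these composable bits; the helper name and trimmed enum copy are illustrative only:

#include <stdio.h>

/* Trimmed copy of the bits added in include/net/xdp.h. */
enum xdp_rss_hash_type {
    XDP_RSS_L3_IPV4 = 1 << 0,
    XDP_RSS_L4      = 1 << 3,
    XDP_RSS_L4_TCP  = 1 << 4,
    XDP_RSS_L4_UDP  = 1 << 5,
};

/* Hypothetical mapping from a 2-bit HW L4 code (00 = none, 01 = TCP,
 * 10 = UDP) to the composable XDP RSS type bits. */
static enum xdp_rss_hash_type hw_to_xdp_rss(unsigned int hw_l4)
{
    static const enum xdp_rss_hash_type map[] = {
        [0] = 0,
        [1] = XDP_RSS_L4 | XDP_RSS_L4_TCP,
        [2] = XDP_RSS_L4 | XDP_RSS_L4_UDP,
    };

    return hw_l4 < 3 ? map[hw_l4] : 0;
}

int main(void)
{
    printf("tcp-over-ipv4 type = 0x%x\n",
           XDP_RSS_L3_IPV4 | hw_to_xdp_rss(1));
    return 0;
}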
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 5af2a0300bb9..3744e4da1b2a 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -140,11 +140,11 @@ struct virtio_blk_config {
/* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */
struct virtio_blk_zoned_characteristics {
- __le32 zone_sectors;
- __le32 max_open_zones;
- __le32 max_active_zones;
- __le32 max_append_sectors;
- __le32 write_granularity;
+ __virtio32 zone_sectors;
+ __virtio32 max_open_zones;
+ __virtio32 max_active_zones;
+ __virtio32 max_append_sectors;
+ __virtio32 write_granularity;
__u8 model;
__u8 unused2[3];
} zoned;
@@ -241,11 +241,11 @@ struct virtio_blk_outhdr {
*/
struct virtio_blk_zone_descriptor {
/* Zone capacity */
- __le64 z_cap;
+ __virtio64 z_cap;
/* The starting sector of the zone */
- __le64 z_start;
+ __virtio64 z_start;
/* Zone write pointer position in sectors */
- __le64 z_wp;
+ __virtio64 z_wp;
/* Zone type */
__u8 z_type;
/* Zone state */
@@ -254,7 +254,7 @@ struct virtio_blk_zone_descriptor {
};
struct virtio_blk_zone_report {
- __le64 nr_zones;
+ __virtio64 nr_zones;
__u8 reserved[56];
struct virtio_blk_zone_descriptor zones[];
};
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 25aab8ec4f86..431c3afb2ce0 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -979,7 +979,6 @@ struct ufs_hba {
struct completion *uic_async_done;
enum ufshcd_state ufshcd_state;
- bool logical_unit_scan_finished;
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 722624b6d0dc..2a8b8c304d2a 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2789,8 +2789,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_eventfd_unregister(ctx);
io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
- mutex_unlock(&ctx->uring_lock);
io_destroy_buffers(ctx);
+ mutex_unlock(&ctx->uring_lock);
if (ctx->sq_creds)
put_cred(ctx->sq_creds);
if (ctx->submitter_task)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 3002dc827195..a90c820ce99e 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -228,17 +228,18 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
return i;
}
- /* the head kbuf is the list itself */
+ /* protects io_buffers_cache */
+ lockdep_assert_held(&ctx->uring_lock);
+
while (!list_empty(&bl->buf_list)) {
struct io_buffer *nxt;
nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
- list_del(&nxt->list);
+ list_move(&nxt->list, &ctx->io_buffers_cache);
if (++i == nbufs)
return i;
cond_resched();
}
- i++;
return i;
}
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 5b919ef832b6..dac42a2ad588 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -623,7 +623,7 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
unsigned long max_slots = get_max_slots(boundary_mask);
unsigned int iotlb_align_mask =
- dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
+ dma_get_min_align_mask(dev) | alloc_align_mask;
unsigned int nslots = nr_slots(alloc_size), stride;
unsigned int offset = swiotlb_align_offset(dev, orig_addr);
unsigned int index, slots_checked, count = 0, i;
@@ -639,8 +639,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
* allocations.
*/
if (alloc_size >= PAGE_SIZE)
- iotlb_align_mask &= PAGE_MASK;
- iotlb_align_mask &= alloc_align_mask;
+ iotlb_align_mask |= ~PAGE_MASK;
+ iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
/*
* For mappings with an alignment requirement don't bother looping to
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fb3e436bcd4a..435815d3be3f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -12173,7 +12173,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
/*
* If its not a per-cpu rb, it must be the same task.
*/
- if (output_event->cpu == -1 && output_event->ctx != event->ctx)
+ if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
goto out;
/*
@@ -12893,12 +12893,14 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
__perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);
__perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);
- /*
- * Wait for the events to quiesce before re-instating them.
- */
- synchronize_rcu();
+ if (!list_empty(&events)) {
+ /*
+ * Wait for the events to quiesce before re-instating them.
+ */
+ synchronize_rcu();
- __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
+ __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
+ }
mutex_unlock(&dst_ctx->mutex);
mutex_unlock(&src_ctx->mutex);
diff --git a/kernel/fork.c b/kernel/fork.c
index c0257cbee093..0c92f224c68c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -617,6 +617,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
if (retval)
goto out;
+ mt_clear_in_rcu(vmi.mas.tree);
for_each_vma(old_vmi, mpnt) {
struct file *file;
@@ -700,6 +701,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
retval = arch_dup_mmap(oldmm, mm);
loop_out:
vma_iter_free(&vmi);
+ if (!retval)
+ mt_set_in_rcu(vmi.mas.tree);
out:
mmap_write_unlock(mm);
flush_tlb_mm(oldmm);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8e880c09ab59..7b95ee98a1a5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3024,6 +3024,18 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
return !!READ_ONCE(krcp->head);
}
+static bool
+need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
+{
+ int i;
+
+ for (i = 0; i < FREE_N_CHANNELS; i++)
+ if (!list_empty(&krwp->bulk_head_free[i]))
+ return true;
+
+ return !!krwp->head_free;
+}
+
static int krc_count(struct kfree_rcu_cpu *krcp)
{
int sum = atomic_read(&krcp->head_count);
@@ -3107,15 +3119,14 @@ static void kfree_rcu_monitor(struct work_struct *work)
for (i = 0; i < KFREE_N_BATCHES; i++) {
struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
- // Try to detach bulk_head or head and attach it over any
- // available corresponding free channel. It can be that
- // a previous RCU batch is in progress, it means that
- // immediately to queue another one is not possible so
- // in that case the monitor work is rearmed.
- if ((!list_empty(&krcp->bulk_head[0]) && list_empty(&krwp->bulk_head_free[0])) ||
- (!list_empty(&krcp->bulk_head[1]) && list_empty(&krwp->bulk_head_free[1])) ||
- (READ_ONCE(krcp->head) && !krwp->head_free)) {
+ // Try to detach bulk_head or head and attach it, only when
+ // all channels are free. If any channel is not free, krwp
+ // still has on-going RCU work handling its free business.
+ if (need_wait_for_krwp_work(krwp))
+ continue;
+ // kvfree_rcu_drain_ready() might handle this krcp, if so give up.
+ if (need_offload_krc(krcp)) {
// Channel 1 corresponds to the SLAB-pointer bulk path.
// Channel 2 corresponds to vmalloc-pointer bulk path.
for (j = 0; j < FREE_N_CHANNELS; j++) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0feea145bb29..c67bcc89a771 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5667,12 +5667,15 @@ int modify_ftrace_direct(unsigned long ip,
ret = 0;
}
- if (unlikely(ret && new_direct)) {
- direct->count++;
- list_del_rcu(&new_direct->next);
- synchronize_rcu_tasks();
- kfree(new_direct);
- ftrace_direct_func_count--;
+ if (ret) {
+ direct->addr = old_addr;
+ if (unlikely(new_direct)) {
+ direct->count++;
+ list_del_rcu(&new_direct->next);
+ synchronize_rcu_tasks();
+ kfree(new_direct);
+ ftrace_direct_func_count--;
+ }
}
out_unlock:
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index f0ff730125bf..d6a70aff2410 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -44,7 +44,7 @@ enum { ERRORS };
static const char *err_text[] = { ERRORS };
-DEFINE_MUTEX(lastcmd_mutex);
+static DEFINE_MUTEX(lastcmd_mutex);
static char *last_cmd;
static int errpos(const char *str)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c8b379e2e9ad..39d1d93164bd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1143,7 +1143,7 @@ menu "Scheduler Debugging"
config SCHED_DEBUG
bool "Collect scheduler debugging info"
- depends on DEBUG_KERNEL && PROC_FS
+ depends on DEBUG_KERNEL && DEBUG_FS
default y
help
If you say Y here, the /sys/kernel/debug/sched file will be provided
@@ -1392,7 +1392,7 @@ config LOCKDEP_STACK_TRACE_HASH_BITS
range 10 30
default 14
help
- Try increasing this value if you need large MAX_STACK_TRACE_ENTRIES.
+ Try increasing this value if you need large STACK_TRACE_HASH_SIZE.
config LOCKDEP_CIRCULAR_QUEUE_BITS
int "Bitsize for elements in circular_queue struct"
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 9e2735cbc2b4..db60edb55f2f 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -185,7 +185,7 @@ static void mt_free_rcu(struct rcu_head *head)
*/
static void ma_free_rcu(struct maple_node *node)
{
- node->parent = ma_parent_ptr(node);
+ WARN_ON(node->parent != ma_parent_ptr(node));
call_rcu(&node->rcu, mt_free_rcu);
}
@@ -539,11 +539,14 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode)
*/
static inline bool ma_dead_node(const struct maple_node *node)
{
- struct maple_node *parent = (void *)((unsigned long)
- node->parent & ~MAPLE_NODE_MASK);
+ struct maple_node *parent;
+ /* Do not reorder reads from the node prior to the parent check */
+ smp_rmb();
+ parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
return (parent == node);
}
+
/*
* mte_dead_node() - check if the @enode is dead.
* @enode: The encoded maple node
@@ -555,6 +558,8 @@ static inline bool mte_dead_node(const struct maple_enode *enode)
struct maple_node *parent, *node;
node = mte_to_node(enode);
+ /* Do not reorder reads from the node prior to the parent check */
+ smp_rmb();
parent = mte_parent(enode);
return (parent == node);
}
@@ -625,6 +630,8 @@ static inline unsigned int mas_alloc_req(const struct ma_state *mas)
* @node - the maple node
* @type - the node type
*
+ * In the event of a dead node, this array may be %NULL.
+ *
* Return: A pointer to the maple node pivots
*/
static inline unsigned long *ma_pivots(struct maple_node *node,
@@ -817,6 +824,11 @@ static inline void *mt_slot(const struct maple_tree *mt,
return rcu_dereference_check(slots[offset], mt_locked(mt));
}
+static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
+ unsigned char offset)
+{
+ return rcu_dereference_protected(slots[offset], mt_locked(mt));
+}
/*
* mas_slot_locked() - Get the slot value when holding the maple tree lock.
* @mas: The maple state
@@ -828,7 +840,7 @@ static inline void *mt_slot(const struct maple_tree *mt,
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
unsigned char offset)
{
- return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
+ return mt_slot_locked(mas->tree, slots, offset);
}
/*
@@ -900,6 +912,45 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
}
/*
+ * mt_clear_meta() - clear the metadata information of a node, if it exists
+ * @mt: The maple tree
+ * @mn: The maple node
+ * @type: The maple node type
+ */
+static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
+ enum maple_type type)
+{
+ struct maple_metadata *meta;
+ unsigned long *pivots;
+ void __rcu **slots;
+ void *next;
+
+ switch (type) {
+ case maple_range_64:
+ pivots = mn->mr64.pivot;
+ if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
+ slots = mn->mr64.slot;
+ next = mt_slot_locked(mt, slots,
+ MAPLE_RANGE64_SLOTS - 1);
+ if (unlikely((mte_to_node(next) &&
+ mte_node_type(next))))
+ return; /* no metadata, could be node */
+ }
+ fallthrough;
+ case maple_arange_64:
+ meta = ma_meta(mn, type);
+ break;
+ default:
+ return;
+ }
+
+ meta->gap = 0;
+ meta->end = 0;
+}
+
+/*
* ma_meta_end() - Get the data end of a node from the metadata
* @mn: The maple node
* @mt: The maple node type
@@ -1096,8 +1147,11 @@ static int mas_ascend(struct ma_state *mas)
a_type = mas_parent_enum(mas, p_enode);
a_node = mte_parent(p_enode);
a_slot = mte_parent_slot(p_enode);
- pivots = ma_pivots(a_node, a_type);
a_enode = mt_mk_node(a_node, a_type);
+ pivots = ma_pivots(a_node, a_type);
+
+ if (unlikely(ma_dead_node(a_node)))
+ return 1;
if (!set_min && a_slot) {
set_min = true;
@@ -1354,12 +1408,16 @@ static inline struct maple_enode *mas_start(struct ma_state *mas)
mas->max = ULONG_MAX;
mas->depth = 0;
+retry:
root = mas_root(mas);
/* Tree with nodes */
if (likely(xa_is_node(root))) {
mas->depth = 1;
mas->node = mte_safe_root(root);
mas->offset = 0;
+ if (mte_dead_node(mas->node))
+ goto retry;
+
return NULL;
}
@@ -1401,6 +1459,9 @@ static inline unsigned char ma_data_end(struct maple_node *node,
{
unsigned char offset;
+ if (!pivots)
+ return 0;
+
if (type == maple_arange_64)
return ma_meta_end(node, type);
@@ -1436,6 +1497,9 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
return ma_meta_end(node, type);
pivots = ma_pivots(node, type);
+ if (unlikely(ma_dead_node(node)))
+ return 0;
+
offset = mt_pivots[type] - 1;
if (likely(!pivots[offset]))
return ma_meta_end(node, type);
@@ -1724,8 +1788,10 @@ static inline void mas_replace(struct ma_state *mas, bool advanced)
rcu_assign_pointer(slots[offset], mas->node);
}
- if (!advanced)
+ if (!advanced) {
+ mte_set_node_dead(old_enode);
mas_free(mas, old_enode);
+ }
}
/*
@@ -3659,10 +3725,9 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
slot++;
mas->depth = 1;
mas_set_height(mas);
-
+ ma_set_meta(node, maple_leaf_64, 0, slot);
/* swap the new root into the tree */
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
- ma_set_meta(node, maple_leaf_64, 0, slot);
return slot;
}
@@ -3875,18 +3940,13 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
end = ma_data_end(node, type, pivots, max);
if (unlikely(ma_dead_node(node)))
goto dead_node;
-
- if (pivots[offset] >= mas->index)
- goto next;
-
do {
- offset++;
- } while ((offset < end) && (pivots[offset] < mas->index));
-
- if (likely(offset > end))
- max = pivots[offset];
+ if (pivots[offset] >= mas->index) {
+ max = pivots[offset];
+ break;
+ }
+ } while (++offset < end);
-next:
slots = ma_slots(node, type);
next = mt_slot(mas->tree, slots, offset);
if (unlikely(ma_dead_node(node)))
@@ -4164,6 +4224,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
done:
mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
if (in_rcu) {
+ mte_set_node_dead(mas->node);
mas->node = mt_mk_node(newnode, wr_mas->type);
mas_replace(mas, false);
} else {
@@ -4505,6 +4566,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
node = mas_mn(mas);
slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
mas->max = pivots[offset];
if (offset)
mas->min = pivots[offset - 1] + 1;
@@ -4526,6 +4590,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
offset = ma_data_end(node, mt, pivots, mas->max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
if (offset)
mas->min = pivots[offset - 1] + 1;
@@ -4574,6 +4641,7 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
struct maple_enode *enode;
int level = 0;
unsigned char offset;
+ unsigned char node_end;
enum maple_type mt;
void __rcu **slots;
@@ -4597,7 +4665,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
- } while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
+ node_end = ma_data_end(node, mt, pivots, mas->max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ } while (unlikely(offset == node_end));
slots = ma_slots(node, mt);
pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
@@ -4613,6 +4685,9 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
mt = mte_node_type(mas->node);
slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
offset = 0;
pivot = pivots[0];
}
@@ -4659,11 +4734,14 @@ static inline void *mas_next_nentry(struct ma_state *mas,
return NULL;
}
- pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
- mas->index = mas_safe_min(mas, pivots, mas->offset);
+ pivots = ma_pivots(node, type);
count = ma_data_end(node, type, pivots, mas->max);
- if (ma_dead_node(node))
+ if (unlikely(ma_dead_node(node)))
+ return NULL;
+
+ mas->index = mas_safe_min(mas, pivots, mas->offset);
+ if (unlikely(ma_dead_node(node)))
return NULL;
if (mas->index > max)
@@ -4817,6 +4895,11 @@ retry:
slots = ma_slots(mn, mt);
pivots = ma_pivots(mn, mt);
+ if (unlikely(ma_dead_node(mn))) {
+ mas_rewalk(mas, index);
+ goto retry;
+ }
+
if (offset == mt_pivots[mt])
pivot = mas->max;
else
@@ -5400,24 +5483,26 @@ no_gap:
}
/*
- * mas_dead_leaves() - Mark all leaves of a node as dead.
+ * mte_dead_leaves() - Mark all leaves of a node as dead.
* @enode: The maple encoded node
* @mt: The maple tree
* @slots: Pointer to the slot array
*
* Must hold the write lock.
*
* Return: The number of leaves marked as dead.
*/
static inline
-unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
+unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
+ void __rcu **slots)
{
struct maple_node *node;
enum maple_type type;
void *entry;
int offset;
- for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
- entry = mas_slot_locked(mas, slots, offset);
+ for (offset = 0; offset < mt_slot_count(enode); offset++) {
+ entry = mt_slot(mt, slots, offset);
type = mte_node_type(entry);
node = mte_to_node(entry);
/* Use both node and type to catch LE & BE metadata */
@@ -5425,7 +5510,6 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
break;
mte_set_node_dead(entry);
- smp_wmb(); /* Needed for RCU */
node->type = type;
rcu_assign_pointer(slots[offset], node);
}
@@ -5433,151 +5517,160 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
return offset;
}
-static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
+/**
+ * mte_dead_walk() - Walk down a dead tree to just before the leaves
+ * @enode: The maple encoded node
+ * @offset: The starting offset
+ *
+ * Note: This can only be used from the RCU callback context.
+ */
+static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
{
struct maple_node *node, *next;
void __rcu **slots = NULL;
- next = mas_mn(mas);
+ next = mte_to_node(*enode);
do {
- mas->node = ma_enode_ptr(next);
- node = mas_mn(mas);
+ *enode = ma_enode_ptr(next);
+ node = mte_to_node(*enode);
slots = ma_slots(node, node->type);
- next = mas_slot_locked(mas, slots, offset);
+ next = rcu_dereference_protected(slots[offset],
+ lock_is_held(&rcu_callback_map));
offset = 0;
} while (!ma_is_leaf(next->type));
return slots;
}
+/**
+ * mt_free_walk() - Walk & free a tree in the RCU callback context
+ * @head: The RCU head that's within the node.
+ *
+ * Note: This can only be used from the RCU callback context.
+ */
static void mt_free_walk(struct rcu_head *head)
{
void __rcu **slots;
struct maple_node *node, *start;
- struct maple_tree mt;
+ struct maple_enode *enode;
unsigned char offset;
enum maple_type type;
- MA_STATE(mas, &mt, 0, 0);
node = container_of(head, struct maple_node, rcu);
if (ma_is_leaf(node->type))
goto free_leaf;
- mt_init_flags(&mt, node->ma_flags);
- mas_lock(&mas);
start = node;
- mas.node = mt_mk_node(node, node->type);
- slots = mas_dead_walk(&mas, 0);
- node = mas_mn(&mas);
+ enode = mt_mk_node(node, node->type);
+ slots = mte_dead_walk(&enode, 0);
+ node = mte_to_node(enode);
do {
mt_free_bulk(node->slot_len, slots);
offset = node->parent_slot + 1;
- mas.node = node->piv_parent;
- if (mas_mn(&mas) == node)
- goto start_slots_free;
-
- type = mte_node_type(mas.node);
- slots = ma_slots(mte_to_node(mas.node), type);
- if ((offset < mt_slots[type]) && (slots[offset]))
- slots = mas_dead_walk(&mas, offset);
-
- node = mas_mn(&mas);
+ enode = node->piv_parent;
+ if (mte_to_node(enode) == node)
+ goto free_leaf;
+
+ type = mte_node_type(enode);
+ slots = ma_slots(mte_to_node(enode), type);
+ if ((offset < mt_slots[type]) &&
+ rcu_dereference_protected(slots[offset],
+ lock_is_held(&rcu_callback_map)))
+ slots = mte_dead_walk(&enode, offset);
+ node = mte_to_node(enode);
} while ((node != start) || (node->slot_len < offset));
slots = ma_slots(node, node->type);
mt_free_bulk(node->slot_len, slots);
-start_slots_free:
- mas_unlock(&mas);
free_leaf:
mt_free_rcu(&node->rcu);
}
-static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
- struct maple_enode *prev, unsigned char offset)
+static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
+ struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
{
struct maple_node *node;
- struct maple_enode *next = mas->node;
+ struct maple_enode *next = *enode;
void __rcu **slots = NULL;
+ enum maple_type type;
+ unsigned char next_offset = 0;
do {
- mas->node = next;
- node = mas_mn(mas);
- slots = ma_slots(node, mte_node_type(mas->node));
- next = mas_slot_locked(mas, slots, 0);
+ *enode = next;
+ node = mte_to_node(*enode);
+ type = mte_node_type(*enode);
+ slots = ma_slots(node, type);
+ next = mt_slot_locked(mt, slots, next_offset);
if ((mte_dead_node(next)))
- next = mas_slot_locked(mas, slots, 1);
+ next = mt_slot_locked(mt, slots, ++next_offset);
- mte_set_node_dead(mas->node);
- node->type = mte_node_type(mas->node);
+ mte_set_node_dead(*enode);
+ node->type = type;
node->piv_parent = prev;
node->parent_slot = offset;
- offset = 0;
- prev = mas->node;
+ offset = next_offset;
+ next_offset = 0;
+ prev = *enode;
} while (!mte_is_leaf(next));
return slots;
}
-static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
+static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
bool free)
{
void __rcu **slots;
struct maple_node *node = mte_to_node(enode);
struct maple_enode *start;
- struct maple_tree mt;
- MA_STATE(mas, &mt, 0, 0);
-
- if (mte_is_leaf(enode))
+ if (mte_is_leaf(enode)) {
+ node->type = mte_node_type(enode);
goto free_leaf;
+ }
- mt_init_flags(&mt, ma_flags);
- mas_lock(&mas);
-
- mas.node = start = enode;
- slots = mas_destroy_descend(&mas, start, 0);
- node = mas_mn(&mas);
+ start = enode;
+ slots = mte_destroy_descend(&enode, mt, start, 0);
+ node = mte_to_node(enode); // Updated in the above call.
do {
enum maple_type type;
unsigned char offset;
struct maple_enode *parent, *tmp;
- node->slot_len = mas_dead_leaves(&mas, slots);
+ node->slot_len = mte_dead_leaves(enode, mt, slots);
if (free)
mt_free_bulk(node->slot_len, slots);
offset = node->parent_slot + 1;
- mas.node = node->piv_parent;
- if (mas_mn(&mas) == node)
- goto start_slots_free;
+ enode = node->piv_parent;
+ if (mte_to_node(enode) == node)
+ goto free_leaf;
- type = mte_node_type(mas.node);
- slots = ma_slots(mte_to_node(mas.node), type);
+ type = mte_node_type(enode);
+ slots = ma_slots(mte_to_node(enode), type);
if (offset >= mt_slots[type])
goto next;
- tmp = mas_slot_locked(&mas, slots, offset);
+ tmp = mt_slot_locked(mt, slots, offset);
if (mte_node_type(tmp) && mte_to_node(tmp)) {
- parent = mas.node;
- mas.node = tmp;
- slots = mas_destroy_descend(&mas, parent, offset);
+ parent = enode;
+ enode = tmp;
+ slots = mte_destroy_descend(&enode, mt, parent, offset);
}
next:
- node = mas_mn(&mas);
- } while (start != mas.node);
+ node = mte_to_node(enode);
+ } while (start != enode);
- node = mas_mn(&mas);
- node->slot_len = mas_dead_leaves(&mas, slots);
+ node = mte_to_node(enode);
+ node->slot_len = mte_dead_leaves(enode, mt, slots);
if (free)
mt_free_bulk(node->slot_len, slots);
-start_slots_free:
- mas_unlock(&mas);
-
free_leaf:
if (free)
mt_free_rcu(&node->rcu);
+ else
+ mt_clear_meta(mt, node, node->type);
}
/*
@@ -5593,10 +5686,10 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
struct maple_node *node = mte_to_node(enode);
if (mt_in_rcu(mt)) {
- mt_destroy_walk(enode, mt->ma_flags, false);
+ mt_destroy_walk(enode, mt, false);
call_rcu(&node->rcu, mt_free_walk);
} else {
- mt_destroy_walk(enode, mt->ma_flags, true);
+ mt_destroy_walk(enode, mt, true);
}
}
@@ -6617,11 +6710,11 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
while (likely(!ma_is_leaf(mt))) {
MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
slots = ma_slots(mn, mt);
- pivots = ma_pivots(mn, mt);
- max = pivots[0];
entry = mas_slot(mas, slots, 0);
+ pivots = ma_pivots(mn, mt);
if (unlikely(ma_dead_node(mn)))
return NULL;
+ max = pivots[0];
mas->node = entry;
mn = mas_mn(mas);
mt = mte_node_type(mas->node);
@@ -6641,13 +6734,13 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
if (likely(entry))
return entry;
- pivots = ma_pivots(mn, mt);
- mas->index = pivots[0] + 1;
mas->offset = 1;
entry = mas_slot(mas, slots, 1);
+ pivots = ma_pivots(mn, mt);
if (unlikely(ma_dead_node(mn)))
return NULL;
+ mas->index = pivots[0] + 1;
if (mas->index > limit)
goto none;
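
The two hunks above reorder the loads in mas_first_entry() so slot and pivot contents are read before the node is re-checked for death, never trusted afterwards. A minimal userspace sketch of that lockless read-then-validate pattern, with illustrative names (node, dead, slot) rather than the maple tree's own:

#include <stdatomic.h>

struct node {
	_Atomic int dead;
	void *slot[16];
	unsigned long pivot[16];
};

/* Load what we need first, then trust it only if the node is still live. */
static void *read_slot_validated(struct node *n, int i, unsigned long *max)
{
	void *entry = n->slot[i];		/* speculative load */
	unsigned long pivot = n->pivot[i];	/* speculative load */

	if (atomic_load_explicit(&n->dead, memory_order_acquire))
		return NULL;			/* caller restarts the walk */

	*max = pivot;
	return entry;
}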
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 07abcb6eb203..245038a9fe4e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5478,7 +5478,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
struct folio *pagecache_folio, spinlock_t *ptl)
{
const bool unshare = flags & FAULT_FLAG_UNSHARE;
- pte_t pte;
+ pte_t pte = huge_ptep_get(ptep);
struct hstate *h = hstate_vma(vma);
struct page *old_page;
struct folio *new_folio;
@@ -5488,6 +5488,17 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
struct mmu_notifier_range range;
/*
+	 * Never handle CoW for uffd-wp protected pages. It should only be
+	 * handled when the uffd-wp protection is removed.
+	 *
+	 * Note that only the CoW optimization path (in hugetlb_no_page())
+	 * can trigger this, because hugetlb_fault() will always resolve
+	 * the uffd-wp bit first.
+ */
+ if (!unshare && huge_pte_uffd_wp(pte))
+ return 0;
+
+ /*
* hugetlb does not support FOLL_FORCE-style write faults that keep the
* PTE mapped R/O such as maybe_mkwrite() would do.
*/
@@ -5500,7 +5511,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
return 0;
}
- pte = huge_ptep_get(ptep);
old_page = pte_page(pte);
delayacct_wpcopy_start();
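
The comment block added above gates CoW on the uffd-wp bit before any copy work starts, which is why the PTE is now read at declaration time. A condensed sketch of the resulting entry check, with hypothetical parameter names standing in for the real fault context:

/* Returns 0 when the fault must be left to userfaultfd-wp resolution. */
static int hugetlb_wp_should_copy(int unshare, int pte_is_uffd_wp)
{
	if (!unshare && pte_is_uffd_wp)
		return 0;	/* only hugetlb_no_page()'s CoW path hits this */
	return 1;		/* proceed with unshare/CoW handling */
}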
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 79c94ee55f97..1065e0568d05 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -556,15 +556,11 @@ static unsigned long kfence_init_pool(void)
* enters __slab_free() slow-path.
*/
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
- struct slab *slab = page_slab(&pages[i]);
+ struct slab *slab = page_slab(nth_page(pages, i));
if (!i || (i % 2))
continue;
- /* Verify we do not have a compound head page. */
- if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
- return addr;
-
__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
@@ -597,12 +593,26 @@ static unsigned long kfence_init_pool(void)
/* Protect the right redzone. */
if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
- return addr;
+ goto reset_slab;
addr += 2 * PAGE_SIZE;
}
return 0;
+
+reset_slab:
+ for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+ struct slab *slab = page_slab(nth_page(pages, i));
+
+ if (!i || (i % 2))
+ continue;
+#ifdef CONFIG_MEMCG
+ slab->memcg_data = 0;
+#endif
+ __folio_clear_slab(slab_folio(slab));
+ }
+
+ return addr;
}
static bool __init kfence_init_pool_early(void)
@@ -632,16 +642,6 @@ static bool __init kfence_init_pool_early(void)
* fails for the first page, and therefore expect addr==__kfence_pool in
* most failure cases.
*/
- for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
- struct slab *slab = virt_to_slab(p);
-
- if (!slab)
- continue;
-#ifdef CONFIG_MEMCG
- slab->memcg_data = 0;
-#endif
- __folio_clear_slab(slab_folio(slab));
- }
memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
__kfence_pool = NULL;
return false;
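
The new reset_slab label above is a standard init-with-rollback shape: when protecting a redzone fails partway through the loop, every slab flag set so far is cleared before the failing address is returned. A generic sketch of the pattern (names illustrative):

static int init_all(int n, int (*init)(int), void (*undo)(int))
{
	int i;

	for (i = 0; i < n; i++)
		if (init(i))
			goto rollback;
	return 0;

rollback:
	while (i--)	/* undo in reverse, excluding the failed index */
		undo(i);
	return -1;
}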
diff --git a/mm/memory.c b/mm/memory.c
index f456f3b5049c..01a23ad48a04 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3563,8 +3563,21 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct mmu_notifier_range range;
- if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
+ /*
+ * We need a reference to lock the folio because we don't hold
+ * the PTL so a racing thread can remove the device-exclusive
+ * entry and unmap it. If the folio is free the entry must
+ * have been removed already. If it happens to have already
+ * been re-allocated after being freed all we do is lock and
+ * unlock it.
+ */
+ if (!folio_try_get(folio))
+ return 0;
+
+ if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) {
+ folio_put(folio);
return VM_FAULT_RETRY;
+ }
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
vma->vm_mm, vmf->address & PAGE_MASK,
(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
@@ -3577,6 +3590,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
pte_unmap_unlock(vmf->pte, vmf->ptl);
folio_unlock(folio);
+ folio_put(folio);
mmu_notifier_invalidate_range_end(&range);
return 0;
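
The folio_try_get()/folio_put() pairing added above pins the folio across the lock because the PTL is not held and a racing thread may free it. A userspace sketch of the same try-get refcount idiom, with stand-in types rather than the kernel's folio API:

#include <stdatomic.h>
#include <stdbool.h>

struct obj { _Atomic int refs; };

/* Take a reference only if at least one still exists (never resurrect). */
static bool obj_try_get(struct obj *o)
{
	int r = atomic_load(&o->refs);

	while (r > 0)
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;
	return false;
}

static void obj_put(struct obj *o)
{
	atomic_fetch_sub(&o->refs, 1);
}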
diff --git a/mm/mmap.c b/mm/mmap.c
index ad499f7b767f..ff68a67a2a7c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2277,7 +2277,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
int count = 0;
int error = -ENOMEM;
MA_STATE(mas_detach, &mt_detach, 0, 0);
- mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
+ mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
mt_set_external_lock(&mt_detach, &mm->mmap_lock);
/*
@@ -3037,6 +3037,7 @@ void exit_mmap(struct mm_struct *mm)
*/
set_bit(MMF_OOM_SKIP, &mm->flags);
mmap_write_lock(mm);
+ mt_clear_in_rcu(&mm->mm_mt);
free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 62ba2bf577d7..2c718f45745f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -679,6 +679,7 @@ static void __del_from_avail_list(struct swap_info_struct *p)
{
int nid;
+ assert_spin_locked(&p->lock);
for_each_node(nid)
plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
}
@@ -2434,8 +2435,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
goto out_dput;
}
- del_from_avail_list(p);
spin_lock(&p->lock);
+ del_from_avail_list(p);
if (p->prio < 0) {
struct swap_info_struct *si = p;
int nid;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index bef6cf2b4d46..a50072066221 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3042,9 +3042,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
* allocation request, free them via vfree() if any.
*/
if (area->nr_pages != nr_small_pages) {
- warn_alloc(gfp_mask, NULL,
- "vmalloc error: size %lu, page order %u, failed to allocate pages",
- area->nr_pages * PAGE_SIZE, page_order);
+ /* vm_area_alloc_pages() can also fail due to a fatal signal */
+ if (!fatal_signal_pending(current))
+ warn_alloc(gfp_mask, NULL,
+ "vmalloc error: size %lu, page order %u, failed to allocate pages",
+ area->nr_pages * PAGE_SIZE, page_order);
goto fail;
}
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index c64050e839ac..1fffe2bed5b0 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -280,6 +280,10 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
write_unlock(&xen_9pfs_lock);
for (i = 0; i < priv->num_rings; i++) {
+ struct xen_9pfs_dataring *ring = &priv->rings[i];
+
+ cancel_work_sync(&ring->work);
+
if (!priv->rings[i].intf)
break;
if (priv->rings[i].irq > 0)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 17b946f9ba31..8455ba141ee6 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -68,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = {
};
/* This function requires the caller holds hdev->lock */
-static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
struct hci_conn_params *params;
struct hci_dev *hdev = conn->hdev;
@@ -88,9 +88,28 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
bdaddr_type);
- if (!params || !params->explicit_connect)
+ if (!params)
return;
+ if (params->conn) {
+ hci_conn_drop(params->conn);
+ hci_conn_put(params->conn);
+ params->conn = NULL;
+ }
+
+ if (!params->explicit_connect)
+ return;
+
+ /* If the status indicates successful cancellation of
+	 * the attempt (i.e. Unknown Connection Id) there's no point in
+ * notifying failure since we'll go back to keep trying to
+ * connect. The only exception is explicit connect requests
+ * where a timeout + cancel does indicate an actual failure.
+ */
+ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
+ mgmt_connect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, status);
+
/* The connection attempt was doing scan for new RPA, and is
* in scan phase. If params are not associated with any other
* autoconnect action, remove them completely. If they are, just unmark
@@ -178,7 +197,7 @@ static void le_scan_cleanup(struct work_struct *work)
rcu_read_unlock();
if (c == conn) {
- hci_connect_le_scan_cleanup(conn);
+ hci_connect_le_scan_cleanup(conn, 0x00);
hci_conn_cleanup(conn);
}
@@ -1049,6 +1068,17 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
return conn;
}
+static bool hci_conn_unlink(struct hci_conn *conn)
+{
+ if (!conn->link)
+ return false;
+
+ conn->link->link = NULL;
+ conn->link = NULL;
+
+ return true;
+}
+
int hci_conn_del(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -1060,15 +1090,16 @@ int hci_conn_del(struct hci_conn *conn)
cancel_delayed_work_sync(&conn->idle_work);
if (conn->type == ACL_LINK) {
- struct hci_conn *sco = conn->link;
- if (sco) {
- sco->link = NULL;
+ struct hci_conn *link = conn->link;
+
+ if (link) {
+ hci_conn_unlink(conn);
/* Due to race, SCO connection might be not established
* yet at this point. Delete it now, otherwise it is
* possible for it to be stuck and can't be deleted.
*/
- if (sco->handle == HCI_CONN_HANDLE_UNSET)
- hci_conn_del(sco);
+ if (link->handle == HCI_CONN_HANDLE_UNSET)
+ hci_conn_del(link);
}
/* Unacked frames */
@@ -1084,7 +1115,7 @@ int hci_conn_del(struct hci_conn *conn)
struct hci_conn *acl = conn->link;
if (acl) {
- acl->link = NULL;
+ hci_conn_unlink(conn);
hci_conn_drop(acl);
}
@@ -1179,31 +1210,8 @@ EXPORT_SYMBOL(hci_get_route);
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
struct hci_dev *hdev = conn->hdev;
- struct hci_conn_params *params;
- params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
- conn->dst_type);
- if (params && params->conn) {
- hci_conn_drop(params->conn);
- hci_conn_put(params->conn);
- params->conn = NULL;
- }
-
- /* If the status indicates successful cancellation of
- * the attempt (i.e. Unknown Connection Id) there's no point of
- * notifying failure since we'll go back to keep trying to
- * connect. The only exception is explicit connect requests
- * where a timeout + cancel does indicate an actual failure.
- */
- if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
- (params && params->explicit_connect))
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, status);
-
- /* Since we may have temporarily stopped the background scanning in
- * favor of connection establishment, we should restart it.
- */
- hci_update_passive_scan(hdev);
+ hci_connect_le_scan_cleanup(conn, status);
/* Enable advertising in case this was a failed connection
* attempt as a peripheral.
@@ -1237,15 +1245,15 @@ static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
struct hci_conn *conn = data;
+ bt_dev_dbg(hdev, "err %d", err);
+
hci_dev_lock(hdev);
if (!err) {
- hci_connect_le_scan_cleanup(conn);
+ hci_connect_le_scan_cleanup(conn, 0x00);
goto done;
}
- bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
-
/* Check if connection is still pending */
if (conn != hci_lookup_le_connect(hdev))
goto done;
@@ -2438,6 +2446,12 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
c->state = BT_CLOSED;
hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
+
+ /* Unlink before deleting otherwise it is possible that
+ * hci_conn_del removes the link which may cause the list to
+ * contain items already freed.
+ */
+ hci_conn_unlink(c);
hci_conn_del(c);
}
}
@@ -2775,6 +2789,9 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
int r = 0;
+ if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+ return 0;
+
switch (conn->state) {
case BT_CONNECTED:
case BT_CONFIG:
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index ad92a4be5851..e87c928c9e17 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2881,16 +2881,6 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
conn->resp_addr_type = peer_addr_type;
bacpy(&conn->resp_addr, peer_addr);
-
- /* We don't want the connection attempt to stick around
- * indefinitely since LE doesn't have a page timeout concept
- * like BR/EDR. Set a timer for any connection that doesn't use
- * the accept list for connecting.
- */
- if (filter_policy == HCI_LE_USE_PEER_ADDR)
- queue_delayed_work(conn->hdev->workqueue,
- &conn->le_conn_timeout,
- conn->conn_timeout);
}
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
@@ -5902,6 +5892,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
if (status)
goto unlock;
+ /* Drop the connection if it has been aborted */
+ if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
addr_type = BDADDR_LE_PUBLIC;
else
@@ -6995,7 +6991,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
- hci_connect_cfm(bis, ev->status);
+ hci_iso_setup_path(bis);
}
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 5a6aa1627791..632be1267288 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -246,8 +246,9 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
if (IS_ERR(skb)) {
- bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
- PTR_ERR(skb));
+ if (!event)
+ bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
+ PTR_ERR(skb));
return PTR_ERR(skb);
}
@@ -5126,8 +5127,11 @@ static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
if (test_bit(HCI_CONN_SCANNING, &conn->flags))
return 0;
+ if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+ return 0;
+
return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
- 6, &conn->dst, HCI_CMD_TIMEOUT);
+ 0, NULL, HCI_CMD_TIMEOUT);
}
static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
@@ -6102,6 +6106,9 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
conn->conn_timeout, NULL);
done:
+ if (err == -ETIMEDOUT)
+ hci_le_connect_cancel_sync(hdev, conn);
+
/* Re-enable advertising after the connection attempt is finished. */
hci_resume_advertising_sync(hdev);
return err;
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index bed1a7b9205c..707f229f896a 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -433,7 +433,7 @@ static void hidp_set_timer(struct hidp_session *session)
static void hidp_del_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
- del_timer(&session->timer);
+ del_timer_sync(&session->timer);
}
static void hidp_process_report(struct hidp_session *session, int type,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 49926f59cc12..55a7226233f9 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4652,33 +4652,27 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
- mutex_lock(&conn->chan_lock);
-
- chan = __l2cap_get_chan_by_scid(conn, dcid);
+ chan = l2cap_get_chan_by_scid(conn, dcid);
if (!chan) {
- mutex_unlock(&conn->chan_lock);
cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
return 0;
}
- l2cap_chan_hold(chan);
- l2cap_chan_lock(chan);
-
rsp.dcid = cpu_to_le16(chan->scid);
rsp.scid = cpu_to_le16(chan->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
chan->ops->set_shutdown(chan);
+ mutex_lock(&conn->chan_lock);
l2cap_chan_del(chan, ECONNRESET);
+ mutex_unlock(&conn->chan_lock);
chan->ops->close(chan);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
- mutex_unlock(&conn->chan_lock);
-
return 0;
}
@@ -4698,33 +4692,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
- mutex_lock(&conn->chan_lock);
-
- chan = __l2cap_get_chan_by_scid(conn, scid);
+ chan = l2cap_get_chan_by_scid(conn, scid);
if (!chan) {
mutex_unlock(&conn->chan_lock);
return 0;
}
- l2cap_chan_hold(chan);
- l2cap_chan_lock(chan);
-
if (chan->state != BT_DISCONN) {
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
- mutex_unlock(&conn->chan_lock);
return 0;
}
+ mutex_lock(&conn->chan_lock);
l2cap_chan_del(chan, 0);
+ mutex_unlock(&conn->chan_lock);
chan->ops->close(chan);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
- mutex_unlock(&conn->chan_lock);
-
return 0;
}
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 1111da4e2f2b..cd1a27ac555d 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -235,27 +235,41 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
return err;
}
-static int sco_connect(struct hci_dev *hdev, struct sock *sk)
+static int sco_connect(struct sock *sk)
{
struct sco_conn *conn;
struct hci_conn *hcon;
+ struct hci_dev *hdev;
int err, type;
BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
+ hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
+ if (!hdev)
+ return -EHOSTUNREACH;
+
+ hci_dev_lock(hdev);
+
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
- (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)))
- return -EOPNOTSUPP;
+ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
sco_pi(sk)->setting, &sco_pi(sk)->codec);
- if (IS_ERR(hcon))
- return PTR_ERR(hcon);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto unlock;
+ }
+
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
conn = sco_conn_add(hcon);
if (!conn) {
@@ -263,13 +277,15 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
return -ENOMEM;
}
- /* Update source addr of the socket */
- bacpy(&sco_pi(sk)->src, &hcon->src);
-
err = sco_chan_add(conn, sk, NULL);
if (err)
return err;
+ lock_sock(sk);
+
+ /* Update source addr of the socket */
+ bacpy(&sco_pi(sk)->src, &hcon->src);
+
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
@@ -278,6 +294,13 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
+ release_sock(sk);
+
+ return err;
+
+unlock:
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
return err;
}
@@ -565,7 +588,6 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- struct hci_dev *hdev;
int err;
BT_DBG("sk %p", sk);
@@ -574,37 +596,26 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
- lock_sock(sk);
- if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
- err = -EBADFD;
- goto done;
- }
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
+ return -EBADFD;
- if (sk->sk_type != SOCK_SEQPACKET) {
+ if (sk->sk_type != SOCK_SEQPACKET)
err = -EINVAL;
- goto done;
- }
-
- hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
- if (!hdev) {
- err = -EHOSTUNREACH;
- goto done;
- }
- hci_dev_lock(hdev);
+ lock_sock(sk);
/* Set destination address and psm */
bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
+ release_sock(sk);
- err = sco_connect(hdev, sk);
- hci_dev_unlock(hdev);
- hci_dev_put(hdev);
+ err = sco_connect(sk);
if (err)
- goto done;
+ return err;
+
+ lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
-done:
release_sock(sk);
return err;
}
@@ -1129,6 +1140,8 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
break;
}
+ release_sock(sk);
+
/* find total buffer size required to copy codec + caps */
hci_dev_lock(hdev);
list_for_each_entry(c, &hdev->local_codecs, list) {
@@ -1146,15 +1159,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
buf_len += sizeof(struct bt_codecs);
if (buf_len > len) {
hci_dev_put(hdev);
- err = -ENOBUFS;
- break;
+ return -ENOBUFS;
}
ptr = optval;
if (put_user(num_codecs, ptr)) {
hci_dev_put(hdev);
- err = -EFAULT;
- break;
+ return -EFAULT;
}
ptr += sizeof(num_codecs);
@@ -1194,12 +1205,14 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
ptr += len;
}
- if (!err && put_user(buf_len, optlen))
- err = -EFAULT;
-
hci_dev_unlock(hdev);
hci_dev_put(hdev);
+ lock_sock(sk);
+
+ if (!err && put_user(buf_len, optlen))
+ err = -EFAULT;
+
break;
default:
diff --git a/net/core/dev.c b/net/core/dev.c
index 480600a075ce..c7f13742b56c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3197,6 +3197,7 @@ static u16 skb_tx_hash(const struct net_device *dev,
}
if (skb_rx_queue_recorded(skb)) {
+ DEBUG_NET_WARN_ON_ONCE(qcount == 0);
hash = skb_get_rx_queue(skb);
if (hash >= qoffset)
hash -= qoffset;
@@ -10870,7 +10871,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
GFP_KERNEL, NULL, 0,
- portid, nlmsg_seq(nlh));
+ portid, nlh);
/*
* Flush the unicast and multicast chains
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 906aebdc566b..e844d75220fb 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3975,16 +3975,23 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned int change,
u32 event, gfp_t flags, int *new_nsid,
- int new_ifindex, u32 portid, u32 seq)
+ int new_ifindex, u32 portid,
+ const struct nlmsghdr *nlh)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
+ u32 seq = 0;
skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
if (skb == NULL)
goto errout;
+ if (nlmsg_report(nlh))
+ seq = nlmsg_seq(nlh);
+ else
+ portid = 0;
+
err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
type, portid, seq, change, 0, 0, event,
new_nsid, new_ifindex, -1, flags);
@@ -4020,7 +4027,7 @@ static void rtmsg_ifinfo_event(int type, struct net_device *dev,
return;
skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
- new_ifindex, portid, nlmsg_seq(nlh));
+ new_ifindex, portid, nlh);
if (skb)
rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 050a875d09c5..78238a13dbcf 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5597,18 +5597,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
if (skb_cloned(to))
return false;
- /* In general, avoid mixing slab allocated and page_pool allocated
- * pages within the same SKB. However when @to is not pp_recycle and
- * @from is cloned, we can transition frag pages from page_pool to
- * reference counted.
- *
- * On the other hand, don't allow coalescing two pp_recycle SKBs if
- * @from is cloned, in case the SKB is using page_pool fragment
+ /* In general, avoid mixing page_pool and non-page_pool allocated
+ * pages within the same SKB. Additionally avoid dealing with clones
+ * with page_pool pages, in case the SKB is using page_pool fragment
* references (PP_FLAG_PAGE_FRAG). Since we only take full page
* references for cloned SKBs at the moment that would result in
* inconsistent reference counts.
+ * In theory we could take full references if @from is cloned and
+	 * !@to->pp_recycle but it's tricky (due to a potential race with
+ * the clone disappearing) and rare, so not worth dealing with.
*/
- if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
+ if (to->pp_recycle != from->pp_recycle ||
+ (from->pp_recycle && skb_cloned(from)))
return false;
if (len <= skb_tailroom(to)) {
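
Pulled out as a pure predicate for clarity, the rewritten condition above refuses to coalesce when the two skbs disagree on page_pool recycling, or when @from is both page_pool-backed and cloned. A sketch, not kernel code:

#include <stdbool.h>

static bool can_coalesce_pp(bool to_pp_recycle, bool from_pp_recycle,
			    bool from_cloned)
{
	if (to_pp_recycle != from_pp_recycle)
		return false;		/* never mix pp and non-pp pages */
	if (from_pp_recycle && from_cloned)
		return false;		/* full refs would corrupt pp frags */
	return true;
}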
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 528d4b37983d..fb85aca81961 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -734,13 +734,21 @@ __bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *tim
* bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
* @ctx: XDP context pointer.
* @hash: Return value pointer.
+ * @rss_type: Return value pointer for RSS type.
+ *
+ * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
+ * hardware used when calculating RSS hash value. The RSS type can be decoded
+ * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
+ * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
+ * ``XDP_RSS_TYPE_L*``.
*
* Return:
* * Returns 0 on success or ``-errno`` on error.
* * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
* * ``-ENODATA`` : means no RX-hash available for this frame
*/
-__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
+__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
{
return -EOPNOTSUPP;
}
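
A hedged usage sketch for the extended kfunc, in the style of the selftest programs later in this patch: the program reads the hash and its RSS type in one call and branches on an individual bit. XDP_RSS_L4 is assumed to be one of the ``XDP_RSS_L*`` bits the kernel-doc above refers to, and the enum is assumed to come in via vmlinux.h; verify both in your tree.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;

SEC("xdp")
int rx_hash_example(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type = 0;
	__u32 hash = 0;

	if (bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		return XDP_PASS;	/* -EOPNOTSUPP or -ENODATA */

	if (rss_type & XDP_RSS_L4)	/* hash covers L4 ports */
		bpf_printk("L4 hash 0x%x", hash);

	return XDP_PASS;
}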
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 0d0cc4ef2b85..40fe70fc2015 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -25,6 +25,7 @@ static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
+static int tcp_app_win_max = 31;
static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
static int tcp_min_snd_mss_max = 65535;
static int ip_privileged_port_min;
@@ -1198,6 +1199,8 @@ static struct ctl_table ipv4_net_table[] = {
.maxlen = sizeof(u8),
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &tcp_app_win_max,
},
{
.procname = "tcp_adv_win_scale",
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 89daa6b953ff..39bda2b1066e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2780,7 +2780,7 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
{
while (iter->cur_sk < iter->end_sk)
- sock_put(iter->batch[iter->cur_sk++]);
+ sock_gen_put(iter->batch[iter->cur_sk++]);
}
static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
@@ -2941,7 +2941,7 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
* st->bucket. See tcp_seek_last_pos().
*/
st->offset++;
- sock_put(iter->batch[iter->cur_sk++]);
+ sock_gen_put(iter->batch[iter->cur_sk++]);
}
if (iter->cur_sk < iter->end_sk)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4caa70a1b871..e5a337e6b970 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1397,9 +1397,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
- if (ipv6_only_sock(sk))
- return -ENETUNREACH;
- return udp_sendmsg(sk, msg, len);
+ err = ipv6_only_sock(sk) ?
+ -ENETUNREACH : udp_sendmsg(sk, msg, len);
+ msg->msg_name = sin6;
+ msg->msg_namelen = addr_len;
+ return err;
}
}
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index d237d142171c..bceaab8dd8e4 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -9,11 +9,18 @@
void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
struct request_sock *req)
{
- struct sock *ssk = subflow->tcp_sock;
- struct sock *sk = subflow->conn;
+ struct sock *sk, *ssk;
struct sk_buff *skb;
struct tcp_sock *tp;
+ /* on early fallback the subflow context is deleted by
+ * subflow_syn_recv_sock()
+ */
+ if (!subflow)
+ return;
+
+ ssk = subflow->tcp_sock;
+ sk = subflow->conn;
tp = tcp_sk(ssk);
subflow->is_mptfo = 1;
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index b30cea2fbf3f..355f798d575a 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1192,9 +1192,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
*/
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
if (mp_opt.data_fin && mp_opt.data_len == 1 &&
- mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
- schedule_work(&msk->work))
- sock_hold(subflow->conn);
+ mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
+ mptcp_schedule_work((struct sock *)msk);
return true;
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 2d26b9114373..e6cb36784a68 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2626,7 +2626,7 @@ static void mptcp_worker(struct work_struct *work)
lock_sock(sk);
state = sk->sk_state;
- if (unlikely(state == TCP_CLOSE))
+ if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto unlock;
mptcp_check_data_fin_ack(sk);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 33dd27765116..f46d8f6c40aa 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -408,9 +408,8 @@ void mptcp_subflow_reset(struct sock *ssk)
tcp_send_active_reset(ssk, GFP_ATOMIC);
tcp_done(ssk);
- if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
- schedule_work(&mptcp_sk(sk)->work))
- return; /* worker will put sk for us */
+ if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+ mptcp_schedule_work(sk);
sock_put(sk);
}
@@ -1101,8 +1100,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
skb_ext_del(skb, SKB_EXT_MPTCP);
return MAPPING_OK;
} else {
- if (updated && schedule_work(&msk->work))
- sock_hold((struct sock *)msk);
+ if (updated)
+ mptcp_schedule_work((struct sock *)msk);
return MAPPING_DATA_FIN;
}
@@ -1205,17 +1204,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
- struct sock *sk = (struct sock *)msk;
-
if (likely(ssk->sk_state != TCP_CLOSE))
return;
if (skb_queue_empty(&ssk->sk_receive_queue) &&
- !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
- sock_hold(sk);
- if (!schedule_work(&msk->work))
- sock_put(sk);
- }
+ !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+ mptcp_schedule_work((struct sock *)msk);
}
static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ca3ebfdb3023..a8cf9a88758e 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -913,7 +913,7 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
{
struct vport *vport = ovs_vport_rcu(dp, out_port);
- if (likely(vport)) {
+ if (likely(vport && netif_carrier_ok(vport->dev))) {
u16 mru = OVS_CB(skb)->mru;
u32 cutlen = OVS_CB(skb)->cutlen;
diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
index 3a70255c8d02..76f0434d3d06 100644
--- a/net/qrtr/af_qrtr.c
+++ b/net/qrtr/af_qrtr.c
@@ -498,6 +498,11 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
if (!size || len != ALIGN(size, 4) + hdrlen)
goto err;
+ if ((cb->type == QRTR_TYPE_NEW_SERVER ||
+ cb->type == QRTR_TYPE_RESUME_TX) &&
+ size < sizeof(struct qrtr_ctrl_pkt))
+ goto err;
+
if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
cb->type != QRTR_TYPE_RESUME_TX)
goto err;
@@ -510,9 +515,6 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
/* Remote node endpoint can bridge other distant nodes */
const struct qrtr_ctrl_pkt *pkt;
- if (size < sizeof(*pkt))
- goto err;
-
pkt = data + hdrlen;
qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
}
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 94727feb07b3..b046b11200c9 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -1154,7 +1154,8 @@ static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
for (pos = chunk->subh.ifwdtsn_hdr->skip; \
- (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
+ (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
+ sizeof(struct sctp_ifwdtsn_skip); pos++)
#define sctp_walk_ifwdtsn(pos, ch) \
_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
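
The bound rewritten above stops the walk at the last position whose whole skip entry still fits, instead of allowing a partial read past the chunk end. A sketch of the same arithmetic over a plain buffer, with an illustrative entry layout rather than struct sctp_ifwdtsn_skip's real fields:

#include <stddef.h>
#include <stdint.h>

struct skip_entry { uint16_t stream; uint16_t mid; };

static void walk_skips(struct skip_entry *first, size_t bytes)
{
	struct skip_entry *pos;

	if (bytes < sizeof(*pos))	/* avoid unsigned underflow below */
		return;

	for (pos = first;
	     (char *)pos <= (char *)first + bytes - sizeof(*pos); pos++) {
		/* every field of *pos is in bounds here */
	}
}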
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index c6b4a62276f6..50c38b624f77 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -3270,6 +3270,17 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
sk_common_release(sk);
goto out;
}
+
+		/* smc_clcsock_release() does not wait for smc->clcsock->sk's
+		 * destruction; its sk_state might not be TCP_CLOSE after
+		 * smc->sk is close()d, and TCP timers can fire later,
+		 * which need the net ref.
+ */
+ sk = smc->clcsock->sk;
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ sk->sk_net_refcnt = 1;
+ get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+ sock_inuse_add(net, 1);
} else {
smc->clcsock = clcsock;
}
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 958ee9bdb316..4c89ff333f6f 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -108,6 +108,7 @@ static noinline void check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mn->slot[1] != NULL);
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
mas.node = MAS_START;
mas_nomem(&mas, GFP_KERNEL);
@@ -160,6 +161,7 @@ static noinline void check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas_allocated(&mas) != i);
MT_BUG_ON(mt, !mn);
MT_BUG_ON(mt, not_empty(mn));
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
}
@@ -192,6 +194,7 @@ static noinline void check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, not_empty(mn));
MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
MT_BUG_ON(mt, !mn);
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
}
@@ -210,6 +213,7 @@ static noinline void check_new_node(struct maple_tree *mt)
mn = mas_pop_node(&mas);
MT_BUG_ON(mt, not_empty(mn));
MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
}
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
@@ -233,6 +237,7 @@ static noinline void check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
mn = mas_pop_node(&mas);
MT_BUG_ON(mt, not_empty(mn));
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
}
@@ -269,6 +274,7 @@ static noinline void check_new_node(struct maple_tree *mt)
mn = mas_pop_node(&mas); /* get the next node. */
MT_BUG_ON(mt, mn == NULL);
MT_BUG_ON(mt, not_empty(mn));
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
}
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
@@ -294,6 +300,7 @@ static noinline void check_new_node(struct maple_tree *mt)
mn = mas_pop_node(&mas2); /* get the next node. */
MT_BUG_ON(mt, mn == NULL);
MT_BUG_ON(mt, not_empty(mn));
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
}
MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
@@ -334,10 +341,12 @@ static noinline void check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
mn = mas_pop_node(&mas);
MT_BUG_ON(mt, not_empty(mn));
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
mn = mas_pop_node(&mas);
MT_BUG_ON(mt, not_empty(mn));
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
}
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
@@ -375,6 +384,7 @@ static noinline void check_new_node(struct maple_tree *mt)
mas_node_count(&mas, i); /* Request */
mas_nomem(&mas, GFP_KERNEL); /* Fill request */
mn = mas_pop_node(&mas); /* get the next node. */
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
mas_destroy(&mas);
@@ -382,10 +392,13 @@ static noinline void check_new_node(struct maple_tree *mt)
mas_node_count(&mas, i); /* Request */
mas_nomem(&mas, GFP_KERNEL); /* Fill request */
mn = mas_pop_node(&mas); /* get the next node. */
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
mn = mas_pop_node(&mas); /* get the next node. */
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
mn = mas_pop_node(&mas); /* get the next node. */
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
mas_destroy(&mas);
}
@@ -35369,6 +35382,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
MT_BUG_ON(mt, allocated != 1 + height * 3);
mn = mas_pop_node(&mas);
MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0);
mas_destroy(&mas);
@@ -35386,6 +35400,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
mas_destroy(&mas);
allocated = mas_allocated(&mas);
MT_BUG_ON(mt, allocated != 0);
+ mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
MT_BUG_ON(mt, mas_preallocate(&mas, GFP_KERNEL) != 0);
@@ -35756,6 +35771,7 @@ void farmer_tests(void)
tree.ma_root = mt_mk_node(node, maple_leaf_64);
mt_dump(&tree);
+ node->parent = ma_parent_ptr(node);
ma_free_rcu(node);
/* Check things that will make lockdep angry */
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index 662b6c6c5ed7..fde13010980d 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -159,8 +159,7 @@ void test_xdp_do_redirect(void)
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
- NETDEV_XDP_ACT_NDO_XMIT_SG,
+ NETDEV_XDP_ACT_RX_SG,
"veth_src query_opts.feature_flags"))
goto out;
@@ -170,9 +169,34 @@ void test_xdp_do_redirect(void)
if (!ASSERT_EQ(query_opts.feature_flags,
NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG,
+ "veth_dst query_opts.feature_flags"))
+ goto out;
+
+ /* Enable GRO */
+ SYS("ethtool -K veth_src gro on");
+ SYS("ethtool -K veth_dst gro on");
+
+ err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_src bpf_xdp_query gro on"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
NETDEV_XDP_ACT_NDO_XMIT_SG,
- "veth_dst query_opts.feature_flags"))
+ "veth_src query_opts.feature_flags gro on"))
+ goto out;
+
+ err = bpf_xdp_query(ifindex_dst, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_dst bpf_xdp_query gro on"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "veth_dst query_opts.feature_flags gro on"))
goto out;
memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
index 490e851dc27d..626c461fa34d 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
@@ -268,6 +268,8 @@ static int verify_xsk_metadata(struct xsk *xsk)
if (!ASSERT_NEQ(meta->rx_hash, 0, "rx_hash"))
return -1;
+ ASSERT_EQ(meta->rx_hash_type, 0, "rx_hash_type");
+
xsk_ring_cons__release(&xsk->rx, 1);
refill_rx(xsk, comp_addr);
diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
index 4c55b4d79d3d..e1c787815e44 100644
--- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
@@ -12,10 +12,14 @@ struct {
__type(value, __u32);
} xsk SEC(".maps");
+__u64 pkts_skip = 0;
+__u64 pkts_fail = 0;
+__u64 pkts_redir = 0;
+
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
__u64 *timestamp) __ksym;
-extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
- __u32 *hash) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+ enum xdp_rss_hash_type *rss_type) __ksym;
SEC("xdp")
int rx(struct xdp_md *ctx)
@@ -26,7 +30,7 @@ int rx(struct xdp_md *ctx)
struct udphdr *udp = NULL;
struct iphdr *iph = NULL;
struct xdp_meta *meta;
- int ret;
+ int err;
data = (void *)(long)ctx->data;
data_end = (void *)(long)ctx->data_end;
@@ -46,17 +50,20 @@ int rx(struct xdp_md *ctx)
udp = NULL;
}
- if (!udp)
+ if (!udp) {
+ __sync_add_and_fetch(&pkts_skip, 1);
return XDP_PASS;
+ }
- if (udp->dest != bpf_htons(9091))
+ /* Forwarding UDP:9091 to AF_XDP */
+ if (udp->dest != bpf_htons(9091)) {
+ __sync_add_and_fetch(&pkts_skip, 1);
return XDP_PASS;
+ }
- bpf_printk("forwarding UDP:9091 to AF_XDP");
-
- ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
- if (ret != 0) {
- bpf_printk("bpf_xdp_adjust_meta returned %d", ret);
+ err = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
+ if (err) {
+ __sync_add_and_fetch(&pkts_fail, 1);
return XDP_PASS;
}
@@ -65,20 +72,19 @@ int rx(struct xdp_md *ctx)
meta = data_meta;
if (meta + 1 > data) {
- bpf_printk("bpf_xdp_adjust_meta doesn't appear to work");
+ __sync_add_and_fetch(&pkts_fail, 1);
return XDP_PASS;
}
- if (!bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
- bpf_printk("populated rx_timestamp with %llu", meta->rx_timestamp);
- else
+ err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp);
+ if (err)
meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */
- if (!bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash))
- bpf_printk("populated rx_hash with %u", meta->rx_hash);
- else
- meta->rx_hash = 0; /* Used by AF_XDP as not avail signal */
+ err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
+ if (err < 0)
+ meta->rx_hash_err = err; /* Used by AF_XDP as no hash signal */
+ __sync_add_and_fetch(&pkts_redir, 1);
return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
}
diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata.c b/tools/testing/selftests/bpf/progs/xdp_metadata.c
index 77678b034389..d151d406a123 100644
--- a/tools/testing/selftests/bpf/progs/xdp_metadata.c
+++ b/tools/testing/selftests/bpf/progs/xdp_metadata.c
@@ -21,8 +21,8 @@ struct {
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
__u64 *timestamp) __ksym;
-extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
- __u32 *hash) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+ enum xdp_rss_hash_type *rss_type) __ksym;
SEC("xdp")
int rx(struct xdp_md *ctx)
@@ -56,7 +56,7 @@ int rx(struct xdp_md *ctx)
if (timestamp == 0)
meta->rx_timestamp = 1;
- bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash);
+ bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
}
diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata2.c b/tools/testing/selftests/bpf/progs/xdp_metadata2.c
index cf69d05451c3..85f88d9d7a78 100644
--- a/tools/testing/selftests/bpf/progs/xdp_metadata2.c
+++ b/tools/testing/selftests/bpf/progs/xdp_metadata2.c
@@ -5,17 +5,18 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
- __u32 *hash) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+ enum xdp_rss_hash_type *rss_type) __ksym;
int called;
SEC("freplace/rx")
int freplace_rx(struct xdp_md *ctx)
{
+ enum xdp_rss_hash_type type = 0;
u32 hash = 0;
/* Call _any_ metadata function to make sure we don't crash. */
- bpf_xdp_metadata_rx_hash(ctx, &hash);
+ bpf_xdp_metadata_rx_hash(ctx, &hash, &type);
called++;
return XDP_PASS;
}
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
index 1c8acb68b977..987cf0db5ebc 100644
--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -141,7 +141,11 @@ static void verify_xdp_metadata(void *data)
meta = data - sizeof(*meta);
printf("rx_timestamp: %llu\n", meta->rx_timestamp);
- printf("rx_hash: %u\n", meta->rx_hash);
+ if (meta->rx_hash_err < 0)
+ printf("No rx_hash err=%d\n", meta->rx_hash_err);
+ else
+ printf("rx_hash: 0x%X with RSS type:0x%X\n",
+ meta->rx_hash, meta->rx_hash_type);
}
static void verify_skb_metadata(int fd)
@@ -212,7 +216,9 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
while (true) {
errno = 0;
ret = poll(fds, rxq + 1, 1000);
- printf("poll: %d (%d)\n", ret, errno);
+ printf("poll: %d (%d) skip=%llu fail=%llu redir=%llu\n",
+ ret, errno, bpf_obj->bss->pkts_skip,
+ bpf_obj->bss->pkts_fail, bpf_obj->bss->pkts_redir);
if (ret < 0)
break;
if (ret == 0)
diff --git a/tools/testing/selftests/bpf/xdp_metadata.h b/tools/testing/selftests/bpf/xdp_metadata.h
index f6780fbb0a21..0c4624dc6f2f 100644
--- a/tools/testing/selftests/bpf/xdp_metadata.h
+++ b/tools/testing/selftests/bpf/xdp_metadata.h
@@ -12,4 +12,8 @@
struct xdp_meta {
__u64 rx_timestamp;
__u32 rx_hash;
+ union {
+ __u32 rx_hash_type;
+ __s32 rx_hash_err;
+ };
};
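
On the consumer side, the union added above lets the AF_XDP user program distinguish a failed kfunc call (negative rx_hash_err, rx_hash not populated) from a valid hash with its type, mirroring the verify_xdp_metadata() hunk earlier in this patch. A minimal sketch:

#include <stdio.h>
#include <linux/types.h>
#include "xdp_metadata.h"

static void report_hash(const struct xdp_meta *meta)
{
	if (meta->rx_hash_err < 0)
		printf("no rx_hash, err=%d\n", meta->rx_hash_err);
	else
		printf("rx_hash 0x%x type 0x%x\n",
		       meta->rx_hash, meta->rx_hash_type);
}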
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
index a39bb2560d9b..03f92d7aeb19 100644
--- a/tools/testing/selftests/drivers/net/bonding/Makefile
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -8,11 +8,12 @@ TEST_PROGS := \
dev_addr_lists.sh \
mode-1-recovery-updelay.sh \
mode-2-recovery-updelay.sh \
- option_prio.sh \
+ bond_options.sh \
bond-eth-type-change.sh
TEST_FILES := \
lag_lib.sh \
+ bond_topo_3d1c.sh \
net_forwarding_lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
new file mode 100755
index 000000000000..db29a3146a86
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
@@ -0,0 +1,264 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test bonding options with mode 1,5,6
+
+ALL_TESTS="
+ prio
+ arp_validate
+"
+
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source ${lib_dir}/net_forwarding_lib.sh
+source ${lib_dir}/bond_topo_3d1c.sh
+
+skip_prio()
+{
+ local skip=1
+
+	# check if iproute supports the prio option
+ ip -n ${s_ns} link set eth0 type bond_slave prio 10
+ [[ $? -ne 0 ]] && skip=0
+
+	# check if the kernel supports the prio option
+ ip -n ${s_ns} -d link show eth0 | grep -q "prio 10"
+ [[ $? -ne 0 ]] && skip=0
+
+ return $skip
+}
+
+skip_ns()
+{
+ local skip=1
+
+	# check if iproute supports the ns_ip6_target option
+ ip -n ${s_ns} link add bond1 type bond ns_ip6_target ${g_ip6}
+ [[ $? -ne 0 ]] && skip=0
+
+	# check if the kernel supports the ns_ip6_target option
+ ip -n ${s_ns} -d link show bond1 | grep -q "ns_ip6_target ${g_ip6}"
+ [[ $? -ne 0 ]] && skip=0
+
+ ip -n ${s_ns} link del bond1
+
+ return $skip
+}
+
+active_slave=""
+check_active_slave()
+{
+ local target_active_slave=$1
+ active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
+ test "$active_slave" = "$target_active_slave"
+ check_err $? "Current active slave is $active_slave but not $target_active_slave"
+}
+
+
+# Test bonding prio option
+prio_test()
+{
+ local param="$1"
+ RET=0
+
+ # create bond
+ bond_reset "${param}"
+
+ # check bonding member prio value
+ ip -n ${s_ns} link set eth0 type bond_slave prio 0
+ ip -n ${s_ns} link set eth1 type bond_slave prio 10
+ ip -n ${s_ns} link set eth2 type bond_slave prio 11
+ cmd_jq "ip -n ${s_ns} -d -j link show eth0" \
+ ".[].linkinfo.info_slave_data | select (.prio == 0)" "-e" &> /dev/null
+ check_err $? "eth0 prio is not 0"
+ cmd_jq "ip -n ${s_ns} -d -j link show eth1" \
+ ".[].linkinfo.info_slave_data | select (.prio == 10)" "-e" &> /dev/null
+ check_err $? "eth1 prio is not 10"
+ cmd_jq "ip -n ${s_ns} -d -j link show eth2" \
+ ".[].linkinfo.info_slave_data | select (.prio == 11)" "-e" &> /dev/null
+ check_err $? "eth2 prio is not 11"
+
+ bond_check_connection "setup"
+
+ # active slave should be the primary slave
+ check_active_slave eth1
+
+ # active slave should be the higher prio slave
+ ip -n ${s_ns} link set $active_slave down
+ bond_check_connection "fail over"
+ check_active_slave eth2
+
+ # when only 1 slave is up
+ ip -n ${s_ns} link set $active_slave down
+ bond_check_connection "only 1 slave up"
+ check_active_slave eth0
+
+	# when a higher prio slave comes back up
+ ip -n ${s_ns} link set eth2 up
+ bond_check_connection "higher prio slave up"
+ case $primary_reselect in
+ "0")
+ check_active_slave "eth2"
+ ;;
+ "1")
+ check_active_slave "eth0"
+ ;;
+ "2")
+ check_active_slave "eth0"
+ ;;
+ esac
+ local pre_active_slave=$active_slave
+
+	# when the primary slave comes back up
+ ip -n ${s_ns} link set eth1 up
+ bond_check_connection "primary slave up"
+ case $primary_reselect in
+ "0")
+ check_active_slave "eth1"
+ ;;
+ "1")
+ check_active_slave "$pre_active_slave"
+ ;;
+ "2")
+ check_active_slave "$pre_active_slave"
+ ip -n ${s_ns} link set $active_slave down
+ bond_check_connection "pre_active slave down"
+ check_active_slave "eth1"
+ ;;
+ esac
+
+ # Test changing bond slave prio
+ if [[ "$primary_reselect" == "0" ]];then
+ ip -n ${s_ns} link set eth0 type bond_slave prio 1000000
+ ip -n ${s_ns} link set eth1 type bond_slave prio 0
+ ip -n ${s_ns} link set eth2 type bond_slave prio -50
+ ip -n ${s_ns} -d link show eth0 | grep -q 'prio 1000000'
+ check_err $? "eth0 prio is not 1000000"
+ ip -n ${s_ns} -d link show eth1 | grep -q 'prio 0'
+ check_err $? "eth1 prio is not 0"
+ ip -n ${s_ns} -d link show eth2 | grep -q 'prio -50'
+ check_err $? "eth3 prio is not -50"
+ check_active_slave "eth1"
+
+ ip -n ${s_ns} link set $active_slave down
+ bond_check_connection "change slave prio"
+ check_active_slave "eth0"
+ fi
+}
+
+prio_miimon()
+{
+ local primary_reselect
+ local mode=$1
+
+ for primary_reselect in 0 1 2; do
+ prio_test "mode $mode miimon 100 primary eth1 primary_reselect $primary_reselect"
+ log_test "prio" "$mode miimon primary_reselect $primary_reselect"
+ done
+}
+
+prio_arp()
+{
+ local primary_reselect
+ local mode=$1
+
+ for primary_reselect in 0 1 2; do
+ prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
+ log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect"
+ done
+}
+
+prio_ns()
+{
+ local primary_reselect
+ local mode=$1
+
+ if skip_ns; then
+ log_test_skip "prio ns" "Current iproute or kernel doesn't support bond option 'ns_ip6_target'."
+ return 0
+ fi
+
+ for primary_reselect in 0 1 2; do
+ prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
+ log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect"
+ done
+}
+
+prio()
+{
+ local mode modes="active-backup balance-tlb balance-alb"
+
+ if skip_prio; then
+ log_test_skip "prio" "Current iproute or kernel doesn't support bond option 'prio'."
+ return 0
+ fi
+
+ for mode in $modes; do
+ prio_miimon $mode
+ prio_arp $mode
+ prio_ns $mode
+ done
+}
+
+arp_validate_test()
+{
+ local param="$1"
+ RET=0
+
+ # create bond
+ bond_reset "${param}"
+
+ bond_check_connection
+ [ $RET -ne 0 ] && log_test "arp_validate" "$retmsg"
+
+	# wait a while to make sure the mii status is stable
+ sleep 5
+ for i in $(seq 0 2); do
+ mii_status=$(cmd_jq "ip -n ${s_ns} -j -d link show eth$i" ".[].linkinfo.info_slave_data.mii_status")
+ if [ ${mii_status} != "UP" ]; then
+ RET=1
+ log_test "arp_validate" "interface eth$i mii_status $mii_status"
+ fi
+ done
+}
+
+arp_validate_arp()
+{
+ local mode=$1
+ local val
+ for val in $(seq 0 6); do
+ arp_validate_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} arp_validate $val"
+ log_test "arp_validate" "$mode arp_ip_target arp_validate $val"
+ done
+}
+
+arp_validate_ns()
+{
+ local mode=$1
+ local val
+
+ if skip_ns; then
+ log_test_skip "arp_validate ns" "Current iproute or kernel doesn't support bond option 'ns_ip6_target'."
+ return 0
+ fi
+
+ for val in $(seq 0 6); do
+ arp_validate_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} arp_validate $val"
+ log_test "arp_validate" "$mode ns_ip6_target arp_validate $val"
+ done
+}
+
+arp_validate()
+{
+ arp_validate_arp "active-backup"
+ arp_validate_ns "active-backup"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh
new file mode 100644
index 000000000000..4045ca97fb22
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh
@@ -0,0 +1,143 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Topology for Bond mode 1,5,6 testing
+#
+# +-------------------------------------+
+# | bond0 |
+# | + | Server
+# | eth0 | eth1 eth2 | 192.0.2.1/24
+# | +-------------------+ | 2001:db8::1/24
+# | | | | |
+# +-------------------------------------+
+# | | |
+# +-------------------------------------+
+# | | | | |
+# | +---+---------+---------+---+ | Gateway
+# | | br0 | | 192.0.2.254/24
+# | +-------------+-------------+ | 2001:db8::254/24
+# | | |
+# +-------------------------------------+
+# |
+# +-------------------------------------+
+# | | | Client
+# | + | 192.0.2.10/24
+# | eth0 | 2001:db8::10/24
+# +-------------------------------------+
+
+s_ns="s-$(mktemp -u XXXXXX)"
+c_ns="c-$(mktemp -u XXXXXX)"
+g_ns="g-$(mktemp -u XXXXXX)"
+s_ip4="192.0.2.1"
+c_ip4="192.0.2.10"
+g_ip4="192.0.2.254"
+s_ip6="2001:db8::1"
+c_ip6="2001:db8::10"
+g_ip6="2001:db8::254"
+
+gateway_create()
+{
+ ip netns add ${g_ns}
+ ip -n ${g_ns} link add br0 type bridge
+ ip -n ${g_ns} link set br0 up
+ ip -n ${g_ns} addr add ${g_ip4}/24 dev br0
+ ip -n ${g_ns} addr add ${g_ip6}/24 dev br0
+}
+
+gateway_destroy()
+{
+ ip -n ${g_ns} link del br0
+ ip netns del ${g_ns}
+}
+
+server_create()
+{
+ ip netns add ${s_ns}
+ ip -n ${s_ns} link add bond0 type bond mode active-backup miimon 100
+
+ for i in $(seq 0 2); do
+ ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns}
+
+ ip -n ${g_ns} link set s${i} up
+ ip -n ${g_ns} link set s${i} master br0
+ ip -n ${s_ns} link set eth${i} master bond0
+ done
+
+ ip -n ${s_ns} link set bond0 up
+ ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0
+ ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0
+ sleep 2
+}
+
+# Reset bond with new mode and options
+bond_reset()
+{
+ local param="$1"
+
+ ip -n ${s_ns} link set bond0 down
+ ip -n ${s_ns} link del bond0
+
+ ip -n ${s_ns} link add bond0 type bond $param
+ for i in $(seq 0 2); do
+ ip -n ${s_ns} link set eth$i master bond0
+ done
+
+ ip -n ${s_ns} link set bond0 up
+ ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0
+ ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0
+ sleep 2
+}
+
+server_destroy()
+{
+ for i in $(seq 0 2); do
+ ip -n ${s_ns} link del eth${i}
+ done
+ ip netns del ${s_ns}
+}
+
+client_create()
+{
+ ip netns add ${c_ns}
+ ip -n ${c_ns} link add eth0 type veth peer name c0 netns ${g_ns}
+
+ ip -n ${g_ns} link set c0 up
+ ip -n ${g_ns} link set c0 master br0
+
+ ip -n ${c_ns} link set eth0 up
+ ip -n ${c_ns} addr add ${c_ip4}/24 dev eth0
+ ip -n ${c_ns} addr add ${c_ip6}/24 dev eth0
+}
+
+client_destroy()
+{
+ ip -n ${c_ns} link del eth0
+ ip netns del ${c_ns}
+}
+
+setup_prepare()
+{
+ gateway_create
+ server_create
+ client_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ client_destroy
+ server_destroy
+ gateway_destroy
+}
+
+bond_check_connection()
+{
+ local msg=${1:-"check connection"}
+
+ sleep 2
+ ip netns exec ${s_ns} ping ${c_ip4} -c5 -i 0.1 &>/dev/null
+ check_err $? "${msg}: ping failed"
+ ip netns exec ${s_ns} ping6 ${c_ip6} -c5 -i 0.1 &>/dev/null
+ check_err $? "${msg}: ping6 failed"
+}
diff --git a/tools/testing/selftests/drivers/net/bonding/option_prio.sh b/tools/testing/selftests/drivers/net/bonding/option_prio.sh
deleted file mode 100755
index c32eebff5005..000000000000
--- a/tools/testing/selftests/drivers/net/bonding/option_prio.sh
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# Test bonding option prio
-#
-
-ALL_TESTS="
- prio_arp_ip_target_test
- prio_miimon_test
-"
-
-REQUIRE_MZ=no
-REQUIRE_JQ=no
-NUM_NETIFS=0
-lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
-
-destroy()
-{
- ip link del bond0 &>/dev/null
- ip link del br0 &>/dev/null
- ip link del veth0 &>/dev/null
- ip link del veth1 &>/dev/null
- ip link del veth2 &>/dev/null
- ip netns del ns1 &>/dev/null
- ip link del veth3 &>/dev/null
-}
-
-cleanup()
-{
- pre_cleanup
-
- destroy
-}
-
-skip()
-{
- local skip=1
- ip link add name bond0 type bond mode 1 miimon 100 &>/dev/null
- ip link add name veth0 type veth peer name veth0_p
- ip link set veth0 master bond0
-
-	# check if iproute supports the prio option
- ip link set dev veth0 type bond_slave prio 10
- [[ $? -ne 0 ]] && skip=0
-
-	# check if the bonding driver supports the prio option
- ip -d link show veth0 | grep -q "prio 10"
- [[ $? -ne 0 ]] && skip=0
-
- ip link del bond0 &>/dev/null
- ip link del veth0
-
- return $skip
-}
-
-active_slave=""
-check_active_slave()
-{
- local target_active_slave=$1
- active_slave="$(cat /sys/class/net/bond0/bonding/active_slave)"
- test "$active_slave" = "$target_active_slave"
- check_err $? "Current active slave is $active_slave but not $target_active_slave"
-}
-
-
-# Test bonding prio option with mode=$mode monitor=$monitor
-# and primary_reselect=$primary_reselect
-prio_test()
-{
- RET=0
-
- local monitor=$1
- local mode=$2
- local primary_reselect=$3
-
- local bond_ip4="192.169.1.2"
- local peer_ip4="192.169.1.1"
- local bond_ip6="2009:0a:0b::02"
- local peer_ip6="2009:0a:0b::01"
-
-
- # create veths
- ip link add name veth0 type veth peer name veth0_p
- ip link add name veth1 type veth peer name veth1_p
- ip link add name veth2 type veth peer name veth2_p
-
- # create bond
- if [[ "$monitor" == "miimon" ]];then
- ip link add name bond0 type bond mode $mode miimon 100 primary veth1 primary_reselect $primary_reselect
- elif [[ "$monitor" == "arp_ip_target" ]];then
- ip link add name bond0 type bond mode $mode arp_interval 1000 arp_ip_target $peer_ip4 primary veth1 primary_reselect $primary_reselect
- elif [[ "$monitor" == "ns_ip6_target" ]];then
- ip link add name bond0 type bond mode $mode arp_interval 1000 ns_ip6_target $peer_ip6 primary veth1 primary_reselect $primary_reselect
- fi
- ip link set bond0 up
- ip link set veth0 master bond0
- ip link set veth1 master bond0
- ip link set veth2 master bond0
-	# set each bonding member's prio value and verify it
- ip link set dev veth0 type bond_slave prio 0
- ip link set dev veth1 type bond_slave prio 10
- ip link set dev veth2 type bond_slave prio 11
- ip -d link show veth0 | grep -q 'prio 0'
- check_err $? "veth0 prio is not 0"
-	ip -d link show veth1 | grep -q 'prio 10'
-	check_err $? "veth1 prio is not 10"
-	ip -d link show veth2 | grep -q 'prio 11'
-	check_err $? "veth2 prio is not 11"
-
- ip link set veth0 up
- ip link set veth1 up
- ip link set veth2 up
- ip link set veth0_p up
- ip link set veth1_p up
- ip link set veth2_p up
-
- # prepare ping target
- ip link add name br0 type bridge
- ip link set br0 up
- ip link set veth0_p master br0
- ip link set veth1_p master br0
- ip link set veth2_p master br0
- ip link add name veth3 type veth peer name veth3_p
- ip netns add ns1
- ip link set veth3_p master br0 up
- ip link set veth3 netns ns1 up
- ip netns exec ns1 ip addr add $peer_ip4/24 dev veth3
- ip netns exec ns1 ip addr add $peer_ip6/64 dev veth3
- ip addr add $bond_ip4/24 dev bond0
- ip addr add $bond_ip6/64 dev bond0
- sleep 5
-
- ping $peer_ip4 -c5 -I bond0 &>/dev/null
- check_err $? "ping failed 1."
- ping6 $peer_ip6 -c5 -I bond0 &>/dev/null
- check_err $? "ping6 failed 1."
-
-	# active slave should be the primary slave
- check_active_slave veth1
-
- # active slave should be the higher prio slave
- ip link set $active_slave down
- ping $peer_ip4 -c5 -I bond0 &>/dev/null
- check_err $? "ping failed 2."
- check_active_slave veth2
-
- # when only 1 slave is up
- ip link set $active_slave down
- ping $peer_ip4 -c5 -I bond0 &>/dev/null
- check_err $? "ping failed 3."
- check_active_slave veth0
-
-	# when a higher prio slave comes back up
- ip link set veth2 up
- ping $peer_ip4 -c5 -I bond0 &>/dev/null
- check_err $? "ping failed 4."
- case $primary_reselect in
- "0")
- check_active_slave "veth2"
- ;;
- "1")
- check_active_slave "veth0"
- ;;
- "2")
- check_active_slave "veth0"
- ;;
- esac
- local pre_active_slave=$active_slave
-
-	# when the primary slave comes back up
- ip link set veth1 up
- ping $peer_ip4 -c5 -I bond0 &>/dev/null
- check_err $? "ping failed 5."
- case $primary_reselect in
- "0")
- check_active_slave "veth1"
- ;;
- "1")
- check_active_slave "$pre_active_slave"
- ;;
- "2")
- check_active_slave "$pre_active_slave"
- ip link set $active_slave down
- ping $peer_ip4 -c5 -I bond0 &>/dev/null
- check_err $? "ping failed 6."
- check_active_slave "veth1"
- ;;
- esac
-
-	# Test changing bond slave prio
- if [[ "$primary_reselect" == "0" ]];then
- ip link set dev veth0 type bond_slave prio 1000000
- ip link set dev veth1 type bond_slave prio 0
- ip link set dev veth2 type bond_slave prio -50
- ip -d link show veth0 | grep -q 'prio 1000000'
- check_err $? "veth0 prio is not 1000000"
- ip -d link show veth1 | grep -q 'prio 0'
- check_err $? "veth1 prio is not 0"
- ip -d link show veth2 | grep -q 'prio -50'
- check_err $? "veth3 prio is not -50"
- check_active_slave "veth1"
-
- ip link set $active_slave down
- ping $peer_ip4 -c5 -I bond0 &>/dev/null
- check_err $? "ping failed 7."
- check_active_slave "veth0"
- fi
-
- cleanup
-
- log_test "prio_test" "Test bonding option 'prio' with mode=$mode monitor=$monitor and primary_reselect=$primary_reselect"
-}
-
-prio_miimon_test()
-{
- local mode
- local primary_reselect
-
- for mode in 1 5 6; do
- for primary_reselect in 0 1 2; do
- prio_test "miimon" $mode $primary_reselect
- done
- done
-}
-
-prio_arp_ip_target_test()
-{
- local primary_reselect
-
- for primary_reselect in 0 1 2; do
- prio_test "arp_ip_target" 1 $primary_reselect
- done
-}
-
-if skip;then
- log_test_skip "option_prio.sh" "Current iproute doesn't support 'prio'."
- exit 0
-fi
-
-trap cleanup EXIT
-
-tests_run
-
-exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 4c7ce07afa2f..d1d421ec10a3 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -49,3 +49,4 @@ CONFIG_IPV6_IOAM6_LWTUNNEL=y
CONFIG_CRYPTO_SM4_GENERIC=y
CONFIG_AMT=m
CONFIG_VXLAN=m
+CONFIG_IP_SCTP=m
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 48e52f995a98..b1eb7bce599d 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -913,6 +913,7 @@ test_listener()
$client4_port > /dev/null 2>&1 &
local listener_pid=$!
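+	# give the LISTENER_CREATED event time to be written before verifying it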
+ sleep 0.5
verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
# ADD_ADDR from client to server machine reusing the subflow port
@@ -928,6 +929,7 @@ test_listener()
# Delete the listener from the client ns, if one was created
kill_wait $listener_pid
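+	# likewise, let the LISTENER_CLOSED event land before verifying it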
+ sleep 0.5
verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
}
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index 3243c90d449e..5d467d1993cb 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -62,7 +62,7 @@ class OvsDatapath(GenericNetlinkSocket):
nla_map = (
("OVS_DP_ATTR_UNSPEC", "none"),
("OVS_DP_ATTR_NAME", "asciiz"),
- ("OVS_DP_ATTR_UPCALL_PID", "uint32"),
+ ("OVS_DP_ATTR_UPCALL_PID", "array(uint32)"),
("OVS_DP_ATTR_STATS", "dpstats"),
("OVS_DP_ATTR_MEGAFLOW_STATS", "megaflowstats"),
("OVS_DP_ATTR_USER_FEATURES", "uint32"),
diff --git a/tools/virtio/virtio-trace/README b/tools/virtio/virtio-trace/README
index b64845b823ab..4fb9368bf751 100644
--- a/tools/virtio/virtio-trace/README
+++ b/tools/virtio/virtio-trace/README
@@ -61,7 +61,7 @@ and
id=channel0,name=agent-ctl-path\
##data path##
-chardev pipe,id=charchannel1,path=/tmp/virtio-trace/trace-path-cpu0\
- -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel0,\
+ -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel1,\
id=channel1,name=trace-path-cpu0\
...