Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/video_detect.c | 41
-rw-r--r-- drivers/ata/libata-scsi.c | 13
-rw-r--r-- drivers/ata/libata-transport.c | 19
-rw-r--r-- drivers/clocksource/hyperv_timer.c | 29
-rw-r--r-- drivers/dma/apple-admac.c | 2
-rw-r--r-- drivers/dma/at_hdmac.c | 153
-rw-r--r-- drivers/dma/at_hdmac_regs.h | 10
-rw-r--r-- drivers/dma/idxd/cdev.c | 18
-rw-r--r-- drivers/dma/idxd/device.c | 26
-rw-r--r-- drivers/dma/idxd/idxd.h | 32
-rw-r--r-- drivers/dma/idxd/init.c | 4
-rw-r--r-- drivers/dma/idxd/sysfs.c | 2
-rw-r--r-- drivers/dma/mv_xor_v2.c | 1
-rw-r--r-- drivers/dma/pxa_dma.c | 4
-rw-r--r-- drivers/dma/stm32-dma.c | 14
-rw-r--r-- drivers/dma/stm32-mdma.c | 1
-rw-r--r-- drivers/dma/ti/k3-udma-glue.c | 3
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r-- drivers/firmware/efi/libstub/arm64-stub.c | 17
-rw-r--r-- drivers/firmware/efi/libstub/efistub.h | 28
-rw-r--r-- drivers/firmware/efi/libstub/smbios.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 32
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c | 25
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h | 15
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 4
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 5
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 11
-rw-r--r-- drivers/gpu/drm/rcar-du/Kconfig | 13
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 7
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 47
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.h | 1
-rw-r--r-- drivers/hid/hid-asus.c | 4
-rw-r--r-- drivers/hid/hid-hyperv.c | 2
-rw-r--r-- drivers/hid/wacom_wac.c | 11
-rw-r--r-- drivers/hv/hv_balloon.c | 2
-rw-r--r-- drivers/hwspinlock/qcom_hwspinlock.c | 42
-rw-r--r-- drivers/mmc/host/sdhci-brcmstb.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-cqhci.h | 24
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 7
-rw-r--r-- drivers/mmc/host/sdhci-of-arasan.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-tegra.c | 3
-rw-r--r-- drivers/mmc/host/sdhci_am654.c | 7
-rw-r--r-- drivers/net/can/at91_can.c | 2
-rw-r--r-- drivers/net/can/c_can/c_can_main.c | 2
-rw-r--r-- drivers/net/can/can327.c | 2
-rw-r--r-- drivers/net/can/cc770/cc770.c | 2
-rw-r--r-- drivers/net/can/ctucanfd/ctucanfd_base.c | 2
-rw-r--r-- drivers/net/can/dev/skb.c | 10
-rw-r--r-- drivers/net/can/flexcan/flexcan-core.c | 2
-rw-r--r-- drivers/net/can/grcan.c | 2
-rw-r--r-- drivers/net/can/ifi_canfd/ifi_canfd.c | 2
-rw-r--r-- drivers/net/can/janz-ican3.c | 2
-rw-r--r-- drivers/net/can/kvaser_pciefd.c | 2
-rw-r--r-- drivers/net/can/m_can/m_can.c | 2
-rw-r--r-- drivers/net/can/mscan/mscan.c | 2
-rw-r--r-- drivers/net/can/pch_can.c | 2
-rw-r--r-- drivers/net/can/peak_canfd/peak_canfd.c | 2
-rw-r--r-- drivers/net/can/rcar/rcar_can.c | 2
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 15
-rw-r--r-- drivers/net/can/sja1000/sja1000.c | 2
-rw-r--r-- drivers/net/can/slcan/slcan-core.c | 2
-rw-r--r-- drivers/net/can/softing/softing_main.c | 2
-rw-r--r-- drivers/net/can/spi/hi311x.c | 2
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 2
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c | 2
-rw-r--r-- drivers/net/can/sun4i_can.c | 2
-rw-r--r-- drivers/net/can/ti_hecc.c | 2
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 2
-rw-r--r-- drivers/net/can/usb/esd_usb.c | 2
-rw-r--r-- drivers/net/can/usb/etas_es58x/es58x_core.c | 2
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 2
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 2
-rw-r--r-- drivers/net/can/usb/mcba_usb.c | 2
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 2
-rw-r--r-- drivers/net/can/usb/ucan.c | 2
-rw-r--r-- drivers/net/can/usb/usb_8dev.c | 2
-rw-r--r-- drivers/net/can/xilinx_can.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_macsec.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c | 18
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 54
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 8
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 3
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.h | 1
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 25
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 135
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h | 57
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 32
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_rxtx.c | 7
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_star_emac.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 92
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 3
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 29
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 39
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 8
-rw-r--r-- drivers/net/ethernet/sunplus/spl2sw_driver.c | 1
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 5
-rw-r--r-- drivers/net/hamradio/bpqether.c | 2
-rw-r--r-- drivers/net/macsec.c | 50
-rw-r--r-- drivers/net/macvlan.c | 4
-rw-r--r-- drivers/net/phy/mscc/mscc_macsec.c | 1
-rw-r--r-- drivers/net/tun.c | 18
-rw-r--r-- drivers/net/wan/lapbether.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath11k/qmi.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath11k/reg.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 4
-rw-r--r-- drivers/net/wireless/cisco/airo.c | 18
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 5
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2400pci.c | 8
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2400pci.h | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2500pci.c | 8
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2500pci.h | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2500usb.c | 8
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2500usb.h | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 60
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.h | 8
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 6
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt61pci.c | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt61pci.h | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt73usb.c | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt73usb.h | 2
-rw-r--r-- drivers/net/wwan/Kconfig | 2
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_coredump.c | 1
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_devlink.c | 1
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_imem_ops.c | 8
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_mux.h | 1
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_pcie.c | 18
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_wwan.c | 43
-rw-r--r-- drivers/net/wwan/mhi_wwan_mbim.c | 1
-rw-r--r-- drivers/nvme/host/core.c | 3
-rw-r--r-- drivers/nvme/host/pci.c | 2
-rw-r--r-- drivers/nvme/target/configfs.c | 8
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 22
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 2
-rw-r--r-- drivers/phy/ralink/phy-mt7621-pci.c | 3
-rw-r--r-- drivers/phy/st/phy-stm32-usbphyc.c | 2
-rw-r--r-- drivers/phy/sunplus/phy-sunplus-usb2.c | 4
-rw-r--r-- drivers/phy/tegra/xusb.c | 20
-rw-r--r-- drivers/platform/x86/hp-wmi.c | 12
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 25
-rw-r--r-- drivers/platform/x86/intel/hid.c | 3
-rw-r--r-- drivers/platform/x86/intel/pmt/class.c | 31
-rw-r--r-- drivers/platform/x86/p2sb.c | 15
-rw-r--r-- drivers/platform/x86/touchscreen_dmi.c | 25
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype6.c | 21
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 14
-rw-r--r-- drivers/scsi/scsi_debug.c | 7
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 13
-rw-r--r-- drivers/soundwire/intel.c | 1
-rw-r--r-- drivers/soundwire/qcom.c | 9
-rw-r--r-- drivers/spi/spi-amd.c | 2
-rw-r--r-- drivers/spi/spi-intel.c | 8
-rw-r--r-- drivers/spi/spi-meson-spicc.c | 24
-rw-r--r-- drivers/spi/spi-mt65xx.c | 23
-rw-r--r-- drivers/spi/spi-stm32.c | 3
-rw-r--r-- drivers/spi/spi-tegra210-quad.c | 6
212 files changed, 1623 insertions, 785 deletions
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 9cd8797d12bb..b2a616287638 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -646,6 +646,20 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
/*
+ * Models which have nvidia-ec-wmi support, but should not use it.
+ * Note this indicates a likely firmware bug on these models and should
+ * be revisited if/when Linux gets support for dynamic mux mode.
+ */
+ {
+ .callback = video_detect_force_native,
+ /* Dell G15 5515 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
+ },
+ },
+
+ /*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.
*/
@@ -670,7 +684,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
static bool google_cros_ec_present(void)
{
- return acpi_dev_found("GOOG0004");
+ return acpi_dev_found("GOOG0004") || acpi_dev_found("GOOG000C");
}
/*
@@ -718,6 +732,10 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (apple_gmux_present())
return acpi_backlight_apple_gmux;
+ /* Chromebooks should always prefer native backlight control. */
+ if (google_cros_ec_present() && native_available)
+ return acpi_backlight_native;
+
/* On systems with ACPI video use either native or ACPI video. */
if (video_caps & ACPI_VIDEO_BACKLIGHT) {
/*
@@ -735,13 +753,6 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
return acpi_backlight_video;
}
- /*
- * Chromebooks that don't have backlight handle in ACPI table
- * are supposed to use native backlight if it's available.
- */
- if (google_cros_ec_present() && native_available)
- return acpi_backlight_native;
-
/* No ACPI video (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor;
}
@@ -754,6 +765,18 @@ EXPORT_SYMBOL(acpi_video_get_backlight_type);
bool acpi_video_backlight_use_native(void)
{
- return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
+ /*
+ * Call __acpi_video_get_backlight_type() to let it know that
+ * a native backlight is available.
+ */
+ __acpi_video_get_backlight_type(true);
+
+ /*
+ * For now just always return true. There is a whole bunch of laptop
+ * models where (video_caps & ACPI_VIDEO_BACKLIGHT) is false causing
+ * __acpi_video_get_backlight_type() to return vendor, while these
+ * models only have a native backlight control.
+ */
+ return true;
}
EXPORT_SYMBOL(acpi_video_backlight_use_native);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e2ebb0b065e2..06a3d95ed8f9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3264,6 +3264,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
case REPORT_LUNS:
case REQUEST_SENSE:
case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
@@ -3922,6 +3923,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
return ata_scsi_write_same_xlat;
case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
if (ata_try_flush_cache(dev))
return ata_scsi_flush_xlat;
break;
@@ -3962,9 +3964,19 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
{
+ struct ata_port *ap = dev->link->ap;
u8 scsi_op = scmd->cmnd[0];
ata_xlat_func_t xlat_func;
+ /*
+ * scsi_queue_rq() will defer commands if scsi_host_in_recovery().
+ * However, this check is done without holding the ap->lock (a libata
+ * specific lock), so we can have received an error irq since then,
+ * therefore we must check if EH is pending, while holding ap->lock.
+ */
+ if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
if (unlikely(!scmd->cmd_len))
goto bad_cdb_len;
@@ -4145,6 +4157,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
* turning this into a no-op.
*/
case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
fallthrough;
/* no-op's, complete with success */
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index a7e9a75410a3..e4fb9d1b9b39 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -301,7 +301,9 @@ int ata_tport_add(struct device *parent,
pm_runtime_enable(dev);
pm_runtime_forbid(dev);
- transport_add_device(dev);
+ error = transport_add_device(dev);
+ if (error)
+ goto tport_transport_add_err;
transport_configure_device(dev);
error = ata_tlink_add(&ap->link);
@@ -312,12 +314,12 @@ int ata_tport_add(struct device *parent,
tport_link_err:
transport_remove_device(dev);
+ tport_transport_add_err:
device_del(dev);
tport_err:
transport_destroy_device(dev);
put_device(dev);
- ata_host_put(ap->host);
return error;
}
@@ -456,7 +458,9 @@ int ata_tlink_add(struct ata_link *link)
goto tlink_err;
}
- transport_add_device(dev);
+ error = transport_add_device(dev);
+ if (error)
+ goto tlink_transport_err;
transport_configure_device(dev);
ata_for_each_dev(ata_dev, link, ALL) {
@@ -471,6 +475,7 @@ int ata_tlink_add(struct ata_link *link)
ata_tdev_delete(ata_dev);
}
transport_remove_device(dev);
+ tlink_transport_err:
device_del(dev);
tlink_err:
transport_destroy_device(dev);
@@ -708,7 +713,13 @@ static int ata_tdev_add(struct ata_device *ata_dev)
return error;
}
- transport_add_device(dev);
+ error = transport_add_device(dev);
+ if (error) {
+ device_del(dev);
+ ata_tdev_free(ata_dev);
+ return error;
+ }
+
transport_configure_device(dev);
return 0;
}
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index bb47610bbd1c..18de1f439ffd 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/acpi.h>
+#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -395,25 +396,25 @@ static u64 notrace read_hv_sched_clock_tsc(void)
static void suspend_hv_clock_tsc(struct clocksource *arg)
{
- u64 tsc_msr;
+ union hv_reference_tsc_msr tsc_msr;
/* Disable the TSC page */
- tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
- tsc_msr &= ~BIT_ULL(0);
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
+ tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.enable = 0;
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}
static void resume_hv_clock_tsc(struct clocksource *arg)
{
phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
- u64 tsc_msr;
+ union hv_reference_tsc_msr tsc_msr;
/* Re-enable the TSC page */
- tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
- tsc_msr &= GENMASK_ULL(11, 0);
- tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
+ tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.enable = 1;
+ tsc_msr.pfn = HVPFN_DOWN(phys_addr);
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
@@ -495,7 +496,7 @@ static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
static bool __init hv_init_tsc_clocksource(void)
{
- u64 tsc_msr;
+ union hv_reference_tsc_msr tsc_msr;
phys_addr_t phys_addr;
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
@@ -530,10 +531,10 @@ static bool __init hv_init_tsc_clocksource(void)
* (which already has at least the low 12 bits set to zero since
* it is page aligned). Also set the "enable" bit, which is bit 0.
*/
- tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
- tsc_msr &= GENMASK_ULL(11, 0);
- tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
+ tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.enable = 1;
+ tsc_msr.pfn = HVPFN_DOWN(phys_addr);
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
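[Editor's note, not part of the patch: the hunks above replace open-coded masking of HV_REGISTER_REFERENCE_TSC with a bitfield union supplied by the Hyper-V TLFS headers. A rough sketch of the layout the new code assumes, with field widths inferred from the old BIT_ULL(0)/GENMASK_ULL(11, 0) arithmetic rather than taken from this diff:]

union hv_reference_tsc_msr {
	u64 as_uint64;
	struct {
		u64 enable:1;	/* bit 0: reference TSC page enable */
		u64 reserved:11;	/* bits 11:1 */
		u64 pfn:52;	/* bits 63:12: page frame number, i.e. HVPFN_DOWN(phys_addr) */
	} __packed;
};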
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 317ca76ccafd..a2cc520225d3 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -493,7 +493,7 @@ static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
- return &ad->channels[index].chan;
+ return dma_get_slave_channel(&ad->channels[index].chan);
}
static int admac_drain_reports(struct admac_data *ad, int channo)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 5a50423b7378..858bd64f1313 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
ATC_SPIP_BOUNDARY(first->boundary));
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
ATC_DPIP_BOUNDARY(first->boundary));
+ /* Don't allow CPU to reorder channel enable. */
+ wmb();
dma_writel(atdma, CHER, atchan->mask);
vdbg_dump_regs(atchan);
@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
- u32 ctrla, dscr, trials;
+ u32 ctrla, dscr;
+ unsigned int i;
/*
* If the cookie doesn't match to the currently running transfer then
@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
dscr = channel_readl(atchan, DSCR);
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
- for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+ for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
u32 new_dscr;
rmb(); /* ensure DSCR is read after CTRLA */
@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
}
- if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+ if (unlikely(i == ATC_MAX_DSCR_TRIALS))
return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);
- /* If the transfer was a memset, free our temporary buffer */
- if (desc->memset_buffer) {
- dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
- desc->memset_paddr);
- desc->memset_buffer = false;
- }
-
- /* move children to free_list */
- list_splice_init(&desc->tx_list, &atchan->free_list);
- /* move myself to free_list */
- list_move(&desc->desc_node, &atchan->free_list);
-
spin_unlock_irqrestore(&atchan->lock, flags);
dma_descriptor_unmap(txd);
@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
dmaengine_desc_get_callback_invoke(txd, NULL);
dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
- struct at_desc *desc, *_desc;
- LIST_HEAD(list);
- unsigned long flags;
-
- dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
spin_lock_irqsave(&atchan->lock, flags);
-
- /*
- * Submit queued descriptors ASAP, i.e. before we go through
- * the completed ones.
- */
- if (!list_empty(&atchan->queue))
- atc_dostart(atchan, atc_first_queued(atchan));
- /* empty active_list now it is completed */
- list_splice_init(&atchan->active_list, &list);
- /* empty queue list by moving descriptors (if any) to active_list */
- list_splice_init(&atchan->queue, &atchan->active_list);
-
+ /* move children to free_list */
+ list_splice_init(&desc->tx_list, &atchan->free_list);
+ /* add myself to free_list */
+ list_add(&desc->desc_node, &atchan->free_list);
spin_unlock_irqrestore(&atchan->lock, flags);
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
+ /* If the transfer was a memset, free our temporary buffer */
+ if (desc->memset_buffer) {
+ dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+ desc->memset_paddr);
+ desc->memset_buffer = false;
+ }
}
/**
@@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
*/
static void atc_advance_work(struct at_dma_chan *atchan)
{
+ struct at_desc *desc;
unsigned long flags;
- int ret;
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
spin_lock_irqsave(&atchan->lock, flags);
- ret = atc_chan_is_enabled(atchan);
- spin_unlock_irqrestore(&atchan->lock, flags);
- if (ret)
- return;
-
- if (list_empty(&atchan->active_list) ||
- list_is_singular(&atchan->active_list))
- return atc_complete_all(atchan);
+ if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+ return spin_unlock_irqrestore(&atchan->lock, flags);
- atc_chain_complete(atchan, atc_first_active(atchan));
+ desc = atc_first_active(atchan);
+ /* Remove the transfer node from the active list. */
+ list_del_init(&desc->desc_node);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+ atc_chain_complete(atchan, desc);
/* advance work */
spin_lock_irqsave(&atchan->lock, flags);
- atc_dostart(atchan, atc_first_active(atchan));
+ if (!list_empty(&atchan->active_list)) {
+ desc = atc_first_queued(atchan);
+ list_move_tail(&desc->desc_node, &atchan->active_list);
+ atc_dostart(atchan, desc);
+ }
spin_unlock_irqrestore(&atchan->lock, flags);
}
@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
static void atc_handle_error(struct at_dma_chan *atchan)
{
struct at_desc *bad_desc;
+ struct at_desc *desc;
struct at_desc *child;
unsigned long flags;
@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
bad_desc = atc_first_active(atchan);
list_del_init(&bad_desc->desc_node);
- /* As we are stopped, take advantage to push queued descriptors
- * in active_list */
- list_splice_init(&atchan->queue, atchan->active_list.prev);
-
/* Try to restart the controller */
- if (!list_empty(&atchan->active_list))
- atc_dostart(atchan, atc_first_active(atchan));
+ if (!list_empty(&atchan->active_list)) {
+ desc = atc_first_queued(atchan);
+ list_move_tail(&desc->desc_node, &atchan->active_list);
+ atc_dostart(atchan, desc);
+ }
/*
* KERN_CRITICAL may seem harsh, but since this only happens
@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&atchan->lock, flags);
cookie = dma_cookie_assign(tx);
- if (list_empty(&atchan->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
- desc->txd.cookie);
- atc_dostart(atchan, desc);
- list_add_tail(&desc->desc_node, &atchan->active_list);
- } else {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &atchan->queue);
- }
-
+ list_add_tail(&desc->desc_node, &atchan->queue);
spin_unlock_irqrestore(&atchan->lock, flags);
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+ desc->txd.cookie);
return cookie;
}
@@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan)
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id;
- struct at_desc *desc, *_desc;
unsigned long flags;
- LIST_HEAD(list);
-
dev_vdbg(chan2dev(chan), "%s\n", __func__);
/*
@@ -1468,19 +1428,15 @@ static int atc_terminate_all(struct dma_chan *chan)
cpu_relax();
/* active_list entries will end up before queued entries */
- list_splice_init(&atchan->queue, &list);
- list_splice_init(&atchan->active_list, &list);
-
- spin_unlock_irqrestore(&atchan->lock, flags);
-
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
+ list_splice_tail_init(&atchan->queue, &atchan->free_list);
+ list_splice_tail_init(&atchan->active_list, &atchan->free_list);
clear_bit(ATC_IS_PAUSED, &atchan->status);
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
return 0;
}
@@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan,
}
/**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
* @chan: target DMA channel
*/
static void atc_issue_pending(struct dma_chan *chan)
{
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_desc *desc;
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n");
- /* Not needed for cyclic transfers */
- if (atc_chan_is_cyclic(atchan))
- return;
+ spin_lock_irqsave(&atchan->lock, flags);
+ if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+ return spin_unlock_irqrestore(&atchan->lock, flags);
- atc_advance_work(atchan);
+ desc = atc_first_queued(atchan);
+ list_move_tail(&desc->desc_node, &atchan->active_list);
+ atc_dostart(atchan, desc);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
@@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
plat_dat->nr_channels);
- dma_async_device_register(&atdma->dma_common);
+ err = dma_async_device_register(&atdma->dma_common);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+ goto err_dma_async_device_register;
+ }
/*
* Do not return an error if the dmac node is not present in order to
@@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);
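[Editor's note, not part of the patch: after this rework atc_tx_submit() only appends to atchan->queue and the hardware is kicked from atc_issue_pending(), so the driver relies on the standard dmaengine client sequence. A minimal client-side sketch using the generic dmaengine API:]

struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;

desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
			       DMA_PREP_INTERRUPT);
if (!desc)
	return -ENOMEM;

cookie = dmaengine_submit(desc);	/* descriptor lands on atchan->queue */
dma_async_issue_pending(chan);		/* moved to active_list and started */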
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 4d1ebc040031..d4d382d74607 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -186,13 +186,13 @@
/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
/* values that are not changed by hardware */
- dma_addr_t saddr;
- dma_addr_t daddr;
+ u32 saddr;
+ u32 daddr;
/* value that may get written back: */
- u32 ctrla;
+ u32 ctrla;
/* more values that are not changed by hardware */
- u32 ctrlb;
- dma_addr_t dscr; /* chain to next lli */
+ u32 ctrlb;
+ u32 dscr; /* chain to next lli */
};
/**
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index c2808fd081d6..a9b96b18772f 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -312,6 +312,24 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
if (idxd->state != IDXD_DEV_ENABLED)
return -ENXIO;
+ /*
+ * User type WQ is enabled only when SVA is enabled for two reasons:
+ * - If no IOMMU or IOMMU Passthrough without SVA, userspace
+ * can directly access physical address through the WQ.
+ * - The IDXD cdev driver does not provide any ways to pin
+ * user pages and translate the address from user VA to IOVA or
+ * PA without IOMMU SVA. Therefore the application has no way
+ * to instruct the device to perform DMA function. This makes
+ * the cdev not usable for normal application usage.
+ */
+ if (!device_user_pasid_enabled(idxd)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU;
+ dev_dbg(&idxd->pdev->dev,
+ "User type WQ cannot be enabled without SVA.\n");
+
+ return -EOPNOTSUPP;
+ }
+
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_USER;
rc = drv_enable_wq(wq);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 2c1e6f6daa62..6f44fa8f78a5 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -390,7 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
- wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+ idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
if (wq->opcap_bmap)
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@@ -730,13 +730,21 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
void idxd_device_clear_state(struct idxd_device *idxd)
{
- if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
- return;
+ /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ /*
+ * Clearing wq state is protected by wq lock.
+ * So no need to be protected by device lock.
+ */
+ idxd_device_wqs_clear_state(idxd);
+
+ spin_lock(&idxd->dev_lock);
+ idxd_groups_clear_state(idxd);
+ idxd_engines_clear_state(idxd);
+ } else {
+ spin_lock(&idxd->dev_lock);
+ }
- idxd_device_wqs_clear_state(idxd);
- spin_lock(&idxd->dev_lock);
- idxd_groups_clear_state(idxd);
- idxd_engines_clear_state(idxd);
idxd->state = IDXD_DEV_DISABLED;
spin_unlock(&idxd->dev_lock);
}
@@ -869,7 +877,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
/* bytes 12-15 */
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
- wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
+ idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
/* bytes 32-63 */
if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
@@ -1051,7 +1059,7 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
wq->priority = wq->wqcfg->priority;
wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
- wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+ idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 1196ab342f01..7ced8d283d98 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -548,6 +548,38 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
+/*
+ * Intel IAA does not support batch processing.
+ * The max batch size of device, max batch size of wq and
+ * max batch shift of wqcfg should be always 0 on IAA.
+ */
+static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
+ u32 max_batch_size)
+{
+ if (idxd_type == IDXD_TYPE_IAX)
+ idxd->max_batch_size = 0;
+ else
+ idxd->max_batch_size = max_batch_size;
+}
+
+static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
+ u32 max_batch_size)
+{
+ if (idxd_type == IDXD_TYPE_IAX)
+ wq->max_batch_size = 0;
+ else
+ wq->max_batch_size = max_batch_size;
+}
+
+static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
+ u32 max_batch_shift)
+{
+ if (idxd_type == IDXD_TYPE_IAX)
+ wqcfg->max_batch_shift = 0;
+ else
+ wqcfg->max_batch_shift = max_batch_shift;
+}
+
int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 2b18d512cbfc..09cbf0c179ba 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -183,7 +183,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
init_completion(&wq->wq_dead);
init_completion(&wq->wq_resurrect);
wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
- wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+ idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
@@ -418,7 +418,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
- idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+ idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
if (idxd->hw.gen_cap.config_en)
set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index bdaccf9e0436..7269bd54554f 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1046,7 +1046,7 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
if (batch_size > idxd->max_batch_size)
return -EINVAL;
- wq->max_batch_size = (u32)batch_size;
+ idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);
return count;
}
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f629ef6fd3c2..113834e1167b 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -893,6 +893,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
tasklet_kill(&xor_dev->irq_tasklet);
clk_disable_unprepare(xor_dev->clk);
+ clk_disable_unprepare(xor_dev->reg_clk);
return 0;
}
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index e7034f6f3994..22a392fe6d32 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1247,14 +1247,14 @@ static int pxad_init_phys(struct platform_device *op,
return -ENOMEM;
for (i = 0; i < nb_phy_chans; i++)
- if (platform_get_irq(op, i) > 0)
+ if (platform_get_irq_optional(op, i) > 0)
nr_irq++;
for (i = 0; i < nb_phy_chans; i++) {
phy = &pdev->phys[i];
phy->base = pdev->base;
phy->idx = i;
- irq = platform_get_irq(op, i);
+ irq = platform_get_irq_optional(op, i);
if ((nr_irq > 1) && (irq > 0))
ret = devm_request_irq(&op->dev, irq,
pxad_chan_handler,
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 4891a1767e5a..37674029cb42 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -675,6 +675,8 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+ chan->status = DMA_PAUSED;
+
dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
}
@@ -789,9 +791,7 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (status & STM32_DMA_TCI) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
if (scr & STM32_DMA_SCR_TCIE) {
- if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
- stm32_dma_handle_chan_paused(chan);
- else
+ if (chan->status != DMA_PAUSED)
stm32_dma_handle_chan_done(chan, scr);
}
status &= ~STM32_DMA_TCI;
@@ -838,13 +838,11 @@ static int stm32_dma_pause(struct dma_chan *c)
return -EPERM;
spin_lock_irqsave(&chan->vchan.lock, flags);
+
ret = stm32_dma_disable_chan(chan);
- /*
- * A transfer complete flag is set to indicate the end of transfer due to the stream
- * interruption, so wait for interrupt
- */
if (!ret)
- chan->status = DMA_PAUSED;
+ stm32_dma_handle_chan_paused(chan);
+
spin_unlock_irqrestore(&chan->vchan.lock, flags);
return ret;
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index e28acbcb53f4..b9d4c843635f 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1539,6 +1539,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
+ memset(&config, 0, sizeof(config));
config.request = dma_spec->args[0];
config.priority_level = dma_spec->args[1];
config.transfer_config = dma_spec->args[2];
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 4fdd9f06b723..4f1aeb81e9c7 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -299,6 +299,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = device_register(&tx_chn->common.chan_dev);
if (ret) {
dev_err(dev, "Channel Device registration failed %d\n", ret);
+ put_device(&tx_chn->common.chan_dev);
tx_chn->common.chan_dev.parent = NULL;
goto err;
}
@@ -917,6 +918,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
ret = device_register(&rx_chn->common.chan_dev);
if (ret) {
dev_err(dev, "Channel Device registration failed %d\n", ret);
+ put_device(&rx_chn->common.chan_dev);
rx_chn->common.chan_dev.parent = NULL;
goto err;
}
@@ -1048,6 +1050,7 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
ret = device_register(&rx_chn->common.chan_dev);
if (ret) {
dev_err(dev, "Channel Device registration failed %d\n", ret);
+ put_device(&rx_chn->common.chan_dev);
rx_chn->common.chan_dev.parent = NULL;
goto err;
}
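[Editor's note, not part of the patch: all three hunks apply the same driver-core rule. Once device_register() has been called, the embedded kobject owns the structure's lifetime, so a registration failure must be released with put_device() rather than freed directly. Generic sketch of the pattern:]

ret = device_register(dev);
if (ret) {
	/*
	 * Drop the reference taken at initialization; the device's
	 * release() callback, not kfree(), frees the containing object.
	 */
	put_device(dev);
	return ret;
}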
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index b1601aad7e1a..ef5045a53ce0 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -82,7 +82,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64-stub.o
+lib-$(CONFIG_ARM64) += arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_RISCV) += riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch-stub.o
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 259e4b852d63..f9de5217ea65 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -15,6 +15,21 @@
#include "efistub.h"
+static bool system_needs_vamap(void)
+{
+ const u8 *type1_family = efi_get_smbios_string(1, family);
+
+ /*
+ * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
+ * has not been called prior.
+ */
+ if (!type1_family || strcmp(type1_family, "Altra"))
+ return false;
+
+ efi_warn("Working around broken SetVirtualAddressMap()\n");
+ return true;
+}
+
efi_status_t check_platform_features(void)
{
u64 tg;
@@ -24,7 +39,7 @@ efi_status_t check_platform_features(void)
* UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is
* unnecessary.
*/
- if (VA_BITS_MIN >= 48)
+ if (VA_BITS_MIN >= 48 && !system_needs_vamap())
efi_novamap = true;
/* UEFI mandates support for 4 KB granularity, no need to check */
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index a30fb5d8ef05..eb03d5a9aac8 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -975,4 +975,32 @@ efi_enable_reset_attack_mitigation(void) { }
void efi_retrieve_tpm2_eventlog(void);
+struct efi_smbios_record {
+ u8 type;
+ u8 length;
+ u16 handle;
+};
+
+struct efi_smbios_type1_record {
+ struct efi_smbios_record header;
+
+ u8 manufacturer;
+ u8 product_name;
+ u8 version;
+ u8 serial_number;
+ efi_guid_t uuid;
+ u8 wakeup_type;
+ u8 sku_number;
+ u8 family;
+};
+
+#define efi_get_smbios_string(__type, __name) ({ \
+ int size = sizeof(struct efi_smbios_type ## __type ## _record); \
+ int off = offsetof(struct efi_smbios_type ## __type ## _record, \
+ __name); \
+ __efi_get_smbios_string(__type, off, size); \
+})
+
+const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
+
#endif
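[Editor's note, not part of the patch: the efi_get_smbios_string() macro above only computes the member offset and the size of the formatted record. With the type 1 layout from this hunk, efi_get_smbios_string(1, family) expands to roughly:]

__efi_get_smbios_string(1,
			offsetof(struct efi_smbios_type1_record, family),
			sizeof(struct efi_smbios_type1_record));

[__efi_get_smbios_string() then reads the string index stored at that offset and walks the NUL-terminated string table that follows the formatted area, as implemented in smbios.c below.]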
diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c
new file mode 100644
index 000000000000..460418b7f5f5
--- /dev/null
+++ b/drivers/firmware/efi/libstub/smbios.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2022 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+#include <linux/efi.h>
+
+#include "efistub.h"
+
+typedef struct efi_smbios_protocol efi_smbios_protocol_t;
+
+struct efi_smbios_protocol {
+ efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t,
+ u16 *, struct efi_smbios_record *);
+ efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *,
+ unsigned long *, u8 *);
+ efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16);
+ efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *,
+ struct efi_smbios_record **,
+ efi_handle_t *);
+
+ u8 major_version;
+ u8 minor_version;
+};
+
+const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
+{
+ struct efi_smbios_record *record;
+ efi_smbios_protocol_t *smbios;
+ efi_status_t status;
+ u16 handle = 0xfffe;
+ const u8 *strtable;
+
+ status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
+ (void **)&smbios) ?:
+ efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ strtable = (u8 *)record + recsize;
+ for (int i = 1; i < ((u8 *)record)[offset]; i++) {
+ int len = strlen(strtable);
+
+ if (!len)
+ return NULL;
+ strtable += len + 1;
+ }
+ return strtable;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1bbd39b3b0fc..d371000a5727 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -992,6 +992,7 @@ out_free_user_pages:
kvfree(e->user_pages);
e->user_pages = NULL;
}
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2291aa14d888..003aa9e47085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,32 +143,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return 0;
}
-/*
- * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
- * happens while holding this lock anywhere to prevent deadlocks when
- * an MMU notifier runs in reclaim-FS context.
- */
-static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
-{
- mutex_lock(&vm->eviction_lock);
- vm->saved_flags = memalloc_noreclaim_save();
-}
-
-static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
-{
- if (mutex_trylock(&vm->eviction_lock)) {
- vm->saved_flags = memalloc_noreclaim_save();
- return 1;
- }
- return 0;
-}
-
-static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
-{
- memalloc_noreclaim_restore(vm->saved_flags);
- mutex_unlock(&vm->eviction_lock);
-}
-
/**
* amdgpu_vm_bo_evicted - vm_bo is evicted
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 83acb7bd80fe..6546e786bf00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -492,7 +492,48 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
*/
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
+ unsigned long flags;
+ spinlock_t *lock;
+
+ /*
+ * Workaround to stop racing between the fence signaling and handling
+ * the cb. The lock is static after initially setting it up, just make
+ * sure that the dma_fence structure isn't freed up.
+ */
+ rcu_read_lock();
+ lock = vm->last_tlb_flush->lock;
+ rcu_read_unlock();
+
+ spin_lock_irqsave(lock, flags);
+ spin_unlock_irqrestore(lock, flags);
+
return atomic64_read(&vm->tlb_seq);
}
+/*
+ * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+ mutex_lock(&vm->eviction_lock);
+ vm->saved_flags = memalloc_noreclaim_save();
+}
+
+static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+ if (mutex_trylock(&vm->eviction_lock)) {
+ vm->saved_flags = memalloc_noreclaim_save();
+ return true;
+ }
+ return false;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+ memalloc_noreclaim_restore(vm->saved_flags);
+ mutex_unlock(&vm->eviction_lock);
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 358b91243e37..b5f3bba851db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -597,7 +597,9 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
if (entry->bo)
return 0;
+ amdgpu_vm_eviction_unlock(vm);
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+ amdgpu_vm_eviction_lock(vm);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 73a517bcf5c1..80dd1343594c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -435,7 +435,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
- if (fpfn || lpfn != man->size)
+ if (fpfn || lpfn != mgr->mm.size)
/* Allocate blocks in desired range */
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5feaba6a77de..6d291aa6386b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1950,7 +1950,7 @@ static int criu_checkpoint(struct file *filep,
{
int ret;
uint32_t num_devices, num_bos, num_objects;
- uint64_t priv_size, priv_offset = 0;
+ uint64_t priv_size, priv_offset = 0, bo_priv_offset;
if (!args->devices || !args->bos || !args->priv_data)
return -EINVAL;
@@ -1994,38 +1994,34 @@ static int criu_checkpoint(struct file *filep,
if (ret)
goto exit_unlock;
- ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
- (uint8_t __user *)args->priv_data, &priv_offset);
- if (ret)
- goto exit_unlock;
+ /* Leave room for BOs in the private data. They need to be restored
+ * before events, but we checkpoint them last to simplify the error
+ * handling.
+ */
+ bo_priv_offset = priv_offset;
+ priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
if (num_objects) {
ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
&priv_offset);
if (ret)
- goto close_bo_fds;
+ goto exit_unlock;
ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
&priv_offset);
if (ret)
- goto close_bo_fds;
+ goto exit_unlock;
ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
if (ret)
- goto close_bo_fds;
+ goto exit_unlock;
}
-close_bo_fds:
- if (ret) {
- /* If IOCTL returns err, user assumes all FDs opened in criu_dump_bos are closed */
- uint32_t i;
- struct kfd_criu_bo_bucket *bo_buckets = (struct kfd_criu_bo_bucket *) args->bos;
-
- for (i = 0; i < num_bos; i++) {
- if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
- close_fd(bo_buckets[i].dmabuf_fd);
- }
- }
+ /* This must be the last thing in this function that can fail.
+ * Otherwise we leak dmabuf file descriptors.
+ */
+ ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
+ (uint8_t __user *)args->priv_data, &bo_priv_offset);
exit_unlock:
mutex_unlock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 83e3ce9f6049..729d26d648af 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -506,6 +506,7 @@ int kfd_criu_restore_event(struct file *devkfd,
ret = create_other_event(p, ev, &ev_priv->event_id);
break;
}
+ mutex_unlock(&p->event_mutex);
exit:
if (ret)
@@ -513,8 +514,6 @@ exit:
kfree(ev_priv);
- mutex_unlock(&p->event_mutex);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 589bee9acf16..509739d83b5a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7622,9 +7622,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
- fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
- new_crtc_state,
- &bundle->flip_addrs[planes_count]);
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
+ fill_dc_dirty_rects(plane, old_plane_state,
+ new_plane_state, new_crtc_state,
+ &bundle->flip_addrs[planes_count]);
/*
* Only allow immediate flips for fast updates that don't
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 1131c6d73f6c..20a06c04e4a1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -363,32 +363,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
@@ -400,32 +400,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 737b221ca689..0598465fd1a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -853,6 +853,7 @@ struct dc_debug_options {
bool enable_dp_dig_pixel_rate_div_policy;
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
+ unsigned int min_prefetch_in_strobe_ns;
};
struct gpu_info_soc_bounding_box_v1_0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 7e773bf7b895..38842f938bed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -49,18 +49,30 @@
#define CTX \
enc1->base.ctx
+static void enc314_reset_fifo(struct stream_encoder *enc, bool reset)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t reset_val = reset ? 1 : 0;
+ uint32_t is_symclk_on;
+
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val);
+ REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on);
+
+ if (is_symclk_on)
+ REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000);
+ else
+ udelay(10);
+}
static void enc314_enable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- /* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
- REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
+
+ enc314_reset_fifo(enc, true);
+ enc314_reset_fifo(enc, false);
+
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index a88dd7b3d1c1..d1598e3131f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -724,6 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
+ .min_prefetch_in_strobe_ns = 60000, // 60us
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 61087f2385a9..6292ac515d1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -722,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
+ .min_prefetch_in_strobe_ns = 60000, // 60us
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index cf420ad2b8dc..34b6c763a455 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
.sr_exit_z8_time_us = 442.0,
.sr_enter_plus_exit_z8_time_us = 560.0,
.writeback_latency_us = 12.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index f37c9a6b3b7e..659323ebd79d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -2364,6 +2364,8 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
/* DML DSC delay factor workaround */
dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
+ dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
+
/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 3d184679f129..244fd15d24b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -786,6 +786,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
+ v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
@@ -3192,6 +3194,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.FCLKChangeLatency, mode_lib->vba.UrgLatency[i],
mode_lib->vba.SREnterPlusExitTime);
+ memset(&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull, 0, sizeof(DmlPipe));
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dppclk = mode_lib->vba.RequiredDPPCLK[i][j][k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dispclk = mode_lib->vba.RequiredDISPCLK[i][j];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
@@ -3244,6 +3247,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
+ v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index c62e0991358b..f82e14cd9d8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -49,6 +49,9 @@
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define MEM_STROBE_FREQ_MHZ 1600
+#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
+
struct display_mode_lib;
void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 968924c491c1..635fc54338fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3417,6 +3417,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
+ double TPreReq,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
@@ -3474,6 +3475,7 @@ bool dml32_CalculatePrefetchSchedule(
double min_Lsw;
double Tsw_est1 = 0;
double Tsw_est3 = 0;
+ double TPreMargin = 0;
if (v->GPUVMEnable == true && v->HostVMEnable == true)
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
@@ -3699,6 +3701,8 @@ bool dml32_CalculatePrefetchSchedule(
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
+
+ TPreMargin = Tpre_rounded - TPreReq;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
@@ -3726,7 +3730,7 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (dst_y_prefetch_equ > 1) {
+ if (dst_y_prefetch_equ > 1 && TPreMargin > 0.0) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
@@ -3872,7 +3876,11 @@ bool dml32_CalculatePrefetchSchedule(
}
if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
- *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ if (dst_y_prefetch_oto * LineTime < TPreReq) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ } else {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ }
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
@@ -4397,7 +4405,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+ - (1.0 - 1.0 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
/ v->PixelClock[k] / v->VRatio[k];
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 2c3827546ac7..3989c2a28fae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,7 +30,7 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
-#include "dml/display_mode_vba.h"
+#include "../display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -743,6 +743,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
+ double TPreReq,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index ec0486efab14..432b4ecd01a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -544,6 +544,8 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
/* DML DSC delay factor workaround */
dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
+ dcn3_21_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
+
/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
dcn3_21_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index d7be01ac0751..64d602e6412f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -367,6 +367,7 @@ struct _vcs_dpi_ip_params_st {
/* DM workarounds */
double dsc_delay_factor_wa; // TODO: Remove after implementing root cause fix
+ double min_prefetch_in_strobe_us;
};
struct _vcs_dpi_display_xfc_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
index d635b73af46f..0ea52ba5ac82 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
@@ -108,6 +108,13 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
{
+	// add a dummy entry for cases where there is no such port
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ },
+ {
DDC_GPIO_VGA_REG_LIST(DATA),
.ddc_setup = 0,
.phy_aux_cntl = 0,
@@ -122,6 +129,13 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
{
+	// add a dummy entry for cases where there is no such port
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ },
+ {
DDC_GPIO_VGA_REG_LIST(CLK),
.ddc_setup = 0,
.phy_aux_cntl = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 6fd38cdd68c0..525bc8881950 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -94,11 +94,14 @@ static enum gpio_result set_config(
* is required for detection of AUX mode */
if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) {
if (!ddc_data_pd_en || !ddc_clk_pd_en) {
-
- REG_SET_2(gpio.MASK_reg, regval,
+ if (hw_gpio->base.en == GPIO_DDC_LINE_DDC_VGA) {
+ // bit 4 of mask has different usage in some cases
+ REG_SET(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1);
+ } else {
+ REG_SET_2(gpio.MASK_reg, regval,
DC_GPIO_DDC1DATA_PD_EN, 1,
DC_GPIO_DDC1CLK_PD_EN, 1);
-
+ }
if (config_data->type ==
GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
msleep(3);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index dad3e3741a4e..190af79f3236 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
- uint32_t current_rpm;
- uint32_t percent = 0;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
- if (vega10_get_current_rpm(hwmgr, &current_rpm))
- return -1;
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
- if (hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM != 0)
- percent = current_rpm * 255 /
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM;
+ if (!duty100)
+ return -EINVAL;
- *speed = MIN(percent, 255);
+ tmp64 = (uint64_t)duty * 255;
+ do_div(tmp64, duty100);
+ *speed = MIN((uint32_t)tmp64, 255);
return 0;
}
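
Editor's note: the new read path scales FDO_PWM_DUTY against FMAX_DUTY100 into the 0..255 range instead of estimating from RPM. A hedged, standalone restatement of that arithmetic follows; the function name is illustrative, and do_div() is replaced by ordinary division here since this is plain C, not hwmgr code.

#include <stdint.h>

/* duty100 is the register value that represents 100% duty; result is 0..255. */
static int duty_to_pwm255(uint32_t duty, uint32_t duty100, uint32_t *speed)
{
        uint64_t tmp64;

        if (!duty100)
                return -1;              /* stands in for -EINVAL */

        tmp64 = (uint64_t)duty * 255;   /* widen before multiplying, as the patch does */
        tmp64 /= duty100;               /* do_div(tmp64, duty100) in the kernel */
        *speed = tmp64 > 255 ? 255 : (uint32_t)tmp64;
        return 0;
}

For example, duty = 64 with duty100 = 255 reports a speed of 64, i.e. roughly a quarter of the full PWM range.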
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h
index d9b0cd752200..f4d6c07b56ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h
@@ -54,14 +54,14 @@
#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
-#define PPSMC_MSG_EnableGfxOff 0x04 ///< Enable GFXOFF
-#define PPSMC_MSG_DisableGfxOff 0x05 ///< Disable GFXOFF
+#define PPSMC_MSG_SPARE0 0x04 ///< SPARE
+#define PPSMC_MSG_SPARE1 0x05 ///< SPARE
#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN
#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
-#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Needs update
-#define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. enter GFXOFF
+#define PPSMC_MSG_SPARE2 0x0A ///< SPARE
+#define PPSMC_MSG_SPARE3 0x0B ///< SPARE
#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
@@ -73,8 +73,7 @@
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
-
-#define PPSMC_MSG_EnableGfxImu 0x16 ///< Needs update
+#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
#define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency
#define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency
@@ -102,8 +101,8 @@
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
-#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
-#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn
+#define PPSMC_MSG_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
+#define PPSMC_MSG_IspStutterOff_MmhubPgEn 0x30 ///< ISP StutterOff mmHub PgEn
#define PPSMC_Message_Count 0x31 ///< Total number of PPSMC messages
/** @}*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 74996a8fb671..6212fd270857 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -377,7 +377,9 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
if (((adev->pdev->device == 0x73A1) &&
(adev->pdev->revision == 0x00)) ||
((adev->pdev->device == 0x73BF) &&
- (adev->pdev->revision == 0xCF)))
+ (adev->pdev->revision == 0xCF)) ||
+ ((adev->pdev->device == 0x7422) &&
+ (adev->pdev->revision == 0x00)))
smu_baco->platform_support = false;
}
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 8a0c0e0bb5bd..52d8800a8ab8 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -134,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Acer Switch V 10 (SW5-017) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Anbernic Win600 */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"),
@@ -319,6 +325,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Nanote UMPC-01 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index d4cce627d7a8..15c3e448aa0e 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -2201,8 +2201,11 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 val;
- if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+ if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+	/* Send one update, otherwise lag is observed on screen */
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
return;
+ }
val = man_trk_ctl_enable_bit_get(dev_priv) |
man_trk_ctl_partial_frame_bit_get(dev_priv) |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index f5062d0c6333..824971a1ceec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -40,13 +40,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
goto err;
}
- ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
if (ret)
goto err_free;
src = obj->mm.pages->sgl;
dst = st->sgl;
- for (i = 0; i < obj->mm.pages->nents; i++) {
+ for (i = 0; i < obj->mm.pages->orig_nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
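
Editor's note: the fix hinges on the two counts kept in struct sg_table: orig_nents is the number of CPU-side entries, while nents is the (possibly smaller) count after DMA mapping coalesces them. A hedged sketch of the distinction, with an illustrative helper name; not i915 code.

#include <linux/scatterlist.h>

/* Walk the CPU-side pages of a table; uses orig_nents, as the hunk above now does,
 * because nents only counts the DMA-mapped (possibly merged) segments. */
static void walk_cpu_pages(struct sg_table *sgt)
{
        struct scatterlist *sg;
        unsigned int i;

        for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
                ;       /* sg_page(sg) and sg->length describe one CPU-side entry */
}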
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 11125c32dd35..2f7804492cd5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -369,14 +369,14 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
__start_cpu_write(obj);
/*
- * On non-LLC platforms, force the flush-on-acquire if this is ever
+ * On non-LLC igfx platforms, force the flush-on-acquire if this is ever
* swapped-in. Our async flush path is not trustworthy enough yet (and
* happens in the wrong order), and with some tricks it's conceivable
* for userspace to change the cache-level to I915_CACHE_NONE after the
* pages are swapped-in, and since execbuf binds the object before doing
* the async flush, we have a race window.
*/
- if (!HAS_LLC(i915))
+ if (!HAS_LLC(i915) && !IS_DGFX(i915))
obj->cache_dirty = true;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index f34e01a7fefb..ba14b18d65f3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -428,9 +428,10 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
VMA_ITERATOR(vmi, mm, addr);
struct vm_area_struct *vma;
+ unsigned long end = addr + len;
mmap_read_lock(mm);
- for_each_vma_range(vmi, vma, addr + len) {
+ for_each_vma_range(vmi, vma, end) {
/* Check for holes, note that we also update the addr below */
if (vma->vm_start > addr)
break;
@@ -442,7 +443,7 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
}
mmap_read_unlock(mm);
- if (vma)
+ if (vma || addr < end)
return -EFAULT;
return 0;
}
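
Editor's note: a standalone model of the hole check above, with plain arrays standing in for the VMA iterator (names and structure are illustrative only). It shows why the extra addr < end test is needed: a range that extends past the last VMA leaves the loop with no current VMA, which used to be reported as success.

/* Sketch: addr advances to each region's end; any gap, including one after the
 * final region, must fail. vma_start/vma_end are sorted, non-overlapping ranges. */
static int range_fully_mapped(const unsigned long *vma_start,
                              const unsigned long *vma_end, int nvmas,
                              unsigned long addr, unsigned long end)
{
        int i;

        for (i = 0; i < nvmas && vma_start[i] < end; i++) {
                if (vma_start[i] > addr)
                        return -1;      /* hole in the middle (-EFAULT) */
                addr = vma_end[i];
        }
        return addr < end ? -1 : 0;     /* trailing hole: the case fixed above */
}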
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index e246d914e7f6..4e83a1891f3e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -250,13 +250,22 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
+ /*
+ * io-pgtable only operates on multiple pages within a single table
+ * entry, so we need to split at boundaries of the table size, i.e.
+ * the next block size up. The distance from address A to the next
+ * boundary of block size B is logically B - A % B, but in unsigned
+ * two's complement where B is a power of two we get the equivalence
+ * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
+ */
size_t blk_offset = -addr % SZ_2M;
if (blk_offset || size < SZ_2M) {
*count = min_not_zero(blk_offset, size) / SZ_4K;
return SZ_4K;
}
- *count = size / SZ_2M;
+ blk_offset = -addr % SZ_1G ?: SZ_1G;
+ *count = min(blk_offset, size) / SZ_2M;
return SZ_2M;
}
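
Editor's note: a standalone illustration of the -addr % block trick the comment above describes, for readers who want the unsigned wrap-around worked through (plain C with illustrative values, not io-pgtable code).

#include <stdio.h>

#define SZ_2M (2UL * 1024 * 1024)

int main(void)
{
        unsigned long addr = 0x1f0000;          /* 64 KiB below the 2 MiB boundary */
        unsigned long dist = -addr % SZ_2M;     /* unsigned wrap-around: 0x10000 */

        printf("distance to next 2M boundary: 0x%lx\n", dist);
        printf("already aligned case: 0x%lx\n", -SZ_2M % SZ_2M);        /* 0 */
        return 0;
}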
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c959e8c6be7d..fd2c2eaee26b 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -44,13 +44,18 @@ config DRM_RCAR_LVDS
select OF_FLATTREE
select OF_OVERLAY
-config DRM_RCAR_MIPI_DSI
- tristate "R-Car DU MIPI DSI Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
- select DRM_MIPI_DSI
+config DRM_RCAR_USE_MIPI_DSI
+ bool "R-Car DU MIPI DSI Encoder Support"
+ depends on DRM_BRIDGE && OF
+ default DRM_RCAR_DU
help
Enable support for the R-Car Display Unit embedded MIPI DSI encoders.
+config DRM_RCAR_MIPI_DSI
+ def_tristate DRM_RCAR_DU
+ depends on DRM_RCAR_USE_MIPI_DSI
+ select DRM_MIPI_DSI
+
config DRM_RCAR_VSP
bool "R-Car DU VSP Compositor Support" if ARM
default y if ARM64
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 2027063fdc30..8c329c071c62 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -476,7 +476,12 @@ static int __init vc4_drm_register(void)
if (ret)
return ret;
- return platform_driver_register(&vc4_platform_driver);
+ ret = platform_driver_register(&vc4_platform_driver);
+ if (ret)
+ platform_unregister_drivers(component_drivers,
+ ARRAY_SIZE(component_drivers));
+
+ return ret;
}
static void __exit vc4_drm_unregister(void)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 596e311d6e58..470432c8fd70 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -349,27 +349,40 @@ static int vc4_hdmi_reset_link(struct drm_connector *connector,
if (!crtc_state->active)
return 0;
- if (!vc4_hdmi_supports_scrambling(encoder))
+ mutex_lock(&vc4_hdmi->mutex);
+
+ if (!vc4_hdmi_supports_scrambling(encoder)) {
+ mutex_unlock(&vc4_hdmi->mutex);
return 0;
+ }
scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode,
vc4_hdmi->output_bpc,
vc4_hdmi->output_format);
- if (!scrambling_needed)
+ if (!scrambling_needed) {
+ mutex_unlock(&vc4_hdmi->mutex);
return 0;
+ }
if (conn_state->commit &&
- !try_wait_for_completion(&conn_state->commit->hw_done))
+ !try_wait_for_completion(&conn_state->commit->hw_done)) {
+ mutex_unlock(&vc4_hdmi->mutex);
return 0;
+ }
ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
drm_err(drm, "Failed to read TMDS config: %d\n", ret);
+ mutex_unlock(&vc4_hdmi->mutex);
return 0;
}
- if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed)
+ if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed) {
+ mutex_unlock(&vc4_hdmi->mutex);
return 0;
+ }
+
+ mutex_unlock(&vc4_hdmi->mutex);
/*
* HDMI 2.0 says that one should not send scrambled data
@@ -397,9 +410,8 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
* .adap_enable, which leads to that function being called with
* our mutex held.
*
- * A similar situation occurs with
- * drm_atomic_helper_connector_hdmi_reset_link() that will call
- * into our KMS hooks if the scrambling was enabled.
+ * A similar situation occurs with vc4_hdmi_reset_link() that
+ * will call into our KMS hooks if the scrambling was enabled.
*
* Concurrency isn't an issue at the moment since we don't share
* any state with any of the other frameworks so we can ignore
@@ -3160,9 +3172,16 @@ static int vc4_hdmi_init_resources(struct drm_device *drm,
DRM_ERROR("Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
+
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock;
+ vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) {
+ DRM_ERROR("Failed to get HDMI state machine clock\n");
+ return PTR_ERR(vc4_hdmi->hsm_rpm_clock);
+ }
+
return 0;
}
@@ -3245,6 +3264,12 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
return PTR_ERR(vc4_hdmi->hsm_clock);
}
+ vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) {
+ DRM_ERROR("Failed to get HDMI state machine clock\n");
+ return PTR_ERR(vc4_hdmi->hsm_rpm_clock);
+ }
+
vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb");
if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) {
DRM_ERROR("Failed to get pixel bvb clock\n");
@@ -3308,7 +3333,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->hsm_rpm_clock);
return 0;
}
@@ -3326,11 +3351,11 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
* its frequency while the power domain is active so that it
* keeps its rate.
*/
- ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ);
+ ret = clk_set_min_rate(vc4_hdmi->hsm_rpm_clock, HSM_MIN_CLOCK_FREQ);
if (ret)
return ret;
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ ret = clk_prepare_enable(vc4_hdmi->hsm_rpm_clock);
if (ret)
return ret;
@@ -3343,7 +3368,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
* case, it will lead to a silent CPU stall. Let's make sure we
* prevent such a case.
*/
- rate = clk_get_rate(vc4_hdmi->hsm_clock);
+ rate = clk_get_rate(vc4_hdmi->hsm_rpm_clock);
if (!rate) {
ret = -EINVAL;
goto err_disable_clk;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index db823efb2563..1ad8e8c377e2 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -172,6 +172,7 @@ struct vc4_hdmi {
struct clk *cec_clock;
struct clk *pixel_clock;
struct clk *hsm_clock;
+ struct clk *hsm_rpm_clock;
struct clk *audio_clock;
struct clk *pixel_bvb_clock;
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index b59c3dafa6a4..f99752b998f3 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -219,14 +219,13 @@ static void asus_report_tool_width(struct asus_drvdata *drvdat)
{
struct input_mt *mt = drvdat->input->mt;
struct input_mt_slot *oldest;
- int oldid, count, i;
+ int oldid, i;
if (drvdat->tp->contact_size < 5)
return;
oldest = NULL;
oldid = mt->trkid;
- count = 0;
for (i = 0; i < mt->num_slots; ++i) {
struct input_mt_slot *ps = &mt->slots[i];
@@ -238,7 +237,6 @@ static void asus_report_tool_width(struct asus_drvdata *drvdat)
oldest = ps;
oldid = id;
}
- count++;
}
if (oldest) {
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index e0bc73124196..ab57b49a44ed 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -499,7 +499,7 @@ static int mousevsc_probe(struct hv_device *device,
ret = hid_add_device(hid_dev);
if (ret)
- goto probe_err1;
+ goto probe_err2;
ret = hid_parse(hid_dev);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 77486962a773..0f3d57b42684 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2520,11 +2520,12 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
int id = wacom_wac->id[0];
- if (wacom_wac->features.quirks & WACOM_QUIRK_PEN_BUTTON3 &&
- wacom_wac->hid_data.barrelswitch & wacom_wac->hid_data.barrelswitch2) {
- wacom_wac->hid_data.barrelswitch = 0;
- wacom_wac->hid_data.barrelswitch2 = 0;
- wacom_wac->hid_data.barrelswitch3 = 1;
+ if (wacom_wac->features.quirks & WACOM_QUIRK_PEN_BUTTON3) {
+ int sw_state = wacom_wac->hid_data.barrelswitch |
+ (wacom_wac->hid_data.barrelswitch2 << 1);
+ wacom_wac->hid_data.barrelswitch = sw_state == 1;
+ wacom_wac->hid_data.barrelswitch2 = sw_state == 2;
+ wacom_wac->hid_data.barrelswitch3 = sw_state == 3;
}
input_report_key(input, BTN_STYLUS, wacom_wac->hid_data.barrelswitch);
input_report_key(input, BTN_STYLUS2, wacom_wac->hid_data.barrelswitch2);
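
Editor's note: the rewrite folds the two physical barrel switches into a 2-bit state so that pressing both is reported only as the synthetic third button. A self-contained restatement of that mapping, with made-up type and function names; not wacom code.

struct pen_buttons { int stylus, stylus2, stylus3; };

/* state 0: nothing; 1: first switch; 2: second switch; 3: both -> third button only */
static struct pen_buttons map_barrel_switches(int sw1, int sw2)
{
        int state = (sw1 ? 1 : 0) | (sw2 ? 2 : 0);

        return (struct pen_buttons){ state == 1, state == 2, state == 3 };
}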
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index fdf6decacf06..6c127f061f06 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -905,7 +905,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
* We have some residual hot add range
* that needs to be hot added; hot add
* it now. Hot add a multiple of
- * of HA_CHUNK that fully covers the pages
+ * HA_CHUNK that fully covers the pages
* we have.
*/
size = (has->end_pfn - has->ha_end_pfn);
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 80ea45b3a815..9cf186362ae2 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -22,6 +22,7 @@
struct qcom_hwspinlock_of_data {
u32 offset;
u32 stride;
+ const struct regmap_config *regmap_config;
};
static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
@@ -73,15 +74,42 @@ static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
.stride = 0x4,
};
-/* All modern platform has offset 0 and stride of 4k */
+static const struct regmap_config tcsr_msm8226_mutex_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static const struct qcom_hwspinlock_of_data of_msm8226_tcsr_mutex = {
+ .offset = 0,
+ .stride = 0x80,
+ .regmap_config = &tcsr_msm8226_mutex_config,
+};
+
+static const struct regmap_config tcsr_mutex_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true,
+};
+
static const struct qcom_hwspinlock_of_data of_tcsr_mutex = {
.offset = 0,
.stride = 0x1000,
+ .regmap_config = &tcsr_mutex_config,
};
static const struct of_device_id qcom_hwspinlock_of_match[] = {
{ .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex },
{ .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex },
+ { .compatible = "qcom,apq8084-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
+ { .compatible = "qcom,ipq6018-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
+ { .compatible = "qcom,msm8226-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
+ { .compatible = "qcom,msm8974-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
+ { .compatible = "qcom,msm8994-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
@@ -117,14 +145,6 @@ static struct regmap *qcom_hwspinlock_probe_syscon(struct platform_device *pdev,
return regmap;
}
-static const struct regmap_config tcsr_mutex_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = 0x40000,
- .fast_io = true,
-};
-
static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
u32 *offset, u32 *stride)
{
@@ -133,6 +153,8 @@ static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
void __iomem *base;
data = of_device_get_match_data(dev);
+ if (!data->regmap_config)
+ return ERR_PTR(-EINVAL);
*offset = data->offset;
*stride = data->stride;
@@ -141,7 +163,7 @@ static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
if (IS_ERR(base))
return ERR_CAST(base);
- return devm_regmap_init_mmio(dev, base, &tcsr_mutex_config);
+ return devm_regmap_init_mmio(dev, base, data->regmap_config);
}
static int qcom_hwspinlock_probe(struct platform_device *pdev)
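
Editor's note: the per-compatible data now carries both the lock stride and a matching regmap window. Assuming the usual 32 TCSR mutex locks, the relationship between stride and max_register works out as sketched below (illustrative only, not driver code).

/* Each lock lives at offset + id * stride. */
static unsigned int lock_reg(unsigned int offset, unsigned int stride, unsigned int id)
{
        return offset + id * stride;
}

/* msm8226-class TCSR: lock 31 -> 31 * 0x80   = 0x0f80,  inside max_register 0x1000
 * modern TCSR:        lock 31 -> 31 * 0x1000 = 0x1f000, inside max_register 0x20000 */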
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index aff36a933ebe..55d8bd232695 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -12,6 +12,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -55,7 +56,7 @@ static void brcmstb_reset(struct sdhci_host *host, u8 mask)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
/* Reset will clear this, so re-enable it */
if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK)
diff --git a/drivers/mmc/host/sdhci-cqhci.h b/drivers/mmc/host/sdhci-cqhci.h
new file mode 100644
index 000000000000..cf8e7ba71bbd
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cqhci.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022 The Chromium OS Authors
+ *
+ * Support that applies to the combination of SDHCI and CQHCI, while not
+ * expressing a dependency between the two modules.
+ */
+
+#ifndef __MMC_HOST_SDHCI_CQHCI_H__
+#define __MMC_HOST_SDHCI_CQHCI_H__
+
+#include "cqhci.h"
+#include "sdhci.h"
+
+static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+ host->mmc->cqe_private)
+ cqhci_deactivate(host->mmc);
+
+ sdhci_reset(host, mask);
+}
+
+#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 747df79d90ee..31ea0a2fce35 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -25,6 +25,7 @@
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
#include "cqhci.h"
@@ -1288,7 +1289,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
@@ -1671,14 +1672,14 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- if (host->caps & MMC_CAP_8_BIT_DATA &&
+ if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
imx_data->socdata->flags & ESDHC_FLAG_HS400)
host->mmc->caps2 |= MMC_CAP2_HS400;
if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
- if (host->caps & MMC_CAP_8_BIT_DATA &&
+ if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
host->mmc->caps2 |= MMC_CAP2_HS400_ES;
host->mmc_host_ops.hs400_enhanced_strobe =
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 3997cad1f793..cfb891430174 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -25,6 +25,7 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "cqhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
@@ -366,7 +367,7 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) {
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 413925bce0ca..c71000a07656 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -28,6 +28,7 @@
#include <soc/tegra/common.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -367,7 +368,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
u32 misc_ctrl, clk_ctrl, pad_ctrl;
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (!(mask & SDHCI_RESET_ALL))
return;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 8f1023480e12..c2333c7acac9 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -15,6 +15,7 @@
#include <linux/sys_soc.h>
#include "cqhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
/* CTL_CFG Registers */
@@ -378,7 +379,7 @@ static void sdhci_am654_reset(struct sdhci_host *host, u8 mask)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) {
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
@@ -464,7 +465,7 @@ static struct sdhci_ops sdhci_am654_ops = {
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_and_cqhci_reset,
};
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
@@ -494,7 +495,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_and_cqhci_reset,
};
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 3a2d109a3792..199cb200f2bd 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -452,7 +452,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
mb = get_tx_next_mb(priv);
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index d6605dbb7737..c63f7fc1e691 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -457,7 +457,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
struct c_can_tx_ring *tx_ring = &priv->tx;
u32 idx, obj, cmd = IF_COMM_TX;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
if (c_can_tx_busy(priv, tx_ring))
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
index 0aa1af31d0fe..094197780776 100644
--- a/drivers/net/can/can327.c
+++ b/drivers/net/can/can327.c
@@ -813,7 +813,7 @@ static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb,
struct can327 *elm = netdev_priv(dev);
struct can_frame *frame = (struct can_frame *)skb->data;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
/* We shouldn't get here after a hardware fault:
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 0b9dfc76e769..30909f3aab57 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -429,7 +429,7 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cc770_priv *priv = netdev_priv(dev);
unsigned int mo = obj2msgobj(CC770_OBJ_TX);
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index b8da15ea6ad9..64c349fd4600 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -600,7 +600,7 @@ static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *nde
bool ok;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 791a51e2f5d6..241ec636e91f 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -5,7 +5,6 @@
*/
#include <linux/can/dev.h>
-#include <linux/can/netlink.h>
#include <linux/module.h>
#define MOD_DESC "CAN device driver interface"
@@ -337,8 +336,6 @@ static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
{
- struct can_priv *priv = netdev_priv(dev);
-
switch (ntohs(skb->protocol)) {
case ETH_P_CAN:
if (!can_is_can_skb(skb))
@@ -359,13 +356,8 @@ bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
goto inval_skb;
}
- if (!can_skb_headroom_valid(dev, skb)) {
+ if (!can_skb_headroom_valid(dev, skb))
goto inval_skb;
- } else if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
- netdev_info_once(dev,
- "interface in listen only mode, dropping skb\n");
- goto inval_skb;
- }
return false;
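
Editor's note: the listen-only check removed here is not lost; the tree-wide replacement of can_dropped_invalid_skb() with can_dev_dropped_skb() in the transmit paths converted above and below moves it into the new helper. A hedged sketch of what that helper presumably does, inferred from the conversion pattern rather than quoted from include/linux/can/dev.h.

static inline bool can_dev_dropped_skb_sketch(struct net_device *dev, struct sk_buff *skb)
{
        struct can_priv *priv = netdev_priv(dev);

        /* device-level policy first: listen-only interfaces never transmit */
        if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
                netdev_info_once(dev, "interface in listen only mode, dropping skb\n");
                kfree_skb(skb);
                dev->stats.tx_dropped++;
                return true;
        }

        /* then the plain frame-validity check, which no longer needs can_priv */
        return can_dropped_invalid_skb(dev, skb);
}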
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 5ee38e586fd8..9bdadd716f4e 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -742,7 +742,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16);
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 6c37aab93eb3..4bedcc3eea0d 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1345,7 +1345,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
unsigned long flags;
u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
/* Trying to transmit in silent mode will generate error interrupts, but
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 8d42b7e6661f..07eaf724a572 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -860,7 +860,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
u32 txst, txid, txdlc;
int i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
/* Check if the TX buffer is full */
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 71a2caae0757..0732a5092141 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1693,7 +1693,7 @@ static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
void __iomem *desc_addr;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
spin_lock_irqsave(&mod->lock, flags);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 4e9680c8eb34..bcad11709bc9 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -772,7 +772,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
int nwords;
u8 count;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index dcb582563d5e..00d11e95fd98 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1721,7 +1721,7 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
{
struct m_can_classdev *cdev = netdev_priv(dev);
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
if (cdev->is_peripheral) {
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 2119fbb287ef..a6829cdc0e81 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -191,7 +191,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 0558ff67ec6a..2a44b2803e55 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -882,7 +882,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
int i;
u32 id2;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
tx_obj_no = priv->tx_obj;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index f8420cc1d907..31c9c127e24b 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -651,7 +651,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
int room_left;
u8 len;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 6ee968c59ac9..cc43c9c5e38c 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -590,7 +590,7 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
u32 data, i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 198da643ee6d..b306cf554634 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -81,8 +81,7 @@ enum rcanfd_chip_id {
/* RSCFDnCFDGERFL / RSCFDnGERFL */
#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
-#define RCANFD_GERFL_EEF1 BIT(17)
-#define RCANFD_GERFL_EEF0 BIT(16)
+#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch))
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
#define RCANFD_GERFL_THLES BIT(2)
#define RCANFD_GERFL_MES BIT(1)
@@ -90,7 +89,7 @@ enum rcanfd_chip_id {
#define RCANFD_GERFL_ERR(gpriv, x) \
((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \
- RCANFD_GERFL_EEF0 | RCANFD_GERFL_EEF1) | \
+ RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
RCANFD_GERFL_MES | \
((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
@@ -936,12 +935,8 @@ static void rcar_canfd_global_error(struct net_device *ndev)
u32 ridx = ch + RCANFD_RFFIFO_IDX;
gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) {
- netdev_dbg(ndev, "Ch0: ECC Error flag\n");
- stats->tx_dropped++;
- }
- if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) {
- netdev_dbg(ndev, "Ch1: ECC Error flag\n");
+ if (gerfl & RCANFD_GERFL_EEF(ch)) {
+ netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch);
stats->tx_dropped++;
}
if (gerfl & RCANFD_GERFL_MES) {
@@ -1481,7 +1476,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
unsigned long flags;
u32 ch = priv->channel;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 1bb1129b0450..aac5956e4a53 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -291,7 +291,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
u8 cmd_reg_val = 0x00;
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index 8d13fdf8c28a..fbb34139daa1 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -594,7 +594,7 @@ static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb,
{
struct slcan *sl = netdev_priv(dev);
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
spin_lock(&sl->lock);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index a5ef57f415f7..c72f505d29fe 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -60,7 +60,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
uint8_t buf[DPRAM_TX_SIZE];
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
spin_lock(&card->spin);
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index b87dc420428d..e1b8533a602e 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -373,7 +373,7 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (can_dropped_invalid_skb(net, skb))
+ if (can_dev_dropped_skb(net, skb))
return NETDEV_TX_OK;
netif_stop_queue(net);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 24883a65ca66..79c4bab5f724 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -789,7 +789,7 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (can_dropped_invalid_skb(net, skb))
+ if (can_dev_dropped_skb(net, skb))
return NETDEV_TX_OK;
netif_stop_queue(net);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
index ffb6c36b7d9b..160528d3cc26 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -172,7 +172,7 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
u8 tx_head;
int err;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (mcp251xfd_tx_busy(priv, tx_ring))
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 525309da1320..2b78f9197681 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -429,7 +429,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d
canid_t id;
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index b218fb3c6b76..27700f72eac2 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -470,7 +470,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
mbxno = get_tx_head_mb(priv);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index d31191686a54..050c0b49938a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -747,7 +747,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 1bcfad11b1e4..81b88e9e5bdc 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -725,7 +725,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
int ret = NETDEV_TX_OK;
size_t size = sizeof(struct esd_usb_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 51294b717040..25f863b4f5f0 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -1913,7 +1913,7 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
unsigned int frame_len;
int ret;
- if (can_dropped_invalid_skb(netdev, skb)) {
+ if (can_dev_dropped_skb(netdev, skb)) {
if (priv->tx_urb)
goto xmit_commit;
return NETDEV_TX_OK;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index f0065d40eb24..9c2c25fde3d1 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -723,7 +723,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
unsigned int idx;
struct gs_tx_context *txc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* find an empty context to keep track of transmission */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index e91648ed7386..802e27c0eced 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -570,7 +570,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
unsigned int i;
unsigned long flags;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
urb = usb_alloc_urb(0, GFP_ATOMIC);
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 69346c63021f..218b098b261d 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -311,7 +311,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
.cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV
};
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
ctx = mcba_usb_get_free_ctx(priv, cf);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 225697d70a9a..1d996d3320fe 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -351,7 +351,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
int i, err;
size_t size = dev->adapter->tx_buffer_size;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 7c35f50fda4e..67c2ff407d06 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -1120,7 +1120,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
/* check skb */
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* allocate a context and slow down tx path, if fifo state is low */
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 64c00abe91cf..8a5596ce4e46 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -602,7 +602,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
int i, err;
size_t size = sizeof(struct usb_8dev_tx_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 5d3172795ad0..43c812ea1de0 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -743,7 +743,7 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d6cfea65a714..390671640388 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1004,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev)
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
- if (ret)
+ if (ret) {
+ xgene_enet_napi_disable(pdata);
return ret;
+ }
if (ndev->phydev) {
phy_start(ndev->phydev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index a0180811305d..7eb5851eb95d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -570,6 +570,7 @@ static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+ memzero_explicit(&key_rec, sizeof(key_rec));
return ret;
}
@@ -899,6 +900,7 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
+ memzero_explicit(&sa_key_record, sizeof(sa_key_record));
return ret;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index 36c7cf05630a..431924959520 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
u16 table_index)
{
u16 packed_record[18];
+ int ret;
if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
return -EINVAL;
@@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
packed_record[16] = rec->key_len & 0x3;
- return set_raw_ingress_record(hw, packed_record, 18, 2,
- ROWOFFSET_INGRESSSAKEYRECORD +
- table_index);
+ ret = set_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
@@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw,
ret = set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index);
if (unlikely(ret))
- return ret;
+ goto clear_key;
ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index -
32);
- if (unlikely(ret))
- return ret;
- return 0;
+clear_key:
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f4e1ca68d831..55dfdb34e37b 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -77,7 +77,7 @@ config BCMGENET
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
select DIMLIB
- select BROADCOM_PHY if ARCH_BCM2835
+ select BROADCOM_PHY if (ARCH_BCM2835 && PTP_1588_CLOCK_OPTIONAL)
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 04cf7684f1b0..c78b6e9dea2c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9983,17 +9983,12 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
-int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int rc;
if (!BNXT_NEW_RM(bp))
- return 0; /* no resource reservations required */
-
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- if (rc)
- netdev_err(bp->dev, "resc_qcaps failed\n");
+ return; /* no resource reservations required */
hw_resc->resv_cp_rings = 0;
hw_resc->resv_stat_ctxs = 0;
@@ -10006,6 +10001,20 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
}
+}
+
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+{
+ int rc;
+
+ if (!BNXT_NEW_RM(bp))
+ return 0; /* no resource reservations required */
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
+ bnxt_clear_reservations(bp, fw_reset);
return rc;
}
@@ -12894,8 +12903,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rcu_read_lock();
hlist_for_each_entry_rcu(fltr, head, hash) {
if (bnxt_fltr_match(fltr, new_fltr)) {
+ rc = fltr->sw_id;
rcu_read_unlock();
- rc = 0;
goto err_free;
}
}
@@ -13913,7 +13922,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
- int err = 0, off;
+ int retry = 0;
+ int err = 0;
+ int off;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -13941,11 +13952,36 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_save_state(pdev);
+ bnxt_inv_fw_health_reg(bp);
+ bnxt_try_map_fw_health_reg(bp);
+
+ /* In some PCIe AER scenarios, firmware may take up to
+ * 10 seconds to become ready in the worst case.
+ */
+ do {
+ err = bnxt_try_recover_fw(bp);
+ if (!err)
+ break;
+ retry++;
+ } while (retry < BNXT_FW_SLOT_RESET_RETRY);
+
+ if (err) {
+ dev_err(&pdev->dev, "Firmware not ready\n");
+ goto reset_exit;
+ }
+
err = bnxt_hwrm_func_reset(bp);
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
+
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ err = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, err);
}
+reset_exit:
+ bnxt_clear_reservations(bp, true);
rtnl_unlock();
return result;
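The slot-reset hunk above now remaps the health register and polls bnxt_try_recover_fw() up to BNXT_FW_SLOT_RESET_RETRY times, since the new comment notes firmware can need up to roughly 10 seconds after an AER event. The shape of that bounded retry, sketched in plain C with check_fw_ready() and the retry budget invented for illustration (the real helper does its own waiting between attempts):

#include <stdio.h>

#define FW_SLOT_RESET_RETRY 4	/* mirrors BNXT_FW_SLOT_RESET_RETRY; value assumed */

/* Hypothetical probe: returns 0 once firmware answers, negative otherwise. */
static int check_fw_ready(int attempt)
{
	return attempt >= 2 ? 0 : -1;	/* pretend it succeeds on the third try */
}

int main(void)
{
	int retry = 0;
	int err;

	do {
		err = check_fw_ready(retry);
		if (!err)
			break;
		retry++;
	} while (retry < FW_SLOT_RESET_RETRY);

	printf("firmware %s (retries used: %d)\n",
	       err ? "not ready" : "ready", retry);
	return err ? 1 : 0;
}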
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index b1b17f911300..d5fa43cfe524 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1621,6 +1621,7 @@ struct bnxt_fw_health {
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
+#define BNXT_FW_SLOT_RESET_RETRY 4
enum board_idx {
BCM57301,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f57e524c7e30..8cad15c458b3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -162,7 +162,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
reset_coalesce:
- if (netif_running(dev)) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
if (!rc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index b01d42928a53..132442f16fe6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -476,7 +476,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
memset(ctx->resp, 0, PAGE_SIZE);
req_type = le16_to_cpu(ctx->req->req_type);
- if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) {
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {
netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
req_type);
goto exit;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index a52e6b6e2876..9b84c8d8d309 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1301,6 +1301,7 @@ static int cxgb_up(struct adapter *adap)
if (ret < 0) {
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
+ quiesce_rx(adap);
free_irq_resources(adap);
err = ret;
goto out;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 54db79f4dcfe..63b2bd084130 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev)
*/
err = t4vf_update_port_info(pi);
if (err < 0)
- return err;
+ goto err_unwind;
/*
* Note that this interface is up and start everything up ...
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 65df308bad97..13e67f2864be 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -487,12 +487,21 @@ _return_of_node_put:
return err;
}
+static int mac_remove(struct platform_device *pdev)
+{
+ struct mac_device *mac_dev = platform_get_drvdata(pdev);
+
+ platform_device_unregister(mac_dev->priv->eth_dev);
+ return 0;
+}
+
static struct platform_driver mac_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mac_match,
},
.probe = mac_probe,
+ .remove = mac_remove,
};
builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 6962a9d69cf8..987271da6e9b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12984,14 +12984,16 @@ static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
u8 *priority)
{
+ struct hclge_vport *vport = hclge_get_vport(h);
+
if (dscp >= HNAE3_MAX_DSCP)
return -EINVAL;
if (tc_mode)
- *tc_mode = h->kinfo.tc_map_mode;
+ *tc_mode = vport->nic.kinfo.tc_map_mode;
if (priority)
- *priority = h->kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
- h->kinfo.dscp_prio[dscp];
+ *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ vport->nic.kinfo.dscp_prio[dscp];
return 0;
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3b14dc93f59d..5b96cd94dcd2 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1757,7 +1757,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
kobject_uevent(kobj, KOBJ_ADD);
}
- rc = netif_set_real_num_tx_queues(netdev, ibmveth_real_max_tx_queues());
+ rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(),
+ IBMVETH_DEFAULT_QUEUES));
if (rc) {
netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",
rc);
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index daf6f615c03f..115d4c45aa77 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -100,6 +100,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
#define IBMVETH_MAX_QUEUES 16U
+#define IBMVETH_DEFAULT_QUEUES 8U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 5a9e6563923e..24a701fd140e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2438,6 +2438,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->is_new_vlan) {
f->is_new_vlan = false;
+ if (!f->vlan.vid)
+ continue;
if (f->vlan.tpid == ETH_P_8021Q)
set_bit(f->vlan.vid,
adapter->vsi.active_cvlans);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 9e36f01dfa4f..e864634d66bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -958,7 +958,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* associated to the queue to schedule NAPI handler
*/
q_vector = ring->q_vector;
- if (q_vector)
+ if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
ice_trigger_sw_intr(hw, q_vector);
status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 938ba8c215cb..7276badfa19e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2240,6 +2240,31 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_is_rx_queue_active - check if any Rx queue is active
+ * @vsi: the VSI to check
+ *
+ * Return true if at least one Rx queue is active.
+ */
+bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ u32 rx_reg;
+ int pf_q;
+
+ pf_q = vsi->rxq_map[i];
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+ if (rx_reg & QRX_CTRL_QENA_STAT_M)
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
* @vsi: VSI to check whether or not VLAN pruning is enabled.
*
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index ec4bf0c89857..dcdf69a693e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -129,4 +129,5 @@ u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
+bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 0abeed092de1..1c51778db951 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -576,7 +576,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return -EINVAL;
}
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
- ice_vsi_stop_all_rx_rings(vsi);
+
+ if (ice_vsi_is_rx_queue_active(vsi))
+ ice_vsi_stop_all_rx_rings(vsi);
+
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
vf->vf_id);
return 0;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 707993b445d1..8941f69d93e9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2481,6 +2481,7 @@ out_free:
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
out:
+ napi_disable(&mp->napi);
free_irq(dev->irq, dev);
return err;
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index e1036b0eb6b1..6b4f640163f7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -32,10 +32,12 @@ config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
select NET_DEVLINK
+ depends on MACSEC || !MACSEC
depends on (64BIT && COMPILE_TEST) || ARM64
select DIMLIB
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on MACSEC || !MACSEC
help
This driver supports Marvell's OcteonTX2 NIC physical function.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 9ac9e6615ae7..9e10e7471b88 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -898,6 +898,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
sq->head = 0;
+ sq->cons_head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
/* Set SQE threshold to 10% of total SQEs */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 892ca88e0cf4..303930499a4c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -15,6 +15,7 @@
#include <net/ip.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/bitfield.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -1171,6 +1172,59 @@ int otx2_set_real_num_queues(struct net_device *netdev,
}
EXPORT_SYMBOL(otx2_set_real_num_queues);
+static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
+ "NIX_SQOPERR_OOR",
+ "NIX_SQOPERR_CTX_FAULT",
+ "NIX_SQOPERR_CTX_POISON",
+ "NIX_SQOPERR_DISABLED",
+ "NIX_SQOPERR_SIZE_ERR",
+ "NIX_SQOPERR_OFLOW",
+ "NIX_SQOPERR_SQB_NULL",
+ "NIX_SQOPERR_SQB_FAULT",
+ "NIX_SQOPERR_SQE_SZ_ZERO",
+};
+
+static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ "NIX_MNQERR_SQ_CTX_FAULT",
+ "NIX_MNQERR_SQ_CTX_POISON",
+ "NIX_MNQERR_SQB_FAULT",
+ "NIX_MNQERR_SQB_POISON",
+ "NIX_MNQERR_TOTAL_ERR",
+ "NIX_MNQERR_LSO_ERR",
+ "NIX_MNQERR_CQ_QUERY_ERR",
+ "NIX_MNQERR_MAX_SQE_SIZE_ERR",
+ "NIX_MNQERR_MAXLEN_ERR",
+ "NIX_MNQERR_SQE_SIZEM1_ZERO",
+};
+
+static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
+ "NIX_SND_STATUS_GOOD",
+ "NIX_SND_STATUS_SQ_CTX_FAULT",
+ "NIX_SND_STATUS_SQ_CTX_POISON",
+ "NIX_SND_STATUS_SQB_FAULT",
+ "NIX_SND_STATUS_SQB_POISON",
+ "NIX_SND_STATUS_HDR_ERR",
+ "NIX_SND_STATUS_EXT_ERR",
+ "NIX_SND_STATUS_JUMP_FAULT",
+ "NIX_SND_STATUS_JUMP_POISON",
+ "NIX_SND_STATUS_CRC_ERR",
+ "NIX_SND_STATUS_IMM_ERR",
+ "NIX_SND_STATUS_SG_ERR",
+ "NIX_SND_STATUS_MEM_ERR",
+ "NIX_SND_STATUS_INVALID_SUBDC",
+ "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+ "NIX_SND_STATUS_DATA_FAULT",
+ "NIX_SND_STATUS_DATA_POISON",
+ "NIX_SND_STATUS_NPC_DROP_ACTION",
+ "NIX_SND_STATUS_LOCK_VIOL",
+ "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+ "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+ "NIX_SND_STATUS_NPC_MCAST_ABORT",
+ "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+ "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+ "NIX_SND_STATUS_SEND_STATS_ERR",
+};
+
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
struct otx2_nic *pf = data;
@@ -1204,46 +1258,67 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
/* SQ */
for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
+ u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
+ u8 sq_op_err_code, mnq_err_code, snd_err_code;
+
+		/* The debug registers below latch the first error seen on this
+		 * LF, so there is no need to match them against the SQ qid;
+		 * these are fatal errors.
+		 */
+
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
(val & NIX_SQINT_BITS));
- if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
- continue;
-
if (val & BIT_ULL(42)) {
netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
qidx, otx2_read64(pf, NIX_LF_ERR_INT));
- } else {
- if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
- qidx,
- otx2_read64(pf,
- NIX_LF_SQ_OP_ERR_DBG));
- otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
- qidx,
- otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
- otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
- qidx,
- otx2_read64(pf,
- NIX_LF_SEND_ERR_DBG));
- otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
- netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
- qidx);
+ goto done;
}
+ sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
+ if (!(sq_op_err_dbg & BIT(44)))
+ goto chk_mnq_err_dbg;
+
+ sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
+ qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
+
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+
+ if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
+ goto chk_mnq_err_dbg;
+
+		/* The error is not NIX_SQOPERR_SQB_NULL, so the SQ context should
+		 * be read via an AQ mailbox call. TODO: this runs in IRQ context
+		 * and the mbox functions may sleep, so it cannot be done here.
+		 */
+
+chk_mnq_err_dbg:
+ mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
+ if (!(mnq_err_dbg & BIT(44)))
+ goto chk_snd_err_dbg;
+
+ mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
+ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+
+chk_snd_err_dbg:
+ snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ if (snd_err_dbg & BIT(44)) {
+ snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+ qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ }
+
+done:
+ /* Print values and reset */
+ if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+ netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+ qidx);
+
schedule_work(&pf->reset_task);
}
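The reworked otx2_q_intr_handler() above reads each *_ERR_DBG register, tests the valid bit (bit 44), pulls the low 8-bit error code out with FIELD_GET(GENMASK(7, 0), ...) and uses it to index the new nix_*_e_str[] name tables. A self-contained sketch of that decode step, with the register value and string table invented and the GENMASK/FIELD_GET helpers re-derived locally instead of coming from <linux/bitfield.h>:

#include <stdint.h>
#include <stdio.h>

/* Local equivalents of the kernel helpers, for a 64-bit value. */
#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET_ULL(mask, reg)	(((reg) & (mask)) / ((mask) & -(mask)))

static const char * const sqoperr_str[] = {
	"OOR", "CTX_FAULT", "CTX_POISON", "DISABLED",
	"SIZE_ERR", "OFLOW", "SQB_NULL", "SQB_FAULT", "SQE_SZ_ZERO",
};

int main(void)
{
	/* Pretend the hardware latched error code 6 (SQB_NULL) plus valid bit 44. */
	uint64_t err_dbg = (1ULL << 44) | 6;

	if (err_dbg & (1ULL << 44)) {
		uint8_t code = FIELD_GET_ULL(GENMASK_ULL(7, 0), err_dbg);

		if (code < sizeof(sqoperr_str) / sizeof(sqoperr_str[0]))
			printf("SQ op error: %s (0x%x)\n", sqoperr_str[code], code);
	}
	return 0;
}

Bounds-checking the code before indexing is cheap insurance when the hardware code space is sparse, as the nix_snd_status_e values in the next hunk are.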
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index aa205a0d158f..fa37b9f312ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -281,4 +281,61 @@ enum nix_sqint_e {
BIT_ULL(NIX_SQINT_SEND_ERR) | \
BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+enum nix_sqoperr_e {
+ NIX_SQOPERR_OOR = 0,
+ NIX_SQOPERR_CTX_FAULT = 1,
+ NIX_SQOPERR_CTX_POISON = 2,
+ NIX_SQOPERR_DISABLED = 3,
+ NIX_SQOPERR_SIZE_ERR = 4,
+ NIX_SQOPERR_OFLOW = 5,
+ NIX_SQOPERR_SQB_NULL = 6,
+ NIX_SQOPERR_SQB_FAULT = 7,
+ NIX_SQOPERR_SQE_SZ_ZERO = 8,
+ NIX_SQOPERR_MAX,
+};
+
+enum nix_mnqerr_e {
+ NIX_MNQERR_SQ_CTX_FAULT = 0,
+ NIX_MNQERR_SQ_CTX_POISON = 1,
+ NIX_MNQERR_SQB_FAULT = 2,
+ NIX_MNQERR_SQB_POISON = 3,
+ NIX_MNQERR_TOTAL_ERR = 4,
+ NIX_MNQERR_LSO_ERR = 5,
+ NIX_MNQERR_CQ_QUERY_ERR = 6,
+ NIX_MNQERR_MAX_SQE_SIZE_ERR = 7,
+ NIX_MNQERR_MAXLEN_ERR = 8,
+ NIX_MNQERR_SQE_SIZEM1_ZERO = 9,
+ NIX_MNQERR_MAX,
+};
+
+enum nix_snd_status_e {
+ NIX_SND_STATUS_GOOD = 0x0,
+ NIX_SND_STATUS_SQ_CTX_FAULT = 0x1,
+ NIX_SND_STATUS_SQ_CTX_POISON = 0x2,
+ NIX_SND_STATUS_SQB_FAULT = 0x3,
+ NIX_SND_STATUS_SQB_POISON = 0x4,
+ NIX_SND_STATUS_HDR_ERR = 0x5,
+ NIX_SND_STATUS_EXT_ERR = 0x6,
+ NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ NIX_SND_STATUS_JUMP_POISON = 0x8,
+ NIX_SND_STATUS_CRC_ERR = 0x9,
+ NIX_SND_STATUS_IMM_ERR = 0x10,
+ NIX_SND_STATUS_SG_ERR = 0x11,
+ NIX_SND_STATUS_MEM_ERR = 0x12,
+ NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+ NIX_SND_STATUS_DATA_FAULT = 0x15,
+ NIX_SND_STATUS_DATA_POISON = 0x16,
+ NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+ NIX_SND_STATUS_LOCK_VIOL = 0x18,
+ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+ NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+ NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
+ NIX_SND_STATUS_MAX,
+};
+
#endif /* OTX2_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 5ec11d71bf60..ef10aef3cda0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -441,6 +441,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
int tx_pkts = 0, tx_bytes = 0, qidx;
+ struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
@@ -451,6 +452,9 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
return 0;
process_cqe:
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
+
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
@@ -458,18 +462,20 @@ process_cqe:
return 0;
break;
}
+
if (cq->cq_type == CQ_XDP) {
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
- cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
} else {
- otx2_snd_pkt_handler(pfvf, cq,
- &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
+ otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
+ &tx_pkts, &tx_bytes);
}
+
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
cq->pend_cqe--;
+
+ sq->cons_head++;
+ sq->cons_head &= (sq->sqe_cnt - 1);
}
/* Free CQEs to HW */
@@ -1072,17 +1078,17 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
{
struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
struct otx2_nic *pfvf = netdev_priv(netdev);
- int offset, num_segs, free_sqe;
+ int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
- /* Check if there is room for new SQE.
- * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
- * will give free SQE count.
+ /* Check if there is enough room between producer
+ * and consumer index.
*/
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ if (free_desc < sq->sqe_thresh)
+ return false;
- if (free_sqe < sq->sqe_thresh ||
- free_sqe < otx2_get_sqe_count(pfvf, skb))
+ if (free_desc < otx2_get_sqe_count(pfvf, skb))
return false;
num_segs = skb_shinfo(skb)->nr_frags + 1;
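With cons_head initialized in otx2_sq_init() and advanced per completed CQE in otx2_tx_napi_handler(), otx2_sq_append_skb() above can now derive free descriptors from the producer/consumer indices instead of the SQB aura counter. The expression (cons_head - head - 1 + sqe_cnt) & (sqe_cnt - 1) is the usual power-of-two ring arithmetic that keeps one slot unused so an empty ring is unambiguous; a small standalone check of it (queue size and index values made up):

#include <stdio.h>

/* Free slots in a power-of-two ring, leaving one slot unused so that
 * head == cons_head unambiguously means "empty". */
static unsigned int ring_free(unsigned int cons_head, unsigned int head,
			      unsigned int cnt)
{
	return (cons_head - head - 1 + cnt) & (cnt - 1);
}

int main(void)
{
	unsigned int cnt = 8;

	/* Empty ring: producer == consumer -> 7 usable slots. */
	printf("%u\n", ring_free(0, 0, cnt));	/* 7 */

	/* Producer 3 entries ahead of the consumer -> 4 slots left. */
	printf("%u\n", ring_free(0, 3, cnt));	/* 4 */

	/* Wrapped case: head = 1, cons_head = 6 -> 4 slots left. */
	printf("%u\n", ring_free(6, 1, cnt));	/* 4 */
	return 0;
}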
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index fbe62bbfb789..93cac2c2664c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -79,6 +79,7 @@ struct sg_list {
struct otx2_snd_queue {
u8 aura_id;
u16 head;
+ u16 cons_head;
u16 sqe_size;
u32 sqe_cnt;
u16 num_sqbs;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index 42ee963e9f75..9277a8fd1339 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -776,6 +776,7 @@ tx_done:
int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
struct prestera_rxtx *rxtx;
+ int err;
rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
if (!rxtx)
@@ -783,7 +784,11 @@ int prestera_rxtx_switch_init(struct prestera_switch *sw)
sw->rxtx = rxtx;
- return prestera_sdma_switch_init(sw);
+ err = prestera_sdma_switch_init(sw);
+ if (err)
+ kfree(rxtx);
+
+ return err;
}
void prestera_rxtx_switch_fini(struct prestera_switch *sw)
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 7e890f81148e..7050351250b7 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1026,6 +1026,8 @@ static int mtk_star_enable(struct net_device *ndev)
return 0;
err_free_irq:
+ napi_disable(&priv->rx_napi);
+ napi_disable(&priv->tx_napi);
free_irq(ndev->irq, ndev);
err_free_skbs:
mtk_star_free_rx_skbs(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 46ba4c2faad2..2e0d59ca62b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1770,12 +1770,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++)
- while (down_trylock(&cmd->sem))
+ for (i = 0; i < cmd->max_reg_cmds; i++) {
+ while (down_trylock(&cmd->sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
+ }
- while (down_trylock(&cmd->pages_sem))
+ while (down_trylock(&cmd->pages_sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
/* Unlock cmdif */
up(&cmd->pages_sem);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 39ef2a2561a3..8099a21e674c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
return err;
}
+static int
+mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev;
+ struct net_device *lower;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
+ return 0;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_eswitch_rep(lower))
+ continue;
+
+ priv = netdev_priv(lower);
+ mdev = priv->mdev;
+ if (!mlx5_lag_is_active(mdev))
+ return -EAGAIN;
+ if (!mlx5_lag_is_shared_fdb(mdev))
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
break;
case NETDEV_CHANGEUPPER:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 305fde62a78d..3337241cfd84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -6,70 +6,42 @@
#include "en/tc_priv.h"
#include "mlx5_core.h"
-/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
- &mlx5e_tc_act_accept,
- &mlx5e_tc_act_drop,
- &mlx5e_tc_act_trap,
- &mlx5e_tc_act_goto,
- &mlx5e_tc_act_mirred,
- &mlx5e_tc_act_mirred,
- &mlx5e_tc_act_redirect_ingress,
- NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
- &mlx5e_tc_act_vlan,
- &mlx5e_tc_act_vlan,
- &mlx5e_tc_act_vlan_mangle,
- &mlx5e_tc_act_tun_encap,
- &mlx5e_tc_act_tun_decap,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_csum,
- NULL, /* FLOW_ACTION_MARK, */
- &mlx5e_tc_act_ptype,
- NULL, /* FLOW_ACTION_PRIORITY, */
- NULL, /* FLOW_ACTION_WAKE, */
- NULL, /* FLOW_ACTION_QUEUE, */
- &mlx5e_tc_act_sample,
- &mlx5e_tc_act_police,
- &mlx5e_tc_act_ct,
- NULL, /* FLOW_ACTION_CT_METADATA, */
- &mlx5e_tc_act_mpls_push,
- &mlx5e_tc_act_mpls_pop,
- NULL, /* FLOW_ACTION_MPLS_MANGLE, */
- NULL, /* FLOW_ACTION_GATE, */
- NULL, /* FLOW_ACTION_PPPOE_PUSH, */
- NULL, /* FLOW_ACTION_JUMP, */
- NULL, /* FLOW_ACTION_PIPE, */
- &mlx5e_tc_act_vlan,
- &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+ [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+ [FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
+ [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+ [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+ [FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
+ [FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
+ [FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle,
+ [FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap,
+ [FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap,
+ [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+ [FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype,
+ [FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample,
+ [FLOW_ACTION_POLICE] = &mlx5e_tc_act_police,
+ [FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
+ [FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push,
+ [FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop,
+ [FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan,
};
-/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
- &mlx5e_tc_act_accept,
- &mlx5e_tc_act_drop,
- NULL, /* FLOW_ACTION_TRAP, */
- &mlx5e_tc_act_goto,
- &mlx5e_tc_act_mirred_nic,
- NULL, /* FLOW_ACTION_MIRRED, */
- NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
- NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
- NULL, /* FLOW_ACTION_VLAN_PUSH, */
- NULL, /* FLOW_ACTION_VLAN_POP, */
- NULL, /* FLOW_ACTION_VLAN_MANGLE, */
- NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
- NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_csum,
- &mlx5e_tc_act_mark,
- NULL, /* FLOW_ACTION_PTYPE, */
- NULL, /* FLOW_ACTION_PRIORITY, */
- NULL, /* FLOW_ACTION_WAKE, */
- NULL, /* FLOW_ACTION_QUEUE, */
- NULL, /* FLOW_ACTION_SAMPLE, */
- NULL, /* FLOW_ACTION_POLICE, */
- &mlx5e_tc_act_ct,
+ [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+ [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+ [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+ [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic,
+ [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+ [FLOW_ACTION_MARK] = &mlx5e_tc_act_mark,
+ [FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
};
/**
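The rewrite above replaces the positional tc_acts_fdb[]/tc_acts_nic[] tables, which had to mirror enum flow_action_id entry-for-entry with NULL placeholders, with C99 designated initializers keyed by the enum: unsupported actions stay implicitly NULL, and inserting a new enum value can no longer silently shift every later mapping. A minimal sketch of the idiom with an invented enum and handler table:

#include <stdio.h>

enum demo_action {
	DEMO_ACCEPT,
	DEMO_DROP,
	DEMO_MIRROR,
	NUM_DEMO_ACTIONS,
};

static void act_accept(void) { puts("accept"); }
static void act_drop(void)   { puts("drop"); }

/* Entries not listed here are implicitly NULL, and reordering the enum
 * no longer breaks the table. */
static void (*const demo_acts[NUM_DEMO_ACTIONS])(void) = {
	[DEMO_ACCEPT] = act_accept,
	[DEMO_DROP]   = act_drop,
};

int main(void)
{
	for (int i = 0; i < NUM_DEMO_ACTIONS; i++) {
		if (demo_acts[i])
			demo_acts[i]();
		else
			printf("action %d not supported\n", i);
	}
	return 0;
}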
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index cb164b62f543..853f312cd757 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -11,6 +11,27 @@
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ * next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
+ */
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+ 255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
+
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+ MLX5_SEND_WQE_DS)
+
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+ MLX5E_MAX_TX_INLINE_DS + \
+ MLX5E_MAX_TX_IPSEC_DS + \
+ MAX_SKB_FRAGS + 1, \
+ MLX5_SEND_WQEBB_NUM_DS)
+
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
static inline
@@ -424,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
+ WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+
/* A WQE must not cross the page boundary, hence two conditions:
* 1. Its size must not exceed the page size.
* 2. If the WQE size is X, and the space remaining in a page is less
@@ -436,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si
"wqe_size %u is greater than max SQ WQEBBs %u",
wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
-
return MLX5E_STOP_ROOM(wqe_size);
}
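The new MLX5E_MAX_TX_WQEBBS bound above adds up the worst-case data-segment (DS) counts -- inline headers (366 bytes minus the part carried in the eth segment, plus a VLAN tag), the IPsec trailer and ICV, and one DS per fragment plus one -- then rounds to 16-byte DS units and 64-byte WQE basic blocks. A standalone recomputation of that arithmetic; the small constants (empty-WQE DS count, inline-header start size, MAX_SKB_FRAGS) are assumed typical values used only to show the rounding, not taken from this tree:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const int send_wqe_ds = 16;		/* bytes per data segment */
	const int wqebb_num_ds = 4;		/* 64-byte WQEBB / 16-byte DS */
	const int inl_hdr_start_sz = 2;		/* inline hdr bytes in the eth seg (assumed) */
	const int vlan_hlen = 4;
	const int inline_seg = 4;		/* struct mlx5_wqe_inline_seg (assumed) */
	const int max_skb_frags = 17;		/* assumed kernel config */
	const int tx_wqe_empty_ds = 2;		/* ctrl + eth segments, no data (assumed) */

	int ipsec_ds  = DIV_ROUND_UP(inline_seg + 255 + 1 + 1 + 16, send_wqe_ds);
	int inline_ds = DIV_ROUND_UP(366 - inl_hdr_start_sz + vlan_hlen, send_wqe_ds);
	int wqebbs    = DIV_ROUND_UP(tx_wqe_empty_ds + inline_ds + ipsec_ds +
				     max_skb_frags + 1, wqebb_num_ds);

	printf("ipsec_ds=%d inline_ds=%d max_tx_wqebbs=%d\n",
	       ipsec_ds, inline_ds, wqebbs);
	return 0;
}

The warning added to mlx5e_attach_netdev() in the en_main.c hunk below compares this compile-time bound against the firmware's max_wqe_size_sq capability and refuses to attach if the device cannot hold such a WQE.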
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 4685c652c97e..20507ef2f956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -117,7 +117,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdpi.page.rq = rq;
dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
- dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+ dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
if (unlikely(xdp_frame_has_frags(xdpf))) {
sinfo = xdp_get_shared_info_from_frame(xdpf);
@@ -131,7 +131,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
skb_frag_off(frag);
len = skb_frag_size(frag);
dma_sync_single_for_device(sq->pdev, addr, len,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 364f04309149..e3a4f01bcceb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5694,6 +5694,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
mlx5e_fs_set_state_destroy(priv->fs,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ /* Validate the max_wqe_size_sq capability. */
+ if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+ mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+ mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+ return -EIO;
+ }
+
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
if (priv->channels.params.num_channels > max_nch) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 58084650151f..a61a43fc8d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -266,7 +266,7 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_uni
addr = page_pool_get_dma_addr(au->page);
/* Non-XSK always uses PAGE_SIZE. */
- dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
return true;
}
@@ -282,8 +282,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_u
return -ENOMEM;
/* Non-XSK always uses PAGE_SIZE. */
- addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
- rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, addr))) {
page_pool_recycle_direct(rq->page_pool, au->page);
au->page = NULL;
@@ -427,14 +426,15 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
{
dma_addr_t addr = page_pool_get_dma_addr(au->page);
- dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
+ rq->buff.map_dir);
page_ref_inc(au->page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
au->page, frag_offset, len, truesize);
}
static inline void
-mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
+mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
struct page *page, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
@@ -442,7 +442,8 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
+ rq->buff.map_dir);
skb_copy_to_linear_data(skb, from, len);
}
@@ -1538,7 +1539,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
- frag_size, DMA_FROM_DEVICE);
+ frag_size, rq->buff.map_dir);
net_prefetch(data);
prog = rcu_dereference(rq->xdp_prog);
@@ -1587,7 +1588,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
- rq->buff.frame0_sz, DMA_FROM_DEVICE);
+ rq->buff.frame0_sz, rq->buff.map_dir);
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
@@ -1608,7 +1609,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
- frag_consumed_bytes, DMA_FROM_DEVICE);
+ frag_consumed_bytes, rq->buff.map_dir);
if (!xdp_buff_has_frags(&xdp)) {
/* Init on the first fragment to avoid cold cache access
@@ -1905,7 +1906,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
/* copy header */
addr = page_pool_get_dma_addr(head_au->page);
- mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
+ mlx5e_copy_skb_header(rq, skb, head_au->page, addr,
head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
@@ -1939,7 +1940,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
- frag_size, DMA_FROM_DEVICE);
+ frag_size, rq->buff.map_dir);
net_prefetch(data);
prog = rcu_dereference(rq->xdp_prog);
@@ -1987,7 +1988,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
/* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
prefetchw(hdr);
prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
@@ -2009,7 +2010,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
prefetchw(skb->data);
- mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
+ mlx5e_copy_skb_header(rq, skb, head->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dd6fea9e9a5b..5a6aa61ec82a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3633,10 +3633,14 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
attr2->action = 0;
attr2->flags = 0;
attr2->parse_attr = parse_attr;
- attr2->esw_attr->out_count = 0;
- attr2->esw_attr->split_count = 0;
attr2->dest_chain = 0;
attr2->dest_ft = NULL;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ attr2->esw_attr->out_count = 0;
+ attr2->esw_attr->split_count = 0;
+ }
+
return attr2;
}
@@ -4758,12 +4762,6 @@ int mlx5e_policer_validate(const struct flow_action *action,
return -EOPNOTSUPP;
}
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6adca01fbdc9..f7897ddb29c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -305,6 +305,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
u16 ds_cnt_inl = 0;
u16 ds_cnt_ids = 0;
+ /* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */
+
if (attr->insz)
ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
MLX5_SEND_WQE_DS);
@@ -317,6 +319,9 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
inl += VLAN_HLEN;
ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
+ netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
+ (u16)MLX5E_MAX_TX_INLINE_DS);
ds_cnt += ds_cnt_inl;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c59107fa9e6d..2169486c4bfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1387,12 +1387,14 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
- if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_disable(esw);
- else if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_legacy_disable(esw);
- mlx5_esw_acls_ns_cleanup(esw);
+ if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
+ esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ esw_offloads_disable(esw);
+ else if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_legacy_disable(esw);
+ mlx5_esw_acls_ns_cleanup(esw);
+ }
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
devl_rate_nodes_destroy(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4e50df3139c6..728ca9f2bb9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2310,7 +2310,7 @@ out_free:
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1;
+ int err;
esw->mode = MLX5_ESWITCH_OFFLOADS;
err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
@@ -2318,11 +2318,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
esw->mode = MLX5_ESWITCH_LEGACY;
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err1) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed setting eswitch back to legacy");
- }
mlx5_rescan_drivers(esw->dev);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
@@ -3389,19 +3384,12 @@ err_metadata:
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1;
+ int err;
esw->mode = MLX5_ESWITCH_LEGACY;
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err) {
+ if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- esw->mode = MLX5_ESWITCH_OFFLOADS;
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err1) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed setting eswitch back to offloads");
- }
- }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index ee568bf34ae2..108a3503f413 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
sizeof(dest->vport.num), hash);
hash = jhash((const void *)&dest->vport.vhca_id,
sizeof(dest->vport.num), hash);
- if (dest->vport.pkt_reformat)
- hash = jhash(dest->vport.pkt_reformat,
- sizeof(*dest->vport.pkt_reformat),
+ if (flow_act->pkt_reformat)
+ hash = jhash(flow_act->pkt_reformat,
+ sizeof(*flow_act->pkt_reformat),
hash);
return hash;
}
@@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
if (ret)
return ret;
- return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
- memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
- sizeof(*dest1->vport.pkt_reformat)) : 0;
+ if (flow_act1->pkt_reformat && flow_act2->pkt_reformat)
+ return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat,
+ sizeof(*flow_act1->pkt_reformat));
+
+ return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 07c583996c29..9d908a0ccfef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -152,7 +152,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
mlx5_unload_one(dev);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
- mlx5_load_one(dev, false);
+ else
+ mlx5_load_one(dev, false);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index dcf8212119f9..1d3c4474b7cb 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7128,9 +7128,8 @@ static int s2io_card_up(struct s2io_nic *sp)
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_fill_buff;
}
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
ring->rx_bufs_left);
@@ -7168,18 +7167,16 @@ static int s2io_card_up(struct s2io_nic *sp)
/* Enable Rx Traffic and interrupts on the NIC */
if (start_nic(sp)) {
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out;
}
/* Add interrupt service routine */
if (s2io_add_isr(sp) != 0) {
if (sp->config.intr_type == MSI_X)
s2io_rem_isr(sp);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out;
}
timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
@@ -7199,6 +7196,20 @@ static int s2io_card_up(struct s2io_nic *sp)
}
return 0;
+
+err_out:
+ if (config->napi) {
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < sp->config.rx_ring_num; i++)
+ napi_disable(&sp->mac_control.rings[i].napi);
+ } else {
+ napi_disable(&sp->napi);
+ }
+ }
+err_fill_buff:
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 3db4a2431741..19d043b593cc 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -900,6 +900,7 @@ static int nixge_open(struct net_device *ndev)
err_rx_irq:
free_irq(priv->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&priv->napi);
phy_stop(phy);
phy_disconnect(phy);
tasklet_kill(&priv->dma_err_tasklet);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 0a2afc1a3124..7deb1f817dac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -629,7 +629,6 @@ static int ehl_common_data(struct pci_dev *pdev,
{
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
- plat->clk_ptp_rate = 200000000;
plat->use_phy_wol = 1;
plat->safety_feat_cfg->tsoee = 1;
@@ -654,6 +653,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->clk_ptp_rate = 204800000;
+
return ehl_common_data(pdev, plat);
}
@@ -667,6 +668,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+ plat->clk_ptp_rate = 204800000;
+
return ehl_common_data(pdev, plat);
}
@@ -683,6 +686,8 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
plat->bus_id = 2;
plat->addr64 = 32;
+ plat->clk_ptp_rate = 200000000;
+
intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
return ehl_common_data(pdev, plat);
@@ -722,6 +727,8 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
plat->bus_id = 3;
plat->addr64 = 32;
+ plat->clk_ptp_rate = 200000000;
+
intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
return ehl_common_data(pdev, plat);
@@ -757,7 +764,7 @@ static int tgl_common_data(struct pci_dev *pdev,
{
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
- plat->clk_ptp_rate = 200000000;
+ plat->clk_ptp_rate = 204800000;
plat->speed_mode_2500 = intel_speed_mode_2500;
plat->safety_feat_cfg->tsoee = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 79fa7870563b..a25c187d3185 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -75,20 +75,24 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(*plat->mdio_bus_data),
GFP_KERNEL);
- if (!plat->mdio_bus_data)
- return -ENOMEM;
+ if (!plat->mdio_bus_data) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
plat->mdio_bus_data->needs_reset = true;
}
plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
- if (!plat->dma_cfg)
- return -ENOMEM;
+ if (!plat->dma_cfg) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
/* Enable pci device */
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
- return ret;
+ goto err_put_node;
}
/* Get the base address of device */
@@ -97,7 +101,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
continue;
ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
if (ret)
- return ret;
+ goto err_disable_device;
break;
}
@@ -108,7 +112,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
phy_mode = device_get_phy_mode(&pdev->dev);
if (phy_mode < 0) {
dev_err(&pdev->dev, "phy_mode not found\n");
- return phy_mode;
+ ret = phy_mode;
+ goto err_disable_device;
}
plat->phy_interface = phy_mode;
@@ -125,6 +130,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (res.irq < 0) {
dev_err(&pdev->dev, "IRQ macirq not found\n");
ret = -ENODEV;
+ goto err_disable_msi;
}
res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
@@ -137,15 +143,31 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (res.lpi_irq < 0) {
dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
ret = -ENODEV;
+ goto err_disable_msi;
}
- return stmmac_dvr_probe(&pdev->dev, plat, &res);
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (ret)
+ goto err_disable_msi;
+
+ return ret;
+
+err_disable_msi:
+ pci_disable_msi(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_put_node:
+ of_node_put(plat->mdio_node);
+ return ret;
}
static void loongson_dwmac_remove(struct pci_dev *pdev)
{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
int i;
+ of_node_put(priv->plat->mdio_node);
stmmac_dvr_remove(&pdev->dev);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -155,6 +177,7 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
break;
}
+ pci_disable_msi(pdev);
pci_disable_device(pdev);
}
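The loongson probe changes above convert early returns into a single unwind ladder (err_disable_msi / err_disable_device / err_put_node) so each failure path releases exactly what was acquired before it, and remove() undoes the same steps in reverse. A compact userspace sketch of that goto-unwind shape, with acquire()/release()/register_it() invented for illustration:

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what)
{
	printf("acquire %s\n", what);
	return malloc(1);
}

static void release(const char *what, void *p)
{
	printf("release %s\n", what);
	free(p);
}

/* Simulate the last probe step failing so the unwind ladder runs. */
static int register_it(void)
{
	return -1;
}

static int probe(void)
{
	void *node, *dev, *msi;
	int err = -1;

	node = acquire("node");
	if (!node)
		return -1;

	dev = acquire("device");
	if (!dev)
		goto err_put_node;

	msi = acquire("msi");
	if (!msi)
		goto err_disable_device;

	err = register_it();
	if (err)
		goto err_disable_msi;

	return 0;	/* success: caller keeps node/dev/msi */

err_disable_msi:
	release("msi", msi);
err_disable_device:
	release("device", dev);
err_put_node:
	release("node", node);
	return err;
}

int main(void)
{
	int err = probe();

	printf("probe returned %d\n", err);
	return 0;
}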
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index c7a6588d9398..e8b507f88fbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -272,11 +272,9 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
if (ret)
return ret;
- devm_add_action_or_reset(dwmac->dev,
- (void(*)(void *))clk_disable_unprepare,
- dwmac->rgmii_tx_clk);
-
- return 0;
+ return devm_add_action_or_reset(dwmac->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
}
static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 9be585237277..c499a14314f1 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -287,7 +287,6 @@ static u32 spl2sw_init_netdev(struct platform_device *pdev, u8 *mac_addr,
if (ret) {
dev_err(&pdev->dev, "Failed to register net device \"%s\"!\n",
ndev->name);
- free_netdev(ndev);
*r_ndev = NULL;
return ret;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 7f86068f3ff6..c50b137f92d7 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2823,7 +2823,6 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
if (ret < 0)
return ret;
- am65_cpsw_nuss_phylink_cleanup(common);
am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
@@ -2831,6 +2830,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
* dma_deconfigure(dev) before devres_release_all(dev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_nuss_phylink_cleanup(common);
of_platform_device_destroy(common->mdio_dev, NULL);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 709ca6dd6ecb..13c9c2d6b79b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -854,6 +854,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
err_cleanup:
if (!cpsw->usage_count) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
cpdma_ctlr_stop(cpsw->dma);
cpsw_destroy_xdp_rxqs(cpsw);
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 2cd2afc3fff0..d09d352e1c0a 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1290,12 +1290,15 @@ static int tsi108_open(struct net_device *dev)
data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
&data->rxdma, GFP_KERNEL);
- if (!data->rxring)
+ if (!data->rxring) {
+ free_irq(data->irq_num, dev);
return -ENOMEM;
+ }
data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
&data->txdma, GFP_KERNEL);
if (!data->txring) {
+ free_irq(data->irq_num, dev);
dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
data->rxdma);
return -ENOMEM;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 30af0081e2be..83a16d10eedb 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -533,7 +533,7 @@ static int bpq_device_event(struct notifier_block *this,
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
- if (!dev_is_ethdev(dev))
+ if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c891b60937a7..85376d2f24ca 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1413,7 +1413,8 @@ static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
return NULL;
}
-static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
+static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
+ bool active)
{
struct macsec_rx_sc *rx_sc;
struct macsec_dev *macsec;
@@ -1437,7 +1438,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
}
rx_sc->sci = sci;
- rx_sc->active = true;
+ rx_sc->active = active;
refcount_set(&rx_sc->refcnt, 1);
secy = &macsec_priv(dev)->secy;
@@ -1838,6 +1839,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
secy->key_len);
err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+ memzero_explicit(ctx.sa.key, secy->key_len);
if (err)
goto cleanup;
}
@@ -1876,7 +1878,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct macsec_secy *secy;
- bool was_active;
+ bool active = true;
int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1898,16 +1900,15 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
secy = &macsec_priv(dev)->secy;
sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
- rx_sc = create_rx_sc(dev, sci);
+ if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
+ active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+
+ rx_sc = create_rx_sc(dev, sci, active);
if (IS_ERR(rx_sc)) {
rtnl_unlock();
return PTR_ERR(rx_sc);
}
- was_active = rx_sc->active;
- if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
- rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
-
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
struct macsec_context ctx;
@@ -1931,7 +1932,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return 0;
cleanup:
- rx_sc->active = was_active;
+ del_rx_sc(secy, sci);
+ free_rx_sc(rx_sc);
rtnl_unlock();
return ret;
}
@@ -2080,6 +2082,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
secy->key_len);
err = macsec_offload(ops->mdo_add_txsa, &ctx);
+ memzero_explicit(ctx.sa.key, secy->key_len);
if (err)
goto cleanup;
}
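The memzero_explicit() calls added for the RX-SA and TX-SA offload paths above (and for the mscc PHY further down) make sure the stack copy of the key does not outlive the offload call. A rough userspace equivalent, assuming glibc/BSD explicit_bzero() as the analogue of memzero_explicit() and a stub in place of the offload hook:

#include <string.h>

/* Stand-in for the offload call; in the driver this is macsec_offload(). */
static int push_key_to_hw(const unsigned char *key, size_t len)
{
        (void)key;
        (void)len;
        return 0;
}

static int install_key(const unsigned char *key, size_t len)
{
        unsigned char tmp[64];
        int err;

        if (len > sizeof(tmp))
                return -1;

        memcpy(tmp, key, len);          /* temporary working copy of the secret */
        err = push_key_to_hw(tmp, len);
        explicit_bzero(tmp, len);       /* wipe it whether or not the call failed */
        return err;
}

int main(void)
{
        static const unsigned char key[16] = { 0xde, 0xad, 0xbe, 0xef };

        return install_key(key, sizeof(key));
}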
@@ -2570,7 +2573,7 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
int i;
- if (secy->n_rx_sc > 0)
+ if (secy->rx_sc)
return true;
for (i = 0; i < MACSEC_NUM_AN; i++)
@@ -2654,11 +2657,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
if (ret)
goto rollback;
- /* Force features update, since they are different for SW MACSec and
- * HW offloading cases.
- */
- netdev_update_features(dev);
-
rtnl_unlock();
return 0;
@@ -3432,16 +3430,9 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return ret;
}
-#define SW_MACSEC_FEATURES \
+#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-/* If h/w offloading is enabled, use real device features save for
- * VLAN_FEATURES - they require additional ops
- * HW_MACSEC - no reason to report it
- */
-#define REAL_DEV_FEATURES(dev) \
- ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
-
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3458,12 +3449,8 @@ static int macsec_dev_init(struct net_device *dev)
return err;
}
- if (macsec_is_offloaded(macsec)) {
- dev->features = REAL_DEV_FEATURES(real_dev);
- } else {
- dev->features = real_dev->features & SW_MACSEC_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
- }
+ dev->features = real_dev->features & MACSEC_FEATURES;
+ dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
dev->needed_headroom = real_dev->needed_headroom +
MACSEC_NEEDED_HEADROOM;
@@ -3495,10 +3482,7 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
- if (macsec_is_offloaded(macsec))
- return REAL_DEV_FEATURES(real_dev);
-
- features &= (real_dev->features & SW_MACSEC_FEATURES) |
+ features &= (real_dev->features & MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index c5cfe8555199..578897aaada0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1533,8 +1533,10 @@ destroy_macvlan_port:
/* the macvlan port may be freed by macvlan_uninit when fail to register.
* so we destroy the macvlan port only when it's valid.
*/
- if (create && macvlan_port_get_rtnl(lowerdev))
+ if (create && macvlan_port_get_rtnl(lowerdev)) {
+ macvlan_flush_sources(port, vlan);
macvlan_port_destroy(port->dev);
+ }
return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index ee5b17edca39..f81b077618f4 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -632,6 +632,7 @@ static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
list_del(&flow->list);
clear_bit(flow->index, bitmap);
+ memzero_explicit(flow->key, sizeof(flow->key));
kfree(flow);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 946628050f28..7a3ab3427369 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1967,17 +1967,25 @@ drop:
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
+ WARN_ON_ONCE(1);
+ err = -ENOMEM;
dev_core_stats_rx_dropped_inc(tun->dev);
+napi_busy:
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
- WARN_ON(1);
- return -ENOMEM;
+ return err;
}
- local_bh_disable();
- napi_gro_frags(&tfile->napi);
- local_bh_enable();
+ if (likely(napi_schedule_prep(&tfile->napi))) {
+ local_bh_disable();
+ napi_gro_frags(&tfile->napi);
+ napi_complete(&tfile->napi);
+ local_bh_enable();
+ } else {
+ err = -EBUSY;
+ goto napi_busy;
+ }
mutex_unlock(&tfile->napi_mutex);
} else if (tfile->napi_enabled) {
struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 960f1393595c..d62a904d2e42 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -325,6 +325,7 @@ static int lapbeth_open(struct net_device *dev)
err = lapb_register(dev, &lapbeth_callbacks);
if (err != LAPB_OK) {
+ napi_disable(&lapbeth->napi);
pr_err("lapb_register error: %d\n", err);
return -ENODEV;
}
@@ -446,7 +447,7 @@ static int lapbeth_device_event(struct notifier_block *this,
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
- if (!dev_is_ethdev(dev))
+ if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 2ec56a34fa81..0909d53cefeb 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -27,7 +27,7 @@
#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
#define ATH11K_QMI_CALDB_SIZE 0x480000
#define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20
-#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 3
+#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 5
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 7ee3ff69dfc8..6fae4e61ede7 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -287,11 +287,7 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
}
- rtnl_lock();
- wiphy_lock(ar->hw->wiphy);
- ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
- wiphy_unlock(ar->hw->wiphy);
- rtnl_unlock();
+ ret = regulatory_set_wiphy_regd(ar->hw->wiphy, regd_copy);
kfree(regd_copy);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index bc3f4e4edcdf..dac7eb77799b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
brcmf_fweh_event_name(event->code), event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
+ if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
+ bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
+ goto event_free;
+ }
/* convert event message */
emsg_be = &event->emsg;
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 10daef81c355..fb2c35bd73bb 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -5232,7 +5232,7 @@ static int get_wep_tx_idx(struct airo_info *ai)
return -1;
}
-static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
+static int set_wep_key(struct airo_info *ai, u16 index, const u8 *key,
u16 keylen, int perm, int lock)
{
static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
@@ -5283,7 +5283,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file)
struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i, rc;
- char key[16];
+ u8 key[16];
u16 index = 0;
int j = 0;
@@ -5311,12 +5311,22 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file)
}
for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) {
+ int val;
+
+ if (i % 3 == 2)
+ continue;
+
+ val = hex_to_bin(data->wbuffer[i+j]);
+ if (val < 0) {
+ airo_print_err(ai->dev->name, "WepKey passed invalid key hex");
+ return;
+ }
switch(i%3) {
case 0:
- key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
+ key[i/3] = (u8)val << 4;
break;
case 1:
- key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
+ key[i/3] |= (u8)val;
break;
}
}
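The loop above now rejects the whole key as soon as hex_to_bin() flags a bad digit, instead of folding the -1 error value into the key bytes. A standalone sketch of the same validate-then-assemble pattern for "xx:xx:..." input; the helper names and the output indexing are illustrative, with hexval() mirroring the hex_to_bin() contract:

#include <stdio.h>

static int hexval(char c)
{
        if (c >= '0' && c <= '9')
                return c - '0';
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        if (c >= 'A' && c <= 'F')
                return c - 'A' + 10;
        return -1;                      /* same contract as hex_to_bin() */
}

static int parse_wep_key(const char *s, unsigned char *key, int max)
{
        int i, n = 0, hi = 0, val;

        for (i = 0; s[i] && n < max; i++) {
                if (i % 3 == 2)
                        continue;       /* every third character is a separator */
                val = hexval(s[i]);
                if (val < 0)
                        return -1;      /* reject the key instead of storing garbage */
                if (i % 3 == 0)
                        hi = val << 4;
                else
                        key[n++] = (unsigned char)(hi | val);
        }
        return n;
}

int main(void)
{
        unsigned char key[16];
        int n = parse_wep_key("01:23:45:67:89", key, sizeof(key));

        printf("parsed %d bytes\n", n);
        return n < 0;
}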
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a40636c90ec3..0d81098c7b45 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -910,6 +910,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *cb;
if (!vp->assoc)
return;
@@ -931,6 +932,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr2, mac, ETH_ALEN);
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+ cb = IEEE80211_SKB_CB(skb);
+ cb->control.rates[0].count = 1;
+ cb->control.rates[1].idx = -1;
+
rcu_read_lock();
mac80211_hwsim_tx_frame(data->hw, skb,
rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 273c5eac3362..ddfc16de1b26 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1023,9 +1023,9 @@ static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
{
u32 reg, reg2;
unsigned int i;
- char put_to_sleep;
- char bbp_state;
- char rf_state;
+ bool put_to_sleep;
+ u8 bbp_state;
+ u8 rf_state;
put_to_sleep = (state != STATE_AWAKE);
@@ -1561,7 +1561,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.h b/drivers/net/wireless/ralink/rt2x00/rt2400pci.h
index b8187b6de143..979d5fd8babf 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.h
@@ -939,7 +939,7 @@
#define DEFAULT_TXPOWER 39
#define __CLAMP_TX(__txpower) \
- clamp_t(char, (__txpower), MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, (__txpower), MIN_TXPOWER, MAX_TXPOWER)
#define TXPOWER_FROM_DEV(__txpower) \
((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
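The char → u8/s8 conversions in this and the following rt2x00 files exist because plain char has implementation-defined signedness (it is unsigned on arm and powerpc kernel builds), so TX power levels and calibration deltas can change meaning between architectures. A small standalone demonstration of the difference:

#include <stdio.h>

int main(void)
{
        char plain = (char)0xf0;                  /* 240 or -16, depending on the ABI */
        signed char s8_like = (signed char)0xf0;  /* always -16 */
        unsigned char u8_like = 0xf0;             /* always 240 */

        printf("plain char:    %d\n", (int)plain);
        printf("signed char:   %d\n", (int)s8_like);
        printf("unsigned char: %u\n", (unsigned int)u8_like);

        /* clamp-style comparisons flip result with the signedness of char */
        printf("plain < 0?     %s\n", plain < 0 ? "yes" : "no");
        return 0;
}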
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 8faa0a80e73a..cd6371e25062 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -1176,9 +1176,9 @@ static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
{
u32 reg, reg2;
unsigned int i;
- char put_to_sleep;
- char bbp_state;
- char rf_state;
+ bool put_to_sleep;
+ u8 bbp_state;
+ u8 rf_state;
put_to_sleep = (state != STATE_AWAKE);
@@ -1856,7 +1856,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.h b/drivers/net/wireless/ralink/rt2x00/rt2500pci.h
index 7e64aee2a172..ba362675c52c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.h
@@ -1219,6 +1219,6 @@
(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
#define TXPOWER_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER)
#endif /* RT2500PCI_H */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index bb5ed6630645..4f3b0e6c6256 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -984,9 +984,9 @@ static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev,
u16 reg;
u16 reg2;
unsigned int i;
- char put_to_sleep;
- char bbp_state;
- char rf_state;
+ bool put_to_sleep;
+ u8 bbp_state;
+ u8 rf_state;
put_to_sleep = (state != STATE_AWAKE);
@@ -1663,7 +1663,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.h b/drivers/net/wireless/ralink/rt2x00/rt2500usb.h
index 0c070288a140..746f0e950b76 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.h
@@ -839,6 +839,6 @@
(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
#define TXPOWER_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER)
#endif /* RT2500USB_H */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index cbbb1a4849cf..12b700c7b9c3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -3372,10 +3372,10 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
- static const char r55_bt_rev[] = {0x83, 0x83,
+ static const u8 r55_bt_rev[] = {0x83, 0x83,
0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
- static const char r59_bt_rev[] = {0x0e, 0x0e,
+ static const u8 r59_bt_rev[] = {0x0e, 0x0e,
0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
@@ -3384,7 +3384,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 59,
r59_bt_rev[idx]);
} else {
- static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
+ static const u8 r59_bt[] = {0x8b, 0x8b, 0x8b,
0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
0x88, 0x88, 0x86, 0x85, 0x84};
@@ -3392,10 +3392,10 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
}
} else {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
- static const char r55_nonbt_rev[] = {0x23, 0x23,
+ static const u8 r55_nonbt_rev[] = {0x23, 0x23,
0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
- static const char r59_nonbt_rev[] = {0x07, 0x07,
+ static const u8 r59_nonbt_rev[] = {0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
@@ -3406,14 +3406,14 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT6352)) {
- static const char r59_non_bt[] = {0x8f, 0x8f,
+ static const u8 r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
rt2800_rfcsr_write(rt2x00dev, 59,
r59_non_bt[idx]);
} else if (rt2x00_rt(rt2x00dev, RT5350)) {
- static const char r59_non_bt[] = {0x0b, 0x0b,
+ static const u8 r59_non_bt[] = {0x0b, 0x0b,
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a,
0x0a, 0x09, 0x08, 0x07, 0x07, 0x06};
@@ -4035,23 +4035,23 @@ static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
}
-static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
+static s8 rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
unsigned int channel,
- char txpower)
+ s8 txpower)
{
if (rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT3883))
txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
if (channel <= 14)
- return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
+ return clamp_t(s8, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
if (rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT3883))
- return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
+ return clamp_t(s8, txpower, MIN_A_TXPOWER_3593,
MAX_A_TXPOWER_3593);
else
- return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
+ return clamp_t(s8, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
}
static void rt3883_bbp_adjust(struct rt2x00_dev *rt2x00dev,
@@ -8530,7 +8530,7 @@ static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev)
u8 bytevalue = 0;
int rcalcode;
u8 r_cal_code = 0;
- char d1 = 0, d2 = 0;
+ s8 d1 = 0, d2 = 0;
u8 rfvalue;
u32 MAC_RF_BYPASS0, MAC_RF_CONTROL0, MAC_PWR_PIN_CFG;
u32 maccfg;
@@ -8591,7 +8591,7 @@ static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev)
if (bytevalue > 128)
d1 = bytevalue - 256;
else
- d1 = (char)bytevalue;
+ d1 = (s8)bytevalue;
rt2800_bbp_write(rt2x00dev, 22, 0x0);
rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x01);
@@ -8601,7 +8601,7 @@ static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev)
if (bytevalue > 128)
d2 = bytevalue - 256;
else
- d2 = (char)bytevalue;
+ d2 = (s8)bytevalue;
rt2800_bbp_write(rt2x00dev, 22, 0x0);
rcalcode = rt2800_calcrcalibrationcode(rt2x00dev, d1, d2);
@@ -8703,7 +8703,7 @@ static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev)
static u32 rt2800_do_sqrt_accumulation(u32 si)
{
u32 root, root_pre, bit;
- char i;
+ s8 i;
bit = 1 << 15;
root = 0;
@@ -9330,11 +9330,11 @@ static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx,
u8 alc_idx, u8 dc_result[][RF_ALC_NUM][2])
{
u32 p0 = 0, p1 = 0, pf = 0;
- char idx0 = 0, idx1 = 0;
+ s8 idx0 = 0, idx1 = 0;
u8 idxf[] = {0x00, 0x00};
u8 ibit = 0x20;
u8 iorq;
- char bidx;
+ s8 bidx;
rt2800_bbp_write(rt2x00dev, 158, 0xb0);
rt2800_bbp_write(rt2x00dev, 159, 0x80);
@@ -9384,17 +9384,17 @@ static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx,
static void rt2800_iq_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 *ges, u8 *pes)
{
u32 p0 = 0, p1 = 0, pf = 0;
- char perr = 0, gerr = 0, iq_err = 0;
- char pef = 0, gef = 0;
- char psta, pend;
- char gsta, gend;
+ s8 perr = 0, gerr = 0, iq_err = 0;
+ s8 pef = 0, gef = 0;
+ s8 psta, pend;
+ s8 gsta, gend;
u8 ibit = 0x20;
u8 first_search = 0x00, touch_neg_max = 0x00;
- char idx0 = 0, idx1 = 0;
+ s8 idx0 = 0, idx1 = 0;
u8 gop;
u8 bbp = 0;
- char bidx;
+ s8 bidx;
for (bidx = 5; bidx >= 1; bidx--) {
for (gop = 0; gop < 2; gop++) {
@@ -10043,11 +10043,11 @@ static int rt2800_rf_lp_config(struct rt2x00_dev *rt2x00dev, bool btxcal)
return 0;
}
-static char rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev)
+static s8 rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev)
{
unsigned int cnt;
u8 bbp_val;
- char cal_val;
+ s8 cal_val;
rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x82);
@@ -10079,7 +10079,7 @@ static void rt2800_bw_filter_calibration(struct rt2x00_dev *rt2x00dev,
u8 rx_filter_target_20m = 0x27, rx_filter_target_40m = 0x31;
int loop = 0, is_ht40, cnt;
u8 bbp_val, rf_val;
- char cal_r32_init, cal_r32_val, cal_diff;
+ s8 cal_r32_init, cal_r32_val, cal_diff;
u8 saverfb5r00, saverfb5r01, saverfb5r03, saverfb5r04, saverfb5r05;
u8 saverfb5r06, saverfb5r07;
u8 saverfb5r08, saverfb5r17, saverfb5r18, saverfb5r19, saverfb5r20;
@@ -11550,9 +11550,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *default_power1;
- char *default_power2;
- char *default_power3;
+ s8 *default_power1;
+ s8 *default_power2;
+ s8 *default_power3;
unsigned int i, tx_chains, rx_chains;
u32 reg;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 3cbef77b4bd3..194de676df8f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -32,10 +32,10 @@ struct rf_reg_pair {
struct rt2800_drv_data {
u8 calibration_bw20;
u8 calibration_bw40;
- char rx_calibration_bw20;
- char rx_calibration_bw40;
- char tx_calibration_bw20;
- char tx_calibration_bw40;
+ s8 rx_calibration_bw20;
+ s8 rx_calibration_bw40;
+ s8 tx_calibration_bw20;
+ s8 tx_calibration_bw40;
u8 bbp25;
u8 bbp26;
u8 txmixer_gain_24g;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 0827bc860bf8..8fd22c69855f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -117,12 +117,12 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
const u16 buffer_length)
{
int status = 0;
- unsigned char *tb;
+ u8 *tb;
u16 off, len, bsize;
mutex_lock(&rt2x00dev->csr_mutex);
- tb = (char *)buffer;
+ tb = (u8 *)buffer;
off = offset;
len = buffer_length;
while (len && !status) {
@@ -215,7 +215,7 @@ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
rd->cr.wLength = cpu_to_le16(sizeof(u32));
usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
+ (u8 *)(&rd->cr), &rd->reg, sizeof(rd->reg),
rt2x00usb_register_read_async_cb, rd);
usb_anchor_urb(urb, rt2x00dev->anchor);
if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index d92f9eb07dc9..81db7f57c7e4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -1709,7 +1709,7 @@ static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
{
u32 reg, reg2;
unsigned int i;
- char put_to_sleep;
+ bool put_to_sleep;
put_to_sleep = (state != STATE_AWAKE);
@@ -2656,7 +2656,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.h b/drivers/net/wireless/ralink/rt2x00/rt61pci.h
index 5f208ad509bd..d72d0ffd1127 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.h
@@ -1484,6 +1484,6 @@ struct hw_pairwise_ta_entry {
(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
#define TXPOWER_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER)
#endif /* RT61PCI_H */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index e3269fd7c59e..861035444374 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -1378,7 +1378,7 @@ static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
{
u32 reg, reg2;
unsigned int i;
- char put_to_sleep;
+ bool put_to_sleep;
put_to_sleep = (state != STATE_AWAKE);
@@ -2090,7 +2090,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
- char *tx_power;
+ u8 *tx_power;
unsigned int i;
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.h b/drivers/net/wireless/ralink/rt2x00/rt73usb.h
index 1b56d285c34b..bb0a68516c08 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.h
@@ -1063,6 +1063,6 @@ struct hw_pairwise_ta_entry {
(((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
#define TXPOWER_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+ clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER)
#endif /* RT73USB_H */
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index 3486ffe94ac4..ac4d73b5626f 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -94,7 +94,7 @@ config RPMSG_WWAN_CTRL
config IOSM
tristate "IOSM Driver for Intel M.2 WWAN Device"
- depends on INTEL_IOMMU
+ depends on PCI
select NET_DEVLINK
select RELAY if WWAN_DEBUGFS
help
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
index 9acd87724c9d..26ca30476f40 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_coredump.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
+#include <linux/vmalloc.h>
#include "iosm_ipc_coredump.h"
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index 17da85a8f337..2fe724d623c0 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
+#include <linux/vmalloc.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_coredump.h"
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index b7f9237dedf7..66b90cc4c346 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -91,6 +91,14 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
}
ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
+
+ if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
+ ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
+ chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
+ chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
+ chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
+ }
+
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
IRQ_MOD_OFF);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h
index cd9d74cc097f..9968bb885c1f 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h
@@ -10,6 +10,7 @@
#define IPC_MEM_MAX_UL_DG_ENTRIES 100
#define IPC_MEM_MAX_TDS_MUX_AGGR_UL 60
+#define IPC_MEM_MAX_TDS_MUX_AGGR_DL 60
#define IPC_MEM_MAX_ADB_BUF_SIZE (16 * 1024)
#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 31f57b986df2..d3d34d1c4704 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -232,6 +232,7 @@ static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
*/
static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
{
+ enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12;
union acpi_object *object;
acpi_handle handle_acpi;
@@ -242,18 +243,23 @@ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
}
object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
+ if (!object)
+ goto default_ret;
+
+ if (object->integer.value == 3)
+ sleep_state = IPC_PCIE_D3L2;
- if (object && object->integer.value == 3)
- return IPC_PCIE_D3L2;
+ kfree(object);
default_ret:
- return IPC_PCIE_D0L12;
+ return sleep_state;
}
static int ipc_pcie_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
+ int ret;
pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
pci_id->vendor);
@@ -286,6 +292,12 @@ static int ipc_pcie_probe(struct pci_dev *pci,
goto pci_enable_fail;
}
+ ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
+ return ret;
+ }
+
ipc_pcie_config_aspm(ipc_pcie);
dev_dbg(ipc_pcie->dev, "PCIe device enabled.");
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index 2f1f8b5d5b59..4c9022a93e01 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -40,13 +40,11 @@ struct iosm_netdev_priv {
* @ipc_imem: Pointer to imem data-struct
* @sub_netlist: List of active netdevs
* @dev: Pointer device structure
- * @if_mutex: Mutex used for add and remove interface id
*/
struct iosm_wwan {
struct iosm_imem *ipc_imem;
struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
struct device *dev;
- struct mutex if_mutex; /* Mutex used for add and remove interface id */
};
/* Bring-up the wwan net link */
@@ -55,14 +53,11 @@ static int ipc_wwan_link_open(struct net_device *netdev)
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
int if_id = priv->if_id;
- int ret;
if (if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
- mutex_lock(&ipc_wwan->if_mutex);
-
/* get channel id */
priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
@@ -70,8 +65,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_err(ipc_wwan->dev,
"cannot connect wwan0 & id %d to the IPC mem layer",
if_id);
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
/* enable tx path, DL data may follow */
@@ -80,10 +74,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
priv->ch_id, priv->if_id);
- ret = 0;
-out:
- mutex_unlock(&ipc_wwan->if_mutex);
- return ret;
+ return 0;
}
/* Bring-down the wwan net link */
@@ -93,11 +84,9 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
netif_stop_queue(netdev);
- mutex_lock(&priv->ipc_wwan->if_mutex);
ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
priv->ch_id);
priv->ch_id = -1;
- mutex_unlock(&priv->ipc_wwan->if_mutex);
return 0;
}
@@ -168,6 +157,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
iosm_dev->max_mtu = ETH_MAX_MTU;
iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ iosm_dev->needs_free_netdev = true;
iosm_dev->netdev_ops = &ipc_inm_ops;
}
@@ -189,26 +179,17 @@ static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
priv->netdev = dev;
priv->ipc_wwan = ipc_wwan;
- mutex_lock(&ipc_wwan->if_mutex);
- if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) {
- err = -EBUSY;
- goto out_unlock;
- }
+ if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id]))
+ return -EBUSY;
err = register_netdevice(dev);
if (err)
- goto out_unlock;
+ return err;
rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
- mutex_unlock(&ipc_wwan->if_mutex);
-
netif_device_attach(dev);
return 0;
-
-out_unlock:
- mutex_unlock(&ipc_wwan->if_mutex);
- return err;
}
static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
@@ -222,17 +203,12 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
return;
- mutex_lock(&ipc_wwan->if_mutex);
-
if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
- goto unlock;
+ return;
RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
/* unregistering includes synchronize_net() */
unregister_netdevice_queue(dev, head);
-
-unlock:
- mutex_unlock(&ipc_wwan->if_mutex);
}
static const struct wwan_ops iosm_wwan_ops = {
@@ -323,12 +299,9 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
ipc_wwan->dev = dev;
ipc_wwan->ipc_imem = ipc_imem;
- mutex_init(&ipc_wwan->if_mutex);
-
/* WWAN core will create a netdev for the default IP MUX channel */
if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
IP_MUX_SESSION_DEFAULT)) {
- mutex_destroy(&ipc_wwan->if_mutex);
kfree(ipc_wwan);
return NULL;
}
@@ -341,7 +314,5 @@ void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
/* This call will remove all child netdev(s) */
wwan_unregister_ops(ipc_wwan->dev);
- mutex_destroy(&ipc_wwan->if_mutex);
-
kfree(ipc_wwan);
}
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 6872782e8dd8..ef70bb7c88ad 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -582,6 +582,7 @@ static void mhi_mbim_setup(struct net_device *ndev)
ndev->min_mtu = ETH_MIN_MTU;
ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
ndev->tx_queue_len = 1000;
+ ndev->needs_free_netdev = true;
}
static const struct wwan_ops mhi_mbim_wwan_ops = {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dc4220600585..da55ce45ac70 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -675,6 +675,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
if (req->mq_hctx->type == HCTX_TYPE_POLL)
req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
+ req->rq_flags |= RQF_QUIET;
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
@@ -1037,7 +1038,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- req->rq_flags |= RQF_QUIET;
ret = nvme_execute_rq(req, at_head);
if (result && ret >= 0)
*result = nvme_req(req)->result;
@@ -1227,7 +1227,6 @@ static void nvme_keep_alive_work(struct work_struct *work)
rq->timeout = ctrl->kato * HZ;
rq->end_io = nvme_keep_alive_end_io;
rq->end_io_data = ctrl;
- rq->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(rq, false);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 31e577b01257..02b5578773a1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1436,7 +1436,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
abort_req->end_io = abort_endio;
abort_req->end_io_data = NULL;
- abort_req->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(abort_req, false);
/*
@@ -2490,7 +2489,6 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
- req->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(req, false);
return 0;
}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 9443ee1d4ae3..6a2816f3b4e8 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1215,6 +1215,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
const char *page, size_t count)
{
int pos = 0, len;
+ char *val;
if (subsys->subsys_discovered) {
pr_err("Can't set model number. %s is already assigned\n",
@@ -1237,9 +1238,11 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
return -EINVAL;
}
- subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL);
- if (!subsys->model_number)
+ val = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!val)
return -ENOMEM;
+ kfree(subsys->model_number);
+ subsys->model_number = val;
return count;
}
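The model-number store above now builds the new copy first and frees the old value only after the allocation succeeded, so a failed update neither leaks the previous string nor leaves a dangling pointer. The same replace-by-swap pattern in plain C, with strndup() standing in for kmemdup_nul():

#include <stdlib.h>
#include <string.h>

static int set_model_number(char **field, const char *page, size_t len)
{
        char *val = strndup(page, len); /* allocate the new value first */

        if (!val)
                return -1;              /* the old value is still intact */

        free(*field);                   /* only now drop whatever was stored */
        *field = val;
        return 0;
}

int main(void)
{
        char *model = NULL;

        set_model_number(&model, "Linux-NVMe-target", 17);
        set_model_number(&model, "updated", 7);
        free(model);
        return 0;
}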
@@ -1836,6 +1839,7 @@ static void nvmet_host_release(struct config_item *item)
#ifdef CONFIG_NVME_TARGET_AUTH
kfree(host->dhchap_secret);
+ kfree(host->dhchap_ctrl_secret);
#endif
kfree(host);
}
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index e7c6f6629e7c..ba64284eaf9f 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1614,7 +1614,7 @@ out:
static u32 hv_compose_msi_req_v1(
struct pci_create_interrupt *int_pkt, const struct cpumask *affinity,
- u32 slot, u8 vector, u8 vector_count)
+ u32 slot, u8 vector, u16 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = slot;
@@ -1642,7 +1642,7 @@ static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity,
- u32 slot, u8 vector, u8 vector_count)
+ u32 slot, u8 vector, u16 vector_count)
{
int cpu;
@@ -1661,7 +1661,7 @@ static u32 hv_compose_msi_req_v2(
static u32 hv_compose_msi_req_v3(
struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity,
- u32 slot, u32 vector, u8 vector_count)
+ u32 slot, u32 vector, u16 vector_count)
{
int cpu;
@@ -1701,7 +1701,12 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct msi_desc *msi_desc;
- u8 vector, vector_count;
+ /*
+ * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
+ * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
+ */
+ u16 vector_count;
+ u32 vector;
struct {
struct pci_packet pci_pkt;
union {
@@ -1767,6 +1772,11 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
vector_count = 1;
}
+ /*
+ * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
+ * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
+ * for better readability.
+ */
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
@@ -1777,7 +1787,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
hpdev->desc.win_slot.slot,
- vector,
+ (u8)vector,
vector_count);
break;
@@ -1786,7 +1796,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
- vector,
+ (u8)vector,
vector_count);
break;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 9807c4d935cd..ba9d761ec49a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -2240,7 +2240,7 @@ static void qmp_combo_enable_autonomous_mode(struct qmp_phy *qphy)
static void qmp_combo_disable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
- void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs_usb;
+ void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
void __iomem *pcs_misc = qphy->pcs_misc;
/* Disable i/o clamp_n on resume for normal mode */
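The one-character qmp-combo fix above is easy to miss: with GCC's a ?: b shorthand, naming the same pointer on both sides means the fallback can never trigger. A reduced reproduction (pointer names are illustrative):

#include <stdio.h>

int main(void)
{
        int pcs = 1;
        int *pcs_usb = NULL, *pcs_ptr = &pcs;

        int *buggy = pcs_usb ?: pcs_usb;        /* stays NULL, the fallback is dead */
        int *fixed = pcs_usb ?: pcs_ptr;        /* falls back to the other register block */

        printf("buggy=%p fixed=%p\n", (void *)buggy, (void *)fixed);
        return 0;
}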
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 5e6530f545b5..85888ab2d307 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -280,7 +280,8 @@ static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
}
static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
- { .soc_id = "mt7621", .revision = "E2" }
+ { .soc_id = "mt7621", .revision = "E2" },
+ { /* sentinel */ }
};
static const struct regmap_config mt7621_pci_phy_regmap_config = {
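The extra { } entry above matters because soc_device_match() walks the table until it finds an all-zero sentinel; without one the lookup runs past the end of the array. A minimal model of such a sentinel-terminated match, with simplified structure and helper names rather than the kernel API:

#include <stdio.h>
#include <string.h>

struct quirk {
        const char *soc_id;
        const char *revision;
};

static const struct quirk quirks[] = {
        { .soc_id = "mt7621", .revision = "E2" },
        { /* sentinel */ }              /* the entry the patch adds */
};

static const struct quirk *quirk_match(const char *soc, const char *rev)
{
        const struct quirk *q;

        for (q = quirks; q->soc_id; q++)        /* walk until the all-NULL entry */
                if (!strcmp(q->soc_id, soc) && !strcmp(q->revision, rev))
                        return q;
        return NULL;
}

int main(void)
{
        printf("E2 quirk: %s\n", quirk_match("mt7621", "E2") ? "yes" : "no");
        return 0;
}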
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index a98c911cc37a..5bb9647b078f 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -710,6 +710,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
ret = of_property_read_u32(child, "reg", &index);
if (ret || index > usbphyc->nphys) {
dev_err(&phy->dev, "invalid reg property: %d\n", ret);
+ if (!ret)
+ ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
index b932087c55b2..e827b79f6d49 100644
--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
+++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
@@ -256,8 +256,8 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
resource_size(usbphy->moon4_res_mem));
- if (IS_ERR(usbphy->moon4_regs))
- return PTR_ERR(usbphy->moon4_regs);
+ if (!usbphy->moon4_regs)
+ return -ENOMEM;
usbphy->phy_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbphy->phy_clk))
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 95091876c422..dce45fbbd699 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -1461,8 +1461,14 @@ EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
{
- struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
- struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_lane *lane;
+ struct tegra_xusb_padctl *padctl;
+
+ if (!phy)
+ return;
+
+ lane = phy_get_drvdata(phy);
+ padctl = lane->pad->padctl;
if (padctl->soc->ops->utmi_pad_power_on)
padctl->soc->ops->utmi_pad_power_on(phy);
@@ -1471,8 +1477,14 @@ EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_pad_power_on);
void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
{
- struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
- struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_lane *lane;
+ struct tegra_xusb_padctl *padctl;
+
+ if (!phy)
+ return;
+
+ lane = phy_get_drvdata(phy);
+ padctl = lane->pad->padctl;
if (padctl->soc->ops->utmi_pad_power_down)
padctl->soc->ops->utmi_pad_power_down(phy);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 627a6d0eaf83..12449038bed1 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -1300,8 +1300,16 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
wwan_rfkill = NULL;
rfkill2_count = 0;
- if (hp_wmi_rfkill_setup(device))
- hp_wmi_rfkill2_setup(device);
+ /*
+ * In pre-2009 BIOS, command 1Bh returns 0x4 to indicate that
+ * BIOS no longer controls the power for the wireless
+ * devices. All features supported by this command will no
+ * longer be supported.
+ */
+ if (!hp_wmi_bios_2009_later()) {
+ if (hp_wmi_rfkill_setup(device))
+ hp_wmi_rfkill2_setup(device);
+ }
err = hp_wmi_hwmon_init();
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index abd0c81d62c4..33b3dfdd1b08 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1533,6 +1533,24 @@ static const struct dmi_system_id hw_rfkill_list[] = {
{}
};
+static const struct dmi_system_id no_touchpad_switch_list[] = {
+ {
+ .ident = "Lenovo Yoga 3 Pro 1370",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"),
+ },
+ },
+ {
+ .ident = "ZhaoYang K4e-IML",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ZhaoYang K4e-IML"),
+ },
+ },
+ {}
+};
+
static void ideapad_check_features(struct ideapad_private *priv)
{
acpi_handle handle = priv->adev->handle;
@@ -1541,7 +1559,12 @@ static void ideapad_check_features(struct ideapad_private *priv)
priv->features.hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
/* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
- priv->features.touchpad_ctrl_via_ec = !acpi_dev_present("ELAN0634", NULL, -1);
+ if (acpi_dev_present("ELAN0634", NULL, -1))
+ priv->features.touchpad_ctrl_via_ec = 0;
+ else if (dmi_check_system(no_touchpad_switch_list))
+ priv->features.touchpad_ctrl_via_ec = 0;
+ else
+ priv->features.touchpad_ctrl_via_ec = 1;
if (!read_ec_data(handle, VPCCMD_R_FAN, &val))
priv->features.fan_mode = true;
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 79cff1fc675c..b6313ecd190c 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -27,6 +27,9 @@ static const struct acpi_device_id intel_hid_ids[] = {
{"INTC1051", 0},
{"INTC1054", 0},
{"INTC1070", 0},
+ {"INTC1076", 0},
+ {"INTC1077", 0},
+ {"INTC1078", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 53d7fd2943b4..46598dcb634a 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -9,6 +9,7 @@
*/
#include <linux/kernel.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
@@ -19,6 +20,7 @@
#define PMT_XA_START 0
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
+#define GUID_SPR_PUNIT 0x9956f43f
bool intel_pmt_is_early_client_hw(struct device *dev)
{
@@ -33,6 +35,29 @@ bool intel_pmt_is_early_client_hw(struct device *dev)
}
EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
+static inline int
+pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
+{
+ int i, remain;
+ u64 *buf = to;
+
+ if (!IS_ALIGNED((unsigned long)from, 8))
+ return -EFAULT;
+
+ for (i = 0; i < count/8; i++)
+ buf[i] = readq(&from[i]);
+
+ /* Copy any remaining bytes */
+ remain = count % 8;
+ if (remain) {
+ u64 tmp = readq(&from[i]);
+
+ memcpy(&buf[i], &tmp, remain);
+ }
+
+ return count;
+}
+
/*
* sysfs
*/
@@ -54,7 +79,11 @@ intel_pmt_read(struct file *filp, struct kobject *kobj,
if (count > entry->size - off)
count = entry->size - off;
- memcpy_fromio(buf, entry->base + off, count);
+ if (entry->guid == GUID_SPR_PUNIT)
+ /* PUNIT on SPR only supports aligned 64-bit read */
+ count = pmt_memcpy64_fromio(buf, entry->base + off, count);
+ else
+ memcpy_fromio(buf, entry->base + off, count);
return count;
}
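pmt_memcpy64_fromio() above reads the telemetry region strictly as aligned 64-bit words and then patches in any 1–7 trailing bytes from one final read. The same structure in ordinary, non-MMIO C, for illustration only (a plain load replaces readq()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long copy64(void *to, const uint64_t *from, size_t count)
{
        uint64_t *buf = to;
        size_t i, remain = count % 8;

        if ((uintptr_t)from % 8)
                return -1;              /* source must be 8-byte aligned */

        for (i = 0; i < count / 8; i++)
                buf[i] = from[i];       /* one readq() per word in the driver */

        if (remain) {                   /* patch in the 1..7 trailing bytes */
                uint64_t tmp = from[i];

                memcpy(&buf[i], &tmp, remain);
        }
        return (long)count;
}

int main(void)
{
        uint64_t src[4] = { 1, 2, 3, 4 };
        uint64_t dst[4];

        printf("copied %ld bytes\n", copy64(dst, src, 26));
        return 0;
}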
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 384d0962ae93..1cf2471d54dd 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -19,26 +19,23 @@
#define P2SBC 0xe0
#define P2SBC_HIDE BIT(8)
+#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
+
static const struct x86_cpu_id p2sb_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, PCI_DEVFN(31, 1)),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, PCI_DEVFN(31, 1)),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, PCI_DEVFN(31, 1)),
- X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, PCI_DEVFN(31, 1)),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, PCI_DEVFN(31, 1)),
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, PCI_DEVFN(31, 1)),
{}
};
static int p2sb_get_devfn(unsigned int *devfn)
{
+ unsigned int fn = P2SB_DEVFN_DEFAULT;
const struct x86_cpu_id *id;
id = x86_match_cpu(p2sb_cpu_ids);
- if (!id)
- return -ENODEV;
+ if (id)
+ fn = (unsigned int)id->driver_data;
- *devfn = (unsigned int)id->driver_data;
+ *devfn = fn;
return 0;
}
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index bc97bfa8e8a6..baae3120efd0 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -770,6 +770,22 @@ static const struct ts_dmi_data predia_basic_data = {
.properties = predia_basic_props,
};
+static const struct property_entry rca_cambio_w101_v2_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1644),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 874),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data rca_cambio_w101_v2_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = rca_cambio_w101_v2_props,
+};
+
static const struct property_entry rwc_nanote_p8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
@@ -1410,6 +1426,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* RCA Cambio W101 v2 */
+ /* https://github.com/onitake/gsl-firmware/discussions/193 */
+ .driver_data = (void *)&rca_cambio_w101_v2_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "RCA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "W101SA23T1"),
+ },
+ },
+ {
/* RWC NANOTE P8 */
.driver_data = (void *)&rwc_nanote_p8_data,
.matches = {
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 8fb34b8eeb18..5ad251477593 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -342,7 +342,10 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
};
struct {
struct type6_hdr hdr;
- struct CPRBX cprbx;
+ union {
+ struct CPRBX cprbx;
+ DECLARE_FLEX_ARRAY(u8, userdata);
+ };
} __packed * msg = ap_msg->msg;
int rcblen = CEIL4(xcrb->request_control_blk_length);
@@ -403,7 +406,8 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
msg->hdr.fromcardlen2 = xcrb->reply_data_length;
/* prepare CPRB */
- if (z_copy_from_user(userspace, &msg->cprbx, xcrb->request_control_blk_addr,
+ if (z_copy_from_user(userspace, msg->userdata,
+ xcrb->request_control_blk_addr,
xcrb->request_control_blk_length))
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
@@ -469,9 +473,14 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
struct {
struct type6_hdr hdr;
- struct ep11_cprb cprbx;
- unsigned char pld_tag; /* fixed value 0x30 */
- unsigned char pld_lenfmt; /* payload length format */
+ union {
+ struct {
+ struct ep11_cprb cprbx;
+ unsigned char pld_tag; /* fixed value 0x30 */
+ unsigned char pld_lenfmt; /* length format */
+ } __packed;
+ DECLARE_FLEX_ARRAY(u8, userdata);
+ };
} __packed * msg = ap_msg->msg;
struct pld_hdr {
@@ -500,7 +509,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
msg->hdr.fromcardlen1 = xcrb->resp_len;
/* Import CPRB data from the ioctl input parameter */
- if (z_copy_from_user(userspace, &msg->cprbx.cprb_len,
+ if (z_copy_from_user(userspace, msg->userdata,
(char __force __user *)xcrb->req, xcrb->req_len)) {
return -EFAULT;
}
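The unions introduced above give the user-data copy an explicitly sized byte view (userdata) that overlays the structured CPRB header, instead of copying "through" a single struct member. A simplified userspace model of that overlay, using a fixed-size byte array and made-up header fields where the kernel uses DECLARE_FLEX_ARRAY and the real CPRB layouts:

#include <stdio.h>
#include <string.h>

struct cprb_hdr {
        unsigned short cprb_len;
        unsigned char  func_code[2];
};

struct msg {
        union {
                struct cprb_hdr hdr;            /* structured view of the start */
                unsigned char userdata[64];     /* byte view used for the bulk copy */
        };
};

int main(void)
{
        struct msg m = { 0 };
        unsigned char request[32] = { 0x10, 0x00, 'T', '2' };

        /* copy the whole user-supplied block into the byte view */
        memcpy(m.userdata, request, sizeof(request));

        printf("cprb_len=%u func=%c%c\n", m.hdr.cprb_len,
               m.hdr.func_code[0], m.hdr.func_code[1]);
        return 0;
}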
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 00684e11976b..1a0c0b7289d2 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -708,8 +708,13 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
vhost->async_crq.cur = 0;
- list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_del_tgt(tgt);
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (vhost->client_migrated)
+ tgt->need_login = 1;
+ else
+ ibmvfc_del_tgt(tgt);
+ }
+
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@@ -3235,9 +3240,12 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
/* We need to re-setup the interpartition connection */
dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
vhost->client_migrated = 1;
+
+ scsi_block_requests(vhost->host);
ibmvfc_purge_requests(vhost, DID_REQUEUE);
- ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
+ wake_up(&vhost->work_wait_q);
} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 697fc57bc711..629853662b82 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1899,6 +1899,13 @@ static int resp_readcap16(struct scsi_cmnd *scp,
arr[14] |= 0x40;
}
+ /*
+ * Since the scsi_debug READ CAPACITY implementation always reports the
+ * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
+ */
+ if (devip->zmodel == BLK_ZONED_HM)
+ arr[12] |= 1 << 4;
+
arr[15] = sdebug_lowest_aligned & 0xff;
if (have_dif_prot) {
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 2f88c61216ee..74b99f2b0b74 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -722,12 +722,17 @@ int sas_phy_add(struct sas_phy *phy)
int error;
error = device_add(&phy->dev);
- if (!error) {
- transport_add_device(&phy->dev);
- transport_configure_device(&phy->dev);
+ if (error)
+ return error;
+
+ error = transport_add_device(&phy->dev);
+ if (error) {
+ device_del(&phy->dev);
+ return error;
}
+ transport_configure_device(&phy->dev);
- return error;
+ return 0;
}
EXPORT_SYMBOL(sas_phy_add);
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 244209358784..8c76541d553f 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -1513,6 +1513,7 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
bus->link_id = auxdev->id;
bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN;
+ bus->clk_stop_timeout = 1;
sdw_cdns_probe(cdns);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index b33d5db494a5..cee2b2223141 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -344,6 +344,9 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
if (swrm_wait_for_wr_fifo_avail(swrm))
return SDW_CMD_FAIL_OTHER;
+ if (cmd_id == SWR_BROADCAST_CMD_ID)
+ reinit_completion(&swrm->broadcast);
+
/* Its assumed that write is okay as we do not get any status back */
swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
@@ -377,6 +380,12 @@ static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm,
val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
+ /*
+ * Check for outstanding cmd wrt. write fifo depth to avoid
+ * overflow as read will also increase write fifo cnt.
+ */
+ swrm_wait_for_wr_fifo_avail(swrm);
+
/* wait for FIFO RD to complete to avoid overflow */
usleep_range(100, 105);
swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index e23121456c70..bfc3ab5f39ea 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -65,7 +65,7 @@ enum amd_spi_speed {
F_16_66MHz,
F_100MHz,
F_800KHz,
- SPI_SPD7,
+ SPI_SPD7 = 0x7,
F_50MHz = 0x4,
F_4MHz = 0x32,
F_3_17MHz = 0x3F
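The explicit "= 0x7" above is needed because enumerators without an initializer simply count on from the previous value; a later "= 0x4" does not renumber earlier entries, so SPI_SPD7 quietly carried the wrong register encoding. A reduced illustration with shortened, made-up enumerator names:

#include <stdio.h>

enum speed_buggy {
        B_16_66MHZ,             /* 0 */
        B_100MHZ,               /* 1 */
        B_800KHZ,               /* 2 */
        B_SPD7,                 /* silently 3, not the intended 0x7 */
        B_50MHZ = 0x4
};

enum speed_fixed {
        X_16_66MHZ,
        X_100MHZ,
        X_800KHZ,
        X_SPD7 = 0x7,           /* pinned to the hardware encoding */
        X_50MHZ = 0x4
};

int main(void)
{
        printf("buggy SPD7=%d, fixed SPD7=%d\n", B_SPD7, X_SPD7);
        return 0;
}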
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 605acb1bf4b0..3ac73691fbb5 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -52,17 +52,17 @@
#define FRACC 0x50
#define FREG(n) (0x54 + ((n) * 4))
-#define FREG_BASE_MASK 0x3fff
+#define FREG_BASE_MASK GENMASK(14, 0)
#define FREG_LIMIT_SHIFT 16
-#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT)
+#define FREG_LIMIT_MASK GENMASK(30, 16)
/* Offset is from @ispi->pregs */
#define PR(n) ((n) * 4)
#define PR_WPE BIT(31)
#define PR_LIMIT_SHIFT 16
-#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
+#define PR_LIMIT_MASK GENMASK(30, 16)
#define PR_RPE BIT(15)
-#define PR_BASE_MASK 0x3fff
+#define PR_BASE_MASK GENMASK(14, 0)
/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL 0x00
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index bad201510a99..1b4195c54ee2 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -160,6 +160,7 @@ struct meson_spicc_device {
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
+ struct completion done;
const struct meson_spicc_data *data;
u8 *tx_buf;
u8 *rx_buf;
@@ -282,7 +283,7 @@ static irqreturn_t meson_spicc_irq(int irq, void *data)
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
- spi_finalize_current_transfer(spicc->master);
+ complete(&spicc->done);
return IRQ_HANDLED;
}
@@ -386,6 +387,7 @@ static int meson_spicc_transfer_one(struct spi_master *master,
struct spi_transfer *xfer)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ uint64_t timeout;
/* Store current transfer */
spicc->xfer = xfer;
@@ -410,13 +412,29 @@ static int meson_spicc_transfer_one(struct spi_master *master,
/* Setup burst */
meson_spicc_setup_burst(spicc);
+ /* Setup wait for completion */
+ reinit_completion(&spicc->done);
+
+ /* For each byte we wait for 8 cycles of the SPI clock */
+ timeout = 8LL * MSEC_PER_SEC * xfer->len;
+ do_div(timeout, xfer->speed_hz);
+
+ /* Add 10us delay between each fifo burst */
+ timeout += ((xfer->len >> 4) * 10) / MSEC_PER_SEC;
+
+ /* Increase it twice and add 200 ms tolerance */
+ timeout += timeout + 200;
+
/* Start burst */
writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
/* Enable interrupts */
writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
- return 1;
+ if (!wait_for_completion_timeout(&spicc->done, msecs_to_jiffies(timeout)))
+ return -ETIMEDOUT;
+
+ return 0;
}
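The timeout computed above budgets 8 SPI clock cycles per byte, adds 10 µs for every 16-byte FIFO burst, then doubles the result and adds 200 ms of slack before the completion wait gives up. The same arithmetic as a standalone helper, with one worked value (the transfer size and clock used here are only an example):

#include <stdio.h>

static unsigned long long spicc_timeout_ms(unsigned long long len,
                                           unsigned long long speed_hz)
{
        /* 8 clock cycles per byte, expressed in milliseconds */
        unsigned long long ms = 8ULL * 1000 * len / speed_hz;

        /* 10 us pause per 16-byte FIFO burst, converted to milliseconds */
        ms += (len >> 4) * 10 / 1000;

        /* double the estimate and add 200 ms of tolerance */
        return ms + ms + 200;
}

int main(void)
{
        /* a 4 KiB transfer at 1 MHz is given roughly 268 ms to complete */
        printf("%llu ms\n", spicc_timeout_ms(4096, 1000000));
        return 0;
}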
static int meson_spicc_prepare_message(struct spi_master *master,
@@ -743,6 +761,8 @@ static int meson_spicc_probe(struct platform_device *pdev)
spicc->pdev = pdev;
platform_set_drvdata(pdev, spicc);
+ init_completion(&spicc->done);
+
spicc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spicc->base)) {
dev_err(&pdev->dev, "io resource mapping failed\n");
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 11aeae7fe7fc..a33c9a3de395 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -551,14 +551,17 @@ static void mtk_spi_enable_transfer(struct spi_master *master)
writel(cmd, mdata->base + SPI_CMD_REG);
}
-static int mtk_spi_get_mult_delta(u32 xfer_len)
+static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
{
- u32 mult_delta;
+ u32 mult_delta = 0;
- if (xfer_len > MTK_SPI_PACKET_SIZE)
- mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
- else
- mult_delta = 0;
+ if (mdata->dev_comp->ipm_design) {
+ if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
+ mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
+ } else {
+ if (xfer_len > MTK_SPI_PACKET_SIZE)
+ mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
+ }
return mult_delta;
}
@@ -570,22 +573,22 @@ static void mtk_spi_update_mdata_len(struct spi_master *master)
if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
- mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
mdata->rx_sgl_len = mult_delta;
mdata->tx_sgl_len -= mdata->xfer_len;
} else {
- mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
mdata->tx_sgl_len = mult_delta;
mdata->rx_sgl_len -= mdata->xfer_len;
}
} else if (mdata->tx_sgl_len) {
- mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
mdata->tx_sgl_len = mult_delta;
} else if (mdata->rx_sgl_len) {
- mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
mdata->rx_sgl_len = mult_delta;
}
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 6fe617b445a5..def09cf0dc14 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -434,7 +434,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
u32 div, mbrdiv;
/* Ensure spi->clk_rate is even */
- div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
+ div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz);
/*
* SPI framework set xfer->speed_hz to master->max_speed_hz if
@@ -886,6 +886,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
static DEFINE_RATELIMIT_STATE(rs,
DEFAULT_RATELIMIT_INTERVAL * 10,
1);
+ ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
if (__ratelimit(&rs))
dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 904972606bd4..10f0c5a6e0dc 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -720,6 +720,9 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s
static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
+ if (!tqspi->soc_data->has_dma)
+ return;
+
if (tqspi->tx_dma_buf) {
dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
tqspi->tx_dma_buf, tqspi->tx_dma_phys);
@@ -750,6 +753,9 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
u32 *dma_buf;
int err;
+ if (!tqspi->soc_data->has_dma)
+ return 0;
+
dma_chan = dma_request_chan(tqspi->dev, "rx");
if (IS_ERR(dma_chan)) {
err = PTR_ERR(dma_chan);