Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cxl/mem.c | 152
-rw-r--r--  drivers/dax/bus.c | 6
-rw-r--r--  drivers/dma/dmaengine.c | 1
-rw-r--r--  drivers/dma/dw/Kconfig | 2
-rw-r--r--  drivers/dma/idxd/device.c | 65
-rw-r--r--  drivers/dma/idxd/idxd.h | 3
-rw-r--r--  drivers/dma/idxd/init.c | 11
-rw-r--r--  drivers/dma/idxd/irq.c | 4
-rw-r--r--  drivers/dma/idxd/sysfs.c | 19
-rw-r--r--  drivers/dma/plx_dma.c | 18
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 4
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 31
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 8
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 40
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.h | 1
-rw-r--r--  drivers/hid/hid-alps.c | 1
-rw-r--r--  drivers/hid/hid-asus.c | 3
-rw-r--r--  drivers/hid/hid-cp2112.c | 22
-rw-r--r--  drivers/hid/hid-google-hammer.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hid/wacom_wac.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 4
-rw-r--r--  drivers/input/joystick/n64joy.c | 4
-rw-r--r--  drivers/input/keyboard/nspire-keypad.c | 56
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 1
-rw-r--r--  drivers/input/touchscreen/elants_i2c.c | 5
-rw-r--r--  drivers/input/touchscreen/s6sy761.c | 4
-rw-r--r--  drivers/md/dm-verity-fec.c | 11
-rw-r--r--  drivers/md/dm-verity-fec.h | 1
-rw-r--r--  drivers/mtd/nand/raw/mtk_nand.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 30
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 102
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 84
-rw-r--r--  drivers/net/geneve.c | 6
-rw-r--r--  drivers/net/phy/marvell.c | 32
-rw-r--r--  drivers/net/vrf.c | 10
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 12
-rw-r--r--  drivers/nvdimm/bus.c | 14
-rw-r--r--  drivers/nvdimm/pmem.c | 37
-rw-r--r--  drivers/nvdimm/region_devs.c | 16
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 4
52 files changed, 501 insertions, 438 deletions
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 244cb7d89678..2acc6173da36 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -4,6 +4,7 @@
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/idr.h>
@@ -96,21 +97,18 @@ struct mbox_cmd {
* @dev: driver core device object
* @cdev: char dev core object for ioctl operations
* @cxlm: pointer to the parent device driver data
- * @ops_active: active user of @cxlm in ops handlers
- * @ops_dead: completion when all @cxlm ops users have exited
* @id: id number of this memdev instance.
*/
struct cxl_memdev {
struct device dev;
struct cdev cdev;
struct cxl_mem *cxlm;
- struct percpu_ref ops_active;
- struct completion ops_dead;
int id;
};
static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
+static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;
@@ -169,7 +167,7 @@ struct cxl_mem_command {
* table will be validated against the user's input. For example, if size_in is
* 0, and the user passed in 1, it is an error.
*/
-static struct cxl_mem_command mem_commands[] = {
+static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
CXL_CMD(RAW, ~0, ~0, 0),
@@ -776,26 +774,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct cxl_memdev *cxlmd;
- struct inode *inode;
- int rc = -ENOTTY;
+ struct cxl_memdev *cxlmd = file->private_data;
+ int rc = -ENXIO;
- inode = file_inode(file);
- cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+ down_read(&cxl_memdev_rwsem);
+ if (cxlmd->cxlm)
+ rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+ up_read(&cxl_memdev_rwsem);
- if (!percpu_ref_tryget_live(&cxlmd->ops_active))
- return -ENXIO;
+ return rc;
+}
- rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(inode->i_cdev, typeof(*cxlmd), cdev);
- percpu_ref_put(&cxlmd->ops_active);
+ get_device(&cxlmd->dev);
+ file->private_data = cxlmd;
- return rc;
+ return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+ put_device(&cxlmd->dev);
+
+ return 0;
}
static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cxl_memdev_ioctl,
+ .open = cxl_memdev_open,
+ .release = cxl_memdev_release_file,
.compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
@@ -984,7 +999,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
return NULL;
}
- offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
+ offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
/* Basic sanity check that BAR is big enough */
@@ -1049,7 +1064,6 @@ static void cxl_memdev_release(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- percpu_ref_exit(&cxlmd->ops_active);
ida_free(&cxl_memdev_ida, cxlmd->id);
kfree(cxlmd);
}
@@ -1066,7 +1080,7 @@ static ssize_t firmware_version_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
- return sprintf(buf, "%.16s\n", cxlm->firmware_version);
+ return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
@@ -1076,7 +1090,7 @@ static ssize_t payload_max_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
- return sprintf(buf, "%zu\n", cxlm->payload_size);
+ return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -1087,7 +1101,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->ram_range);
- return sprintf(buf, "%#llx\n", len);
+ return sysfs_emit(buf, "%#llx\n", len);
}
static struct device_attribute dev_attr_ram_size =
@@ -1100,7 +1114,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->pmem_range);
- return sprintf(buf, "%#llx\n", len);
+ return sysfs_emit(buf, "%#llx\n", len);
}
static struct device_attribute dev_attr_pmem_size =
@@ -1150,27 +1164,24 @@ static const struct device_type cxl_memdev_type = {
.groups = cxl_memdev_attribute_groups,
};
-static void cxlmdev_unregister(void *_cxlmd)
+static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct device *dev = &cxlmd->dev;
-
- percpu_ref_kill(&cxlmd->ops_active);
- cdev_device_del(&cxlmd->cdev, dev);
- wait_for_completion(&cxlmd->ops_dead);
+ down_write(&cxl_memdev_rwsem);
cxlmd->cxlm = NULL;
- put_device(dev);
+ up_write(&cxl_memdev_rwsem);
}
-static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+static void cxl_memdev_unregister(void *_cxlmd)
{
- struct cxl_memdev *cxlmd =
- container_of(ref, typeof(*cxlmd), ops_active);
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
- complete(&cxlmd->ops_dead);
+ cdev_device_del(&cxlmd->cdev, dev);
+ cxl_memdev_shutdown(cxlmd);
+ put_device(dev);
}
-static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
{
struct pci_dev *pdev = cxlm->pdev;
struct cxl_memdev *cxlmd;
@@ -1180,22 +1191,11 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
if (!cxlmd)
- return -ENOMEM;
- init_completion(&cxlmd->ops_dead);
-
- /*
- * @cxlm is deallocated when the driver unbinds so operations
- * that are using it need to hold a live reference.
- */
- cxlmd->cxlm = cxlm;
- rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
- GFP_KERNEL);
- if (rc)
- goto err_ref;
+ return ERR_PTR(-ENOMEM);
rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
if (rc < 0)
- goto err_id;
+ goto err;
cxlmd->id = rc;
dev = &cxlmd->dev;
@@ -1204,30 +1204,54 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
dev->type = &cxl_memdev_type;
- dev_set_name(dev, "mem%d", cxlmd->id);
+ device_set_pm_not_required(dev);
cdev = &cxlmd->cdev;
cdev_init(cdev, &cxl_memdev_fops);
+ return cxlmd;
+
+err:
+ kfree(cxlmd);
+ return ERR_PTR(rc);
+}
+
+static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+{
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = cxl_memdev_alloc(cxlm);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+ dev = &cxlmd->dev;
+ rc = dev_set_name(dev, "mem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ /*
+ * Activate ioctl operations, no cxl_memdev_rwsem manipulation
+ * needed as this is ordered with cdev_add() publishing the device.
+ */
+ cxlmd->cxlm = cxlm;
+ cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
if (rc)
- goto err_add;
+ goto err;
- return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+ return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
+ cxlmd);
-err_add:
- ida_free(&cxl_memdev_ida, cxlmd->id);
-err_id:
+err:
/*
- * Theoretically userspace could have already entered the fops,
- * so flush ops_active.
+ * The cdev was briefly live, shutdown any ioctl operations that
+ * saw that state.
*/
- percpu_ref_kill(&cxlmd->ops_active);
- wait_for_completion(&cxlmd->ops_dead);
- percpu_ref_exit(&cxlmd->ops_active);
-err_ref:
- kfree(cxlmd);
-
+ cxl_memdev_shutdown(cxlmd);
+ put_device(dev);
return rc;
}
@@ -1396,6 +1420,7 @@ out:
*/
static int cxl_mem_identify(struct cxl_mem *cxlm)
{
+ /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify {
char fw_revision[0x10];
__le64 total_capacity;
@@ -1424,10 +1449,11 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
* For now, only the capacity is exported in sysfs
*/
cxlm->ram_range.start = 0;
- cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
+ cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
cxlm->pmem_range.start = 0;
- cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
+ cxlm->pmem_range.end =
+ le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
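
The cxl/mem.c hunks above replace the percpu_ref/completion machinery with a simpler lifetime scheme: open() pins the device, ioctl() takes a shared rwsem and checks that the driver data is still attached, and unregister clears that pointer under the write lock so in-flight ioctls drain before the data goes away. A minimal hedged sketch of that pattern follows; the names (example_dev, __example_ioctl) are illustrative placeholders, not the real cxl symbols.

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/rwsem.h>

struct example_dev {
	struct device dev;
	struct cdev cdev;
	void *hw;			/* driver data, cleared on unbind */
};

static DECLARE_RWSEM(example_rwsem);

static int example_open(struct inode *inode, struct file *file)
{
	struct example_dev *ed = container_of(inode->i_cdev, struct example_dev, cdev);

	get_device(&ed->dev);		/* pin the device for the file's lifetime */
	file->private_data = ed;
	return 0;
}

static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct example_dev *ed = file->private_data;
	long rc = -ENXIO;

	down_read(&example_rwsem);
	if (ed->hw)			/* still bound to a live driver? */
		rc = __example_ioctl(ed, cmd, arg);	/* assumed helper */
	up_read(&example_rwsem);

	return rc;
}

static void example_unregister(struct example_dev *ed)
{
	cdev_device_del(&ed->cdev, &ed->dev);

	/* Taking the write lock waits out any in-flight ioctls. */
	down_write(&example_rwsem);
	ed->hw = NULL;
	up_write(&example_rwsem);

	put_device(&ed->dev);
}
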
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 452e85ae87a8..5aee26e1bbd6 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -90,13 +90,11 @@ static ssize_t do_id_store(struct device_driver *drv, const char *buf,
list_add(&dax_id->list, &dax_drv->ids);
} else
rc = -ENOMEM;
- } else
- /* nothing to remove */;
+ }
} else if (action == ID_REMOVE) {
list_del(&dax_id->list);
kfree(dax_id);
- } else
- /* dax_id already added */;
+ }
mutex_unlock(&dax_bus_lock);
if (rc < 0)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index fe6a460c4373..af3ee288bc11 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
kfree(chan->dev);
err_free_local:
free_percpu(chan->local);
+ chan->local = NULL;
return rc;
}
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index e5162690de8f..db25f9b7778c 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -10,6 +10,7 @@ config DW_DMAC_CORE
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
+ depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller. This
@@ -18,6 +19,7 @@ config DW_DMAC
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
depends on PCI
+ depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 84a6ea60ecf0..31c819544a22 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}
+void idxd_wq_reset(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand;
+
+ if (wq->state != IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+ return;
+ }
+
+ operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+ idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+ wq->state = IDXD_WQ_DISABLED;
+}
+
int idxd_wq_map_portal(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
- int i, wq_offset;
lockdep_assert_held(&idxd->dev_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
-
- for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
- wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
- iowrite32(0, idxd->reg_base + wq_offset);
- dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
- wq->id, i, wq_offset,
- ioread32(idxd->reg_base + wq_offset));
- }
}
/* Device control bits */
@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
}
/* Device configuration bits */
+void idxd_msix_perm_setup(struct idxd_device *idxd)
+{
+ union msix_perm mperm;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(idxd->pdev);
+ if (msixcnt < 0)
+ return;
+
+ mperm.bits = 0;
+ mperm.pasid = idxd->pasid;
+ mperm.pasid_en = device_pasid_enabled(idxd);
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
+void idxd_msix_perm_clear(struct idxd_device *idxd)
+{
+ union msix_perm mperm;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(idxd->pdev);
+ if (msixcnt < 0)
+ return;
+
+ mperm.bits = 0;
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
static void idxd_group_config_write(struct idxd_group *group)
{
struct idxd_device *idxd = group->idxd;
@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (!wq->group)
return 0;
- memset(wq->wqcfg, 0, idxd->wqcfg_size);
+ /*
+ * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
+ * wq reset. This will copy back the sticky values that are present on some devices.
+ */
+ for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+ wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+ wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+ }
/* byte 0-3 */
wq->wqcfg->wq_size = wq->size;
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 81a0e65fd316..76014c14f473 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -316,6 +316,8 @@ void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
/* device interrupt control */
+void idxd_msix_perm_setup(struct idxd_device *idxd);
+void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
@@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
+void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 085a0c3b62c6..6584b0ec07d5 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -65,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
- union msix_perm mperm;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
@@ -144,14 +143,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}
idxd_unmask_error_interrupts(idxd);
-
- /* Setup MSIX permission table */
- mperm.bits = 0;
- mperm.pasid = idxd->pasid;
- mperm.pasid_en = device_pasid_enabled(idxd);
- for (i = 1; i < msixcnt; i++)
- iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-
+ idxd_msix_perm_setup(idxd);
return 0;
err_no_irq:
@@ -510,6 +502,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
idxd_flush_work_list(irq_entry);
}
+ idxd_msix_perm_clear(idxd);
destroy_workqueue(idxd->wq);
}
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index a60ca11a5784..f1463fc58112 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
- iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+ iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
+ idxd->reg_base + IDXD_SWERR_OFFSET);
if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
int id = idxd->sw_err.wq_idx;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 4dbb03c545e4..18bf4d148989 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
- int rc;
mutex_lock(&wq->wq_lock);
dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
@@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq)
idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq);
- rc = idxd_wq_disable(wq);
+ idxd_wq_reset(wq);
idxd_wq_free_resources(wq);
wq->client_count = 0;
mutex_unlock(&wq->wq_lock);
- if (rc < 0)
- dev_warn(dev, "Failed to disable %s: %d\n",
- dev_name(&wq->conf_dev), rc);
- else
- dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+ dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
static int idxd_config_bus_remove(struct device *dev)
@@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev,
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
- if (wq->state != IDXD_WQ_DISABLED)
+ if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
@@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
+ int i, rc = 0;
+
+ for (i = 0; i < 4; i++)
+ rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
- return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+ rc--;
+ rc += sysfs_emit_at(buf, rc, "\n");
+ return rc;
}
static DEVICE_ATTR_RO(op_cap);
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
index f387c5bbc170..166934544161 100644
--- a/drivers/dma/plx_dma.c
+++ b/drivers/dma/plx_dma.c
@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
KBUILD_MODNAME, plxdev);
- if (rc) {
- kfree(plxdev);
- return rc;
- }
+ if (rc)
+ goto free_plx;
spin_lock_init(&plxdev->ring_lock);
tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = dma_async_device_register(dma);
if (rc) {
pci_err(pdev, "Failed to register dma device: %d\n", rc);
- free_irq(pci_irq_vector(pdev, 0), plxdev);
- kfree(plxdev);
- return rc;
+ goto put_device;
}
pci_set_drvdata(pdev, plxdev);
return 0;
+
+put_device:
+ put_device(&pdev->dev);
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+free_plx:
+ kfree(plxdev);
+
+ return rc;
}
static int plx_dma_probe(struct pci_dev *pdev,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 71827d9b0aa1..b7260749e8ee 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
goto end;
}
if (!tdc->busy) {
- err = pm_runtime_get_sync(tdc->tdma->dev);
+ err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
goto end;
@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
int err;
- err = pm_runtime_get_sync(tdc->tdma->dev);
+ err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
return;
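
The tegra20-apb-dma substitution above matters because pm_runtime_get_sync() bumps the usage count even when the resume fails, so error paths must drop it by hand, whereas pm_runtime_resume_and_get() drops it internally on failure. A hedged sketch of the two call shapes; example_start_*() are made-up names.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Old style: the usage count stays raised on failure and must be undone. */
static int example_start_old(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);

	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* undo the count by hand */
		return err;
	}

	return 0;
}

/* New style: the helper drops the usage count itself when resume fails. */
static int example_start_new(struct device *dev)
{
	int err = pm_runtime_resume_and_get(dev);

	if (err < 0)
		return err;

	return 0;
}
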
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 55df63dead8d..70b29bd079c9 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
struct xilinx_dpdma_tx_desc *desc;
struct virt_dma_desc *vdesc;
u32 reg, channels;
+ bool first_frame;
lockdep_assert_held(&chan->lock);
@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
chan->running = true;
}
- if (chan->video_group)
- channels = xilinx_dpdma_chan_video_group_ready(chan);
- else
- channels = BIT(chan->id);
-
- if (!channels)
- return;
-
vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
return;
@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
upper_32_bits(sw_desc->dma_addr)));
- if (chan->first_frame)
+ first_frame = chan->first_frame;
+ chan->first_frame = false;
+
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ /*
+ * Trigger the transfer only when all channels in the group are
+ * ready.
+ */
+ if (!channels)
+ return;
+ } else {
+ channels = BIT(chan->id);
+ }
+
+ if (first_frame)
reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
else
reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
- chan->first_frame = false;
-
dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}
@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
*/
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
- struct xilinx_dpdma_tx_desc *active = chan->desc.active;
+ struct xilinx_dpdma_tx_desc *active;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
xilinx_dpdma_debugfs_desc_done_irq(chan);
+ active = chan->desc.active;
if (active)
vchan_cyclic_callback(&active->vdesc);
else
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 26c5466b8179..ae49bb23c6ed 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
long gpio;
struct gpio_desc *desc;
int status;
+ struct gpio_chip *gc;
+ int offset;
status = kstrtol(buf, 0, &gpio);
if (status < 0)
@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
+ gc = desc->gdev->chip;
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(gc, offset)) {
+ pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+ return -EINVAL;
+ }
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done by on behalf of userspace, so
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index dbac16641662..ddecc84fd6f0 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmi.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
@@ -22,9 +23,13 @@
#define ACEL_EN BIT(0)
#define GYRO_EN BIT(1)
-#define MAGNO_EN BIT(2)
+#define MAGNO_EN BIT(2)
#define ALS_EN BIT(19)
+static int sensor_mask_override = -1;
+module_param_named(sensor_mask, sensor_mask_override, int, 0444);
+MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
+static const struct dmi_system_id dmi_sensor_mask_overrides[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"),
+ },
+ .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"),
+ },
+ .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+ },
+ { }
+};
+
int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
{
int activestatus, num_of_sensors = 0;
+ const struct dmi_system_id *dmi_id;
+ u32 activecontrolstatus;
+
+ if (sensor_mask_override == -1) {
+ dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
+ if (dmi_id)
+ sensor_mask_override = (long)dmi_id->driver_data;
+ }
+
+ if (sensor_mask_override >= 0) {
+ activestatus = sensor_mask_override;
+ } else {
+ activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
+ activestatus = activecontrolstatus >> 4;
+ }
- privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
- activestatus = privdata->activecontrolstatus >> 4;
if (ACEL_EN & activestatus)
sensor_id[num_of_sensors++] = accel_idx;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 8f8d19b2cfe5..489415f7c22c 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -61,7 +61,6 @@ struct amd_mp2_dev {
struct pci_dev *pdev;
struct amdtp_cl_data *cl_data;
void __iomem *mmio;
- u32 activecontrolstatus;
};
struct amd_mp2_sensor_info {
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 3feaece13ade..6b665931147d 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (input_register_device(data->input2)) {
input_free_device(input2);
+ ret = -ENOENT;
goto exit;
}
}
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 1dfe184ebf5a..2ab22b925941 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1222,6 +1222,9 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 21e15627a461..477baa30889c 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -161,6 +161,7 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
+ struct irq_chip irq;
u8 *in_out_buffer;
struct mutex lock;
@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip cp2112_gpio_irqchip = {
- .name = "cp2112-gpio",
- .irq_startup = cp2112_gpio_irq_startup,
- .irq_shutdown = cp2112_gpio_irq_shutdown,
- .irq_ack = cp2112_gpio_irq_ack,
- .irq_mask = cp2112_gpio_irq_mask,
- .irq_unmask = cp2112_gpio_irq_unmask,
- .irq_set_type = cp2112_gpio_irq_type,
-};
-
static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
int pin)
{
@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;
+ dev->irq.name = "cp2112-gpio";
+ dev->irq.irq_startup = cp2112_gpio_irq_startup;
+ dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
+ dev->irq.irq_ack = cp2112_gpio_irq_ack;
+ dev->irq.irq_mask = cp2112_gpio_irq_mask;
+ dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
+ dev->irq.irq_set_type = cp2112_gpio_irq_type;
+ dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
+
girq = &dev->gc.irq;
- girq->chip = &cp2112_gpio_irqchip;
+ girq->chip = &dev->irq;
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index d9319622da44..e60c31dd05ff 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -574,6 +574,8 @@ static void hammer_remove(struct hid_device *hdev)
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e42aaae3138f..67fd8a2f5aba 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -194,6 +194,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
#define USB_VENDOR_ID_ATEN 0x0557
@@ -493,6 +494,7 @@
#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
+#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 44d715c12f6a..2d70dc4bea65 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
!wacom_wac->shared->is_touch_on) {
if (!wacom_wac->shared->touch_down)
return;
- prox = 0;
+ prox = false;
}
wacom_wac->hid_data.num_received++;
@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
if (!(features->device_type & WACOM_DEVICETYPE_PEN))
return -ENODEV;
@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
return 0;
}
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
return -ENODEV;
@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
/* setup has already been done */
return 0;
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
if (features->touch_max == 1) {
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index c590d36b5fd1..5c8e94b6cdb5 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -221,6 +221,10 @@ mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data)
writel(0, drv_data->reg_base + drv_data->reg_offsets.ext_addr);
writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP,
drv_data->reg_base + drv_data->reg_offsets.control);
+
+ if (drv_data->errata_delay)
+ udelay(5);
+
drv_data->state = MV64XXX_I2C_STATE_IDLE;
}
diff --git a/drivers/input/joystick/n64joy.c b/drivers/input/joystick/n64joy.c
index 8bcc529942bc..9dbca366613e 100644
--- a/drivers/input/joystick/n64joy.c
+++ b/drivers/input/joystick/n64joy.c
@@ -252,8 +252,8 @@ static int __init n64joy_probe(struct platform_device *pdev)
mutex_init(&priv->n64joy_mutex);
priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!priv->reg_base) {
- err = -EINVAL;
+ if (IS_ERR(priv->reg_base)) {
+ err = PTR_ERR(priv->reg_base);
goto fail;
}
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index 63d5e488137d..e9fa1423f136 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+static int nspire_keypad_open(struct input_dev *input)
{
+ struct nspire_keypad *keypad = input_get_drvdata(input);
unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
+ int error;
+
+ error = clk_prepare_enable(keypad->clk);
+ if (error)
+ return error;
cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
if (cycles_per_us == 0)
@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
keypad->int_mask = 1 << 1;
writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
- /* Disable GPIO interrupts to prevent hanging on touchpad */
- /* Possibly used to detect touchpad events */
- writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
- /* Acknowledge existing interrupts */
- writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
-
- return 0;
-}
-
-static int nspire_keypad_open(struct input_dev *input)
-{
- struct nspire_keypad *keypad = input_get_drvdata(input);
- int error;
-
- error = clk_prepare_enable(keypad->clk);
- if (error)
- return error;
-
- error = nspire_keypad_chip_init(keypad);
- if (error) {
- clk_disable_unprepare(keypad->clk);
- return error;
- }
-
return 0;
}
@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);
+ /* Disable interrupts */
+ writel(0, keypad->reg_base + KEYPAD_INTMSK);
+ /* Acknowledge existing interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_INT);
+
clk_disable_unprepare(keypad->clk);
}
@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ error = clk_prepare_enable(keypad->clk);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return error;
+ }
+
+ /* Disable interrupts */
+ writel(0, keypad->reg_base + KEYPAD_INTMSK);
+ /* Acknowledge existing interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_INT);
+
+ /* Disable GPIO interrupts to prevent hanging on touchpad */
+ /* Possibly used to detect touchpad events */
+ writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+ /* Acknowledge existing GPIO interrupts */
+ writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+
+ clk_disable_unprepare(keypad->clk);
+
input_set_drvdata(input, keypad);
input->id.bustype = BUS_HOST;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 9119e12a5778..a5a003553646 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
+ }, {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 4c2b579f6c8b..5f7706febcb0 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1441,7 +1441,7 @@ static int elants_i2c_probe(struct i2c_client *client,
touchscreen_parse_properties(ts->input, true, &ts->prop);
- if (ts->chip_id == EKTF3624) {
+ if (ts->chip_id == EKTF3624 && ts->phy_x && ts->phy_y) {
/* calculate resolution from size */
ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x);
ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y);
@@ -1449,8 +1449,7 @@ static int elants_i2c_probe(struct i2c_client *client,
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
- if (ts->major_res > 0)
- input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
+ input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index b63d7fdf0cd2..85a1f465c097 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
u8 major = event[4];
u8 minor = event[5];
u8 z = event[6] & S6SY761_MASK_Z;
- u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
- u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
+ u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
+ u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
input_mt_slot(sdata->input, tid);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 66f4c6398f67..cea2b3789736 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
u8 *res;
position = (index + rsb) * v->fec->roots;
- block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
+ block = div64_u64_rem(position, v->fec->io_size, &rem);
*offset = (unsigned)rem;
res = dm_bufio_read(v->fec->bufio, block, buf);
@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
/* read the next block when we run out of parity bytes */
offset += v->fec->roots;
- if (offset >= v->fec->roots << SECTOR_SHIFT) {
+ if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
return -E2BIG;
}
+ if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
+ f->io_size = 1 << v->data_dev_block_bits;
+ else
+ f->io_size = v->fec->roots << SECTOR_SHIFT;
+
f->bufio = dm_bufio_client_create(f->dev->bdev,
- f->roots << SECTOR_SHIFT,
+ f->io_size,
1, 0, NULL, NULL);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 42fbd3a7fc9f..3c46c8d61883 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -36,6 +36,7 @@ struct dm_verity_fec {
struct dm_dev *dev; /* parity data device */
struct dm_bufio_client *data_bufio; /* for data dev access */
struct dm_bufio_client *bufio; /* for parity data access */
+ size_t io_size; /* IO size for roots */
sector_t start; /* parity data start in blocks */
sector_t blocks; /* number of blocks covered */
sector_t rounds; /* number of interleaving rounds */
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 57f1f1708994..5c5c92132287 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
return 0;
case NAND_OP_WAITRDY_INSTR:
return readl_poll_timeout(nfc->regs + NFI_STA, status,
- status & STA_BUSY, 20,
- instr->ctx.waitrdy.timeout_ms);
+ !(status & STA_BUSY), 20,
+ instr->ctx.waitrdy.timeout_ms * 1000);
default:
break;
}
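
The mtk_nand fix above turns on two details of readl_poll_timeout() from <linux/iopoll.h>: the condition argument is the expression that terminates the poll successfully (so waiting for ready means polling until the busy bit is clear), and the timeout argument is in microseconds, hence the ms-to-us conversion. A small hedged sketch with a placeholder register layout:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_STA_BUSY	BIT(8)	/* placeholder busy flag */

static int example_wait_ready(void __iomem *sta_reg, unsigned int timeout_ms)
{
	u32 status;

	/* Sleep ~20us between reads; stop once BUSY clears; timeout is in us. */
	return readl_poll_timeout(sta_reg, status,
				  !(status & EXAMPLE_STA_BUSY),
				  20, (u64)timeout_ms * 1000);
}
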
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 903d619e08ed..e08bf9377140 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3026,10 +3026,17 @@ out_resources:
return err;
}
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+};
+
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 prod_id;
u16 val;
int err;
@@ -3040,23 +3047,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mv88e6xxx_reg_unlock(chip);
- if (reg == MII_PHYSID2) {
- /* Some internal PHYs don't have a model number. */
- if (chip->info->family != MV88E6XXX_FAMILY_6165)
- /* Then there is the 6165 family. It gets is
- * PHYs correct. But it can also have two
- * SERDES interfaces in the PHY address
- * space. And these don't have a model
- * number. But they are not PHYs, so we don't
- * want to give them something a PHY driver
- * will recognise.
- *
- * Use the mv88e6390 family model number
- * instead, for anything which really could be
- * a PHY,
- */
- if (!(val & 0x3f0))
- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+ /* Some internal PHYs don't have a model number. */
+ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+ prod_id = family_prod_id_table[chip->info->family];
+ if (prod_id)
+ val |= prod_id >> 4;
}
return err ? err : val;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6e5cf490c01d..0f6a6cb7e98d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3918,6 +3918,7 @@ static int macb_init(struct platform_device *pdev)
reg = gem_readl(bp, DCFG8);
bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
GEM_BFEXT(T2SCR, reg));
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3928,7 +3929,6 @@ static int macb_init(struct platform_device *pdev)
/* Filtering is supported in hw but don't enable it in kernel now */
dev->hw_features |= NETIF_F_NTUPLE;
/* init Rx flow definitions */
- INIT_LIST_HEAD(&bp->rx_fs_list.list);
bp->rx_fs_list.count = 0;
spin_lock_init(&bp->rx_fs_lock);
} else
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
index b248966837b4..7aad40b2aa73 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -412,7 +412,7 @@
| CN6XXX_INTR_M0UNWI_ERR \
| CN6XXX_INTR_M1UPB0_ERR \
| CN6XXX_INTR_M1UPWI_ERR \
- | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UNB0_ERR \
| CN6XXX_INTR_M1UNWI_ERR \
| CN6XXX_INTR_INSTR_DB_OF_ERR \
| CN6XXX_INTR_SLIST_DB_OF_ERR \
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 1115b8f9ea4e..a3f5b80888e5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -350,18 +350,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
}
/*
- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
- * @tx_info - driver specific tls info.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
-{
- return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
- TCB_T_STATE_V(TCB_T_STATE_M),
- CHCR_TCB_STATE_CLOSED, 1);
-}
-
-/*
* chcr_ktls_dev_del: call back for tls_dev_del.
* Remove the tid and l2t entry and close the connection.
* it per connection basis.
@@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
/* clear tid */
if (tx_info->tid != -1) {
- /* clear tcb state and then release tid */
- chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
}
@@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
return 0;
free_tid:
- chcr_ktls_mark_tcb_close(tx_info);
#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)
@@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (tx_info->pending_close) {
spin_unlock(&tx_info->lock);
if (!status) {
- /* it's a late success, tcb status is established,
- * mark it close.
- */
- chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tid, tx_info->ip_family);
}
@@ -1664,54 +1645,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
}
/*
- * chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid
- * sending the same segment again. It will discard the segment which is before
- * the current tx max.
- * @tx_info - driver specific tls info.
- * @q - TX queue.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
- struct sge_eth_txq *q)
-{
- struct fw_ulptx_wr *wr;
- unsigned int ndesc;
- int credits;
- void *pos;
- u32 len;
-
- len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
- ndesc = DIV_ROUND_UP(len, 64);
-
- credits = chcr_txq_avail(&q->q) - ndesc;
- if (unlikely(credits < 0)) {
- chcr_eth_txq_stop(q);
- return NETDEV_TX_BUSY;
- }
-
- pos = &q->q.desc[q->q.pidx];
-
- wr = pos;
- /* ULPTX wr */
- wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
- wr->cookie = 0;
- /* fill len in wr field */
- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
-
- pos += sizeof(*wr);
-
- pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
- TCB_SND_UNA_RAW_W,
- TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
- TCB_SND_UNA_RAW_V(0), 0);
-
- chcr_txq_advance(&q->q, ndesc);
- cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
-
- return 0;
-}
-
-/*
* chcr_end_part_handler: This handler will handle the record which
* is complete or if record's end part is received. T6 adapter has a issue that
* it can't send out TAG with partial record so if its an end part then we have
@@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
+ bool free_skb_if_tx_fails = false;
struct sk_buff *nskb = NULL;
+
/* check if it is a complete record */
if (tls_end_offset == record->len) {
nskb = skb;
@@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
if (last_wr)
dev_kfree_skb_any(skb);
+ else
+ free_skb_if_tx_fails = true;
last_wr = true;
@@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
+ if (free_skb_if_tx_fails)
+ dev_kfree_skb_any(skb);
goto out;
}
tx_info->prev_seq = record->end_seq;
@@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
}
- /* reset snd una, so the middle record won't send the already
- * sent part.
- */
- if (chcr_ktls_update_snd_una(tx_info, q))
- goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
@@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
* we will send the complete record again.
*/
+ spin_lock_irqsave(&tx_ctx->base.lock, flags);
+
do {
- int i;
cxgb4_reclaim_completed_tx(adap, &q->q, true);
- /* lock taken */
- spin_lock_irqsave(&tx_ctx->base.lock, flags);
/* fetch the tls record */
record = tls_get_record(&tx_ctx->base, tcp_seq,
&tx_info->record_no);
@@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
tls_end_offset, skb_offset,
0);
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (ret) {
/* free the refcount taken earlier */
if (tls_end_offset < data_len)
dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
goto out;
}
@@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
continue;
}
- /* increase page reference count of the record, so that there
- * won't be any chance of page free in middle if in case stack
- * receives ACK and try to delete the record.
- */
- for (i = 0; i < record->num_frags; i++)
- __skb_frag_ref(&record->frags[i]);
- /* lock cleared */
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-
-
/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
data_len = 0;
}
- /* clear the frag ref count which increased locally before */
- for (i = 0; i < record->num_frags; i++) {
- /* clear the frag ref count */
- __skb_frag_unref(&record->frags[i]);
- }
/* if any failure, come out from the loop. */
if (ret) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (th->fin)
dev_kfree_skb_any(skb);
@@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
} while (data_len > 0);
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 252adfa5d837..8a9096aa85cd 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1471,8 +1471,10 @@ dm9000_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
- if (!ndev)
- return -ENOMEM;
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto out_regulator_disable;
+ }
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9c6438d3b3a5..ffb2a91750c7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1149,19 +1149,13 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
+ ibmvnic_napi_disable(adapter);
release_resources(adapter);
return rc;
}
netif_tx_start_all_queues(netdev);
- if (prev_state == VNIC_CLOSED) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
- }
-
adapter->state = VNIC_OPEN;
return rc;
}
@@ -1922,7 +1916,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
- int i, rc;
+ int rc;
netdev_dbg(adapter->netdev,
"[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
@@ -2111,10 +2105,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* refresh device's multicast list */
ibmvnic_set_multi(netdev);
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
adapter->reset_reason == VNIC_RESET_MOBILITY)
__netdev_notify_peers(netdev);
@@ -3204,9 +3194,6 @@ restart_loop:
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
- if (next->tx_comp.rcs[i])
- dev_err(dev, "tx error %x\n",
- next->tx_comp.rcs[i]);
index = be32_to_cpu(next->tx_comp.correlators[i]);
if (index & IBMVNIC_TSO_POOL_MASK) {
tx_pool = &adapter->tso_pool[pool];
@@ -3220,7 +3207,13 @@ restart_loop:
num_entries += txbuff->num_entries;
if (txbuff->skb) {
total_bytes += txbuff->skb->len;
- dev_consume_skb_irq(txbuff->skb);
+ if (next->tx_comp.rcs[i]) {
+ dev_err(dev, "tx error %x\n",
+ next->tx_comp.rcs[i]);
+ dev_kfree_skb_irq(txbuff->skb);
+ } else {
+ dev_consume_skb_irq(txbuff->skb);
+ }
txbuff->skb = NULL;
} else {
netdev_warn(adapter->netdev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 30ad7c08d0fb..527023ee4c07 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -12357,6 +12357,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
{
int err = 0;
int size;
+ u16 pow;
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -12375,6 +12376,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
+
+ /* find the next higher power-of-2 of num cpus */
+ pow = roundup_pow_of_two(num_online_cpus());
+ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 211ac6f907ad..28e834a128c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -747,8 +747,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
struct ice_port_info *pi)
{
u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
- u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
- u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
+ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
+ u8 i, err, sync, oper, app_index, ice_app_sel_type;
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 03d9aad516d4..cffb95f8f632 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6536,6 +6536,13 @@ err_setup_tx:
return err;
}
+static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
+{
+ struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
+
+ return q_vector ? q_vector->napi.napi_id : 0;
+}
+
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
* @adapter: pointer to ixgbe_adapter
@@ -6583,7 +6590,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
/* XDP RX-queue info */
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
+ rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
goto err;
rx_ring->xdp_prog = adapter->xdp_prog;
@@ -6892,6 +6899,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
adapter->hw.hw_addr = adapter->io_addr;
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ e_dev_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d7d8a68ef23d..d0f9d3cee97d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -246,6 +246,11 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
struct mlx5_devlink_trap *dl_trap;
int err = 0;
+ if (is_mdev_switchdev_mode(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
+ return -EOPNOTSUPP;
+ }
+
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 308fd279669e..89510cac46c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
- do { \
- unsigned long policy_long; \
- u16 *__policy = &(policy); \
- bool _write = (write); \
- \
- policy_long = *__policy; \
- if (_write && *__policy) \
- *__policy = find_first_bit(&policy_long, \
- sizeof(policy_long) * BITS_PER_BYTE);\
- MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
- if (!_write && *__policy) \
- *__policy = 1 << *__policy; \
- } while (0)
-
/* get/set FEC admin field for a given speed */
static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
enum mlx5e_fec_supported_link_mode link_mode)
@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
break;
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index df2a0af854bb..d675107d9eca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1895,6 +1895,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
return 0;
flow_rule_match_meta(rule, &match);
+ if (!match.mask->ingress_ifindex)
+ return 0;
+
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EOPNOTSUPP;
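
Note on the en_tc.c hunk: with the added check, an all-zero ingress_ifindex mask now means "no ifindex criterion" and parsing succeeds; only a partial mask is rejected. A small userspace sketch of that ordering follows, using stand-in types and plain errno values rather than the real flow-offload structures.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct meta_match { uint32_t ifindex_key; uint32_t ifindex_mask; };

static int parse_meta(const struct meta_match *m, uint32_t filter_ifindex)
{
	if (!m->ifindex_mask)			/* no ingress ifindex criterion at all */
		return 0;
	if (m->ifindex_mask != 0xFFFFFFFF)	/* partial masks unsupported */
		return -EOPNOTSUPP;
	if (m->ifindex_key != filter_ifindex)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct meta_match none = { 0, 0 };
	struct meta_match partial = { 5, 0x0000FFFF };
	struct meta_match exact = { 5, 0xFFFFFFFF };

	printf("no mask:      %d\n", parse_meta(&none, 5));
	printf("partial mask: %d\n", parse_meta(&partial, 5));
	printf("exact mask:   %d\n", parse_meta(&exact, 5));
	return 0;
}
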
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 581a92fc3292..1df2c002c9f6 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2350,6 +2350,13 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
pcie_set_readrq(tp->pci_dev, readrq);
+
+ /* Chip doesn't support pause in jumbo mode */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ tp->phydev->advertising, !jumbo);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ tp->phydev->advertising, !jumbo);
+ phy_start_aneg(tp->phydev);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4630,8 +4637,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (!tp->supports_gmii)
phy_set_max_speed(phydev, SPEED_100);
- phy_support_asym_pause(phydev);
-
phy_attached_info(phydev);
return 0;
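
Note on the r8169 hunks: pause advertisement is now toggled per jumbo state at rtl_jumbo_config() time, and the blanket phy_support_asym_pause() call is dropped. The sketch below shows the set-or-clear-from-one-call-site pattern that linkmode_mod_bit() provides, as plain userspace C; the bit numbers are invented for the example, not the real ETHTOOL_LINK_MODE values.

#include <stdio.h>

#define PAUSE_BIT	13
#define ASYM_PAUSE_BIT	14

/* set the bit when "set" is non-zero, clear it otherwise */
static void mod_bit(unsigned long *map, int nr, int set)
{
	if (set)
		*map |= 1UL << nr;
	else
		*map &= ~(1UL << nr);
}

int main(void)
{
	unsigned long advertising = 0;
	int jumbo = 1;		/* pretend jumbo frames were just enabled */

	/* chip can't do pause in jumbo mode, so advertise it only when !jumbo */
	mod_bit(&advertising, PAUSE_BIT, !jumbo);
	mod_bit(&advertising, ASYM_PAUSE_BIT, !jumbo);
	printf("advertising with jumbo on:  %#lx\n", advertising);

	jumbo = 0;
	mod_bit(&advertising, PAUSE_BIT, !jumbo);
	mod_bit(&advertising, ASYM_PAUSE_BIT, !jumbo);
	printf("advertising with jumbo off: %#lx\n", advertising);
	return 0;
}
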
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 208cae344ffa..4749bd0af160 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1380,88 +1380,6 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
}
/**
- * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
- * @priv: driver private structure
- * Description: this function is called to re-allocate a receive buffer, perform
- * the DMA mapping and init the descriptor.
- */
-static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
-{
- u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
- int i;
-
- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (buf->page) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
- buf->page = NULL;
- }
-
- if (priv->sph && buf->sec_page) {
- page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
- buf->sec_page = NULL;
- }
- }
- }
-
- for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
- struct dma_desc *p;
-
- if (priv->extend_desc)
- p = &((rx_q->dma_erx + i)->basic);
- else
- p = rx_q->dma_rx + i;
-
- if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
- if (!buf->page)
- goto err_reinit_rx_buffers;
-
- buf->addr = page_pool_get_dma_addr(buf->page);
- }
-
- if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
- if (!buf->sec_page)
- goto err_reinit_rx_buffers;
-
- buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
- }
-
- stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->sph)
- stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
- else
- stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
- stmmac_init_desc3(priv, p);
- }
- }
-
- return;
-
-err_reinit_rx_buffers:
- do {
- while (--i >= 0)
- stmmac_free_rx_buffer(priv, queue, i);
-
- if (queue == 0)
- break;
-
- i = priv->dma_rx_size;
- } while (queue-- > 0);
-}
-
-/**
* init_dma_rx_desc_rings - init the RX descriptor rings
* @dev: net device structure
* @flags: gfp flag.
@@ -5428,7 +5346,7 @@ int stmmac_resume(struct device *dev)
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
- stmmac_reinit_rx_buffers(priv);
+
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index d5b1e48e0c09..42f31c681846 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return -EINVAL;
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
geneve->cfg.info.key.tp_dst, sport);
@@ -985,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return -EINVAL;
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
geneve->cfg.info.key.tp_dst, sport);
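
Note on the geneve hunks: both xmit paths now verify that a full IP header is actually present before the tunnel code goes on to read it. A userspace sketch of that length guard, with simplified stand-ins for the skb and header types:

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct ipv4_hdr {
	unsigned char ver_ihl, tos;
	unsigned short tot_len;
	unsigned char rest[16];	/* remaining bytes of the 20-byte header */
};

struct pkt { const unsigned char *data; size_t len; };

static int xmit(const struct pkt *p)
{
	if (p->len < sizeof(struct ipv4_hdr))
		return -EINVAL;	/* truncated header: reject, don't parse */
	/* safe to read the header fields from here on */
	return 0;
}

int main(void)
{
	unsigned char buf[64] = { 0x45 };
	struct pkt full = { buf, sizeof(buf) };
	struct pkt runt = { buf, 7 };

	printf("full packet: %d\n", xmit(&full));
	printf("runt packet: %d\n", xmit(&runt));
	return 0;
}
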
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e26a5d663f8a..8018ddf7f316 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -3021,9 +3021,34 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
- .phy_id = MARVELL_PHY_ID_88E6390,
+ .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88E6390",
+ .name = "Marvell 88E6341 Family",
+ /* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = m88e1510_probe,
+ .config_init = marvell_config_init,
+ .config_aneg = m88e6390_config_aneg,
+ .read_status = marvell_read_status,
+ .config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
+ .cable_test_start = marvell_vct7_cable_test_start,
+ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+ .cable_test_get_status = marvell_vct7_cable_test_get_status,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E6390 Family",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.probe = m88e6390_probe,
@@ -3107,7 +3132,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
- { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
{ }
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 6d9130859c55..503e2fd7ce51 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -471,9 +471,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
skb_dst_drop(skb);
- /* if dst.dev is loopback or the VRF device again this is locally
- * originated traffic destined to a local address. Short circuit
- * to Rx path
+ /* if dst.dev is the VRF device again this is locally originated traffic
+ * destined to a local address. Short circuit to Rx path.
*/
if (dst->dev == dev)
return vrf_local_xmit(skb, dev, dst);
@@ -547,9 +546,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
skb_dst_drop(skb);
- /* if dst.dev is loopback or the VRF device again this is locally
- * originated traffic destined to a local address. Short circuit
- * to Rx path
+ /* if dst.dev is the VRF device again this is locally originated traffic
+ * destined to a local address. Short circuit to Rx path.
*/
if (rt->dst.dev == vrf_dev)
return vrf_local_xmit(skb, vrf_dev, &rt->dst);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index a5439c130130..d24b7a7993aa 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
- hotplug_status_changed,
- "%s/%s", dev->nodename, "hotplug-status");
- if (!err)
+ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+ NULL, hotplug_status_changed,
+ "%s/%s", dev->nodename,
+ "hotplug-status");
+ if (err)
+ goto err;
be->have_hotplug_status_watch = 1;
+ }
netif_tx_wake_all_queues(be->vif->dev);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 48f0985ca8a0..3a777d0073b7 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -631,16 +631,14 @@ void nvdimm_check_and_set_ro(struct gendisk *disk)
struct nd_region *nd_region = to_nd_region(dev->parent);
int disk_ro = get_disk_ro(disk);
- /*
- * Upgrade to read-only if the region is read-only preserve as
- * read-only if the disk is already read-only.
- */
- if (disk_ro || nd_region->ro == disk_ro)
+ /* catch the disk up with the region ro state */
+ if (disk_ro == nd_region->ro)
return;
- dev_info(dev, "%s read-only, marking %s read-only\n",
- dev_name(&nd_region->dev), disk->disk_name);
- set_disk_ro(disk, 1);
+ dev_info(dev, "%s read-%s, marking %s read-%s\n",
+ dev_name(&nd_region->dev), nd_region->ro ? "only" : "write",
+ disk->disk_name, nd_region->ro ? "only" : "write");
+ set_disk_ro(disk, nd_region->ro);
}
EXPORT_SYMBOL(nvdimm_check_and_set_ro);
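
Note on the nvdimm/bus.c hunk: the disk's read-only flag now tracks the region's state in both directions, rather than only ever being upgraded to read-only. A userspace sketch of the new behaviour, with stand-in types in place of gendisk and nd_region:

#include <stdio.h>
#include <stdbool.h>

struct region_stub { bool ro; };
struct disk_stub   { bool ro; const char *name; };

static void check_and_set_ro(struct disk_stub *disk, const struct region_stub *region)
{
	if (disk->ro == region->ro)	/* already in sync, nothing to do */
		return;

	printf("region read-%s, marking %s read-%s\n",
	       region->ro ? "only" : "write",
	       disk->name,
	       region->ro ? "only" : "write");
	disk->ro = region->ro;
}

int main(void)
{
	struct region_stub region = { .ro = true };
	struct disk_stub disk = { .ro = false, .name = "pmem0" };

	check_and_set_ro(&disk, &region);	/* disk follows region to read-only */
	region.ro = false;
	check_and_set_ro(&disk, &region);	/* and back to read-write */
	return 0;
}
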
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b8a85bfb2e95..7daac795db39 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -26,6 +26,7 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
+#include "btt.h"
#include "pfn.h"
#include "nd.h"
@@ -585,7 +586,7 @@ static void nd_pmem_shutdown(struct device *dev)
nvdimm_flush(to_nd_region(dev->parent), NULL);
}
-static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+static void pmem_revalidate_poison(struct device *dev)
{
struct nd_region *nd_region;
resource_size_t offset = 0, end_trunc = 0;
@@ -595,9 +596,6 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
struct range range;
struct kernfs_node *bb_state;
- if (event != NVDIMM_REVALIDATE_POISON)
- return;
-
if (is_nd_btt(dev)) {
struct nd_btt *nd_btt = to_nd_btt(dev);
@@ -635,6 +633,37 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
sysfs_notify_dirent(bb_state);
}
+static void pmem_revalidate_region(struct device *dev)
+{
+ struct pmem_device *pmem;
+
+ if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+ struct btt *btt = nd_btt->btt;
+
+ nvdimm_check_and_set_ro(btt->btt_disk);
+ return;
+ }
+
+ pmem = dev_get_drvdata(dev);
+ nvdimm_check_and_set_ro(pmem->disk);
+}
+
+static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+{
+ switch (event) {
+ case NVDIMM_REVALIDATE_POISON:
+ pmem_revalidate_poison(dev);
+ break;
+ case NVDIMM_REVALIDATE_REGION:
+ pmem_revalidate_region(dev);
+ break;
+ default:
+ dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
+ break;
+ }
+}
+
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ef23119db574..9ccf3d608799 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -518,6 +518,12 @@ static ssize_t read_only_show(struct device *dev,
return sprintf(buf, "%d\n", nd_region->ro);
}
+static int revalidate_read_only(struct device *dev, void *data)
+{
+ nd_device_notify(dev, NVDIMM_REVALIDATE_REGION);
+ return 0;
+}
+
static ssize_t read_only_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
@@ -529,6 +535,7 @@ static ssize_t read_only_store(struct device *dev,
return rc;
nd_region->ro = ro;
+ device_for_each_child(dev, NULL, revalidate_read_only);
return len;
}
static DEVICE_ATTR_RW(read_only);
@@ -1239,6 +1246,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
+ /* Test if an explicit flush function is defined */
+ if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
+ return 1;
+
+ /* Test if any flush hints for the region are available */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -1249,8 +1261,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
}
/*
- * The platform defines dimm devices without hints, assume
- * platform persistence mechanism like ADR
+ * The platform defines dimm devices without hints or explicit flush,
+ * assume a platform persistence mechanism like ADR
*/
return 0;
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 65e7e6b44578..5023e23db3bc 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1656,6 +1656,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
if (vma->vm_end < vma->vm_start)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
@@ -1664,7 +1666,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
int regnum = index - VFIO_PCI_NUM_REGIONS;
struct vfio_pci_region *region = vdev->region + regnum;
- if (region && region->ops && region->ops->mmap &&
+ if (region->ops && region->ops->mmap &&
(region->flags & VFIO_REGION_INFO_FLAG_MMAP))
return region->ops->mmap(vdev, region, vma);
return -EINVAL;
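
Note on the vfio hunk: the caller-supplied index is validated against the full region count before it is used to form a pointer into the dynamically sized region table, which is why the later "region &&" test could be dropped. A userspace sketch of bounds-check-before-index, with made-up constants standing in for the VFIO definitions:

#include <stdio.h>
#include <errno.h>

#define NUM_FIXED_REGIONS 9	/* stand-in for VFIO_PCI_NUM_REGIONS */

struct region_stub { int (*mmap)(void); };

static int demo_region_mmap(void)
{
	puts("device-specific mmap handler called");
	return 0;
}

static int do_mmap(unsigned int index, struct region_stub *extra, unsigned int num_extra)
{
	if (index >= NUM_FIXED_REGIONS + num_extra)
		return -EINVAL;		/* reject before indexing the table */

	if (index >= NUM_FIXED_REGIONS) {
		struct region_stub *r = &extra[index - NUM_FIXED_REGIONS];

		return r->mmap ? r->mmap() : -EINVAL;
	}
	return 0;			/* fixed BAR regions handled elsewhere */
}

int main(void)
{
	struct region_stub extra[2] = { { NULL }, { demo_region_mmap } };

	printf("fixed index:        %d\n", do_mmap(3, extra, 2));
	printf("extra index:        %d\n", do_mmap(10, extra, 2));
	printf("out-of-range index: %d\n", do_mmap(42, extra, 2));
	return 0;
}
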