Diffstat (limited to 'drivers/dma/idxd/init.c')
 drivers/dma/idxd/init.c | 128 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 98 insertions(+), 30 deletions(-)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 11e5ce168177..2c051e07c34c 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -14,6 +14,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
+#include <linux/intel-svm.h>
+#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
@@ -26,18 +28,24 @@ MODULE_AUTHOR("Intel Corporation");
#define DRV_NAME "idxd"
+bool support_enqcmd;
+
static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+
+ /* IAX ver 1.0 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
static char *idxd_name[] = {
"dsa",
+ "iax"
};
const char *idxd_get_dev_name(struct idxd_device *idxd)
@@ -53,6 +61,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
+ union msix_perm mperm;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
@@ -92,6 +101,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
for (i = 0; i < msixcnt; i++) {
idxd->irq_entries[i].id = i;
idxd->irq_entries[i].idxd = idxd;
+ spin_lock_init(&idxd->irq_entries[i].list_lock);
}
msix = &idxd->msix_entries[0];
@@ -131,6 +141,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
idxd_unmask_error_interrupts(idxd);
+ /* Setup MSIX permission table */
+ mperm.bits = 0;
+ mperm.pasid = idxd->pasid;
+ mperm.pasid_en = device_pasid_enabled(idxd);
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+
return 0;
err_no_irq:
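
The MSI-X permission writes above use three things from union msix_perm: the raw .bits, a .pasid field, and a .pasid_en field, with entries spaced 8 bytes apart. The union itself lives in the register header; a sketch consistent with that usage (the exact field widths and reserved bits are an assumption, not part of this hunk):

/* Sketch (assumption): one 32-bit MSI-X permission entry -- a 20-bit
 * PASID plus an enable bit, written at msix_perm_offset + i * 8.
 */
union msix_perm {
	struct {
		u32 rsvd:2;
		u32 ignore:1;
		u32 pasid_en:1;
		u32 rsvd2:8;
		u32 pasid:20;
	};
	u32 bits;
};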
@@ -178,6 +195,9 @@ static int idxd_setup_internals(struct idxd_device *idxd)
wq->idxd_cdev.minor = -1;
wq->max_xfer_bytes = idxd->max_xfer_bytes;
wq->max_batch_size = idxd->max_batch_size;
+ wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
+ if (!wq->wqcfg)
+ return -ENOMEM;
}
for (i = 0; i < idxd->max_engines; i++) {
@@ -198,17 +218,14 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
- offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
- + sizeof(u64));
- idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+ offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
+ idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
- idxd->wqcfg_offset = offsets.wqcfg * 0x100;
- dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
- idxd->wqcfg_offset);
- idxd->msix_perm_offset = offsets.msix_perm * 0x100;
- dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
- idxd->msix_perm_offset);
- idxd->perfmon_offset = offsets.perfmon * 0x100;
+ idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
+ dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
+ idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
+ dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
+ idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
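
With the repeated 0x100 literal replaced by IDXD_TABLE_MULT, the multiplier presumably gets a single definition in the register header; a one-line sketch of that define, its value inferred from the literals it replaces:

#define IDXD_TABLE_MULT	0x100	/* table offsets are in 256-byte units */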
@@ -251,6 +268,8 @@ static void idxd_read_caps(struct idxd_device *idxd)
dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+ idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
+ dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
/* reading operation capabilities */
for (i = 0; i < 4; i++) {
@@ -260,8 +279,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
}
}
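
The new wqcfg_size read is a biased power of two: the WQCAP field holds an exponent on top of a minimum WQCFG size. Assuming IDXD_WQCFG_MIN is 5 (a 32-byte minimum), a field value of 0 yields 1 << 5 = 32 bytes and a value of 1 yields 64 bytes; the same size feeds the per-WQ wq->wqcfg allocation added in idxd_setup_internals() above.

#define IDXD_WQCFG_MIN	5	/* assumption: wqcfg_size = 1 << (field + 5) */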
-static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
- void __iomem * const *iomap)
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
@@ -271,12 +289,45 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
return NULL;
idxd->pdev = pdev;
- idxd->reg_base = iomap[IDXD_MMIO_BAR];
spin_lock_init(&idxd->dev_lock);
return idxd;
}
+static int idxd_enable_system_pasid(struct idxd_device *idxd)
+{
+ int flags;
+ unsigned int pasid;
+ struct iommu_sva *sva;
+
+ flags = SVM_FLAG_SUPERVISOR_MODE;
+
+ sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
+ if (IS_ERR(sva)) {
+ dev_warn(&idxd->pdev->dev,
+ "iommu sva bind failed: %ld\n", PTR_ERR(sva));
+ return PTR_ERR(sva);
+ }
+
+ pasid = iommu_sva_get_pasid(sva);
+ if (pasid == IOMMU_PASID_INVALID) {
+ iommu_sva_unbind_device(sva);
+ return -ENODEV;
+ }
+
+ idxd->sva = sva;
+ idxd->pasid = pasid;
+ dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
+ return 0;
+}
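
SVM_FLAG_SUPERVISOR_MODE with a NULL mm binds the device to the kernel's address space, so this system PASID lets descriptors reference kernel virtual addresses directly. For comparison, a per-process binding would pass the user mm instead; a sketch of that counterpart (not part of this patch, drvdata argument assumed NULL):

	/* Sketch (assumption): user PASID for a process submitting its own work */
	sva = iommu_sva_bind_device(&idxd->pdev->dev, current->mm, NULL);
	pasid = iommu_sva_get_pasid(sva);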
+
+static void idxd_disable_system_pasid(struct idxd_device *idxd)
+{
+ iommu_sva_unbind_device(idxd->sva);
+ idxd->sva = NULL;
+}
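
device_pasid_enabled() is used here and below but never defined in this diff; presumably it is a small inline in the driver header testing the flag that idxd_probe() sets (see the probe hunk below). A sketch of the likely helper (an assumption, not taken from this patch):

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}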
+
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -287,6 +338,14 @@ static int idxd_probe(struct idxd_device *idxd)
idxd_device_init_reset(idxd);
dev_dbg(dev, "IDXD reset complete\n");
+ if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc < 0)
+ dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ }
+
idxd_read_caps(idxd);
idxd_read_table_offsets(idxd);
@@ -317,29 +376,37 @@ static int idxd_probe(struct idxd_device *idxd)
idxd_mask_error_interrupts(idxd);
idxd_mask_msix_vectors(idxd);
err_setup:
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
return rc;
}
+static void idxd_type_init(struct idxd_device *idxd)
+{
+ if (idxd->type == IDXD_TYPE_DSA)
+ idxd->compl_size = sizeof(struct dsa_completion_record);
+ else if (idxd->type == IDXD_TYPE_IAX)
+ idxd->compl_size = sizeof(struct iax_completion_record);
+}
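
idxd_type_init() records the completion-record size so common code can stay device-type agnostic; DSA and IAX define different completion-record layouts. A sketch of how a per-WQ allocation might consume compl_size (the surrounding names are assumptions, not from this patch):

	/* Sketch (assumption): completion space scales with the per-type size
	 * instead of a hard-coded sizeof(struct dsa_completion_record).
	 */
	wq->compls_size = num_descs * idxd->compl_size;
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);
	if (!wq->compls)
		return -ENOMEM;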
+
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
int rc;
- unsigned int mask;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- dev_dbg(dev, "Mapping BARs\n");
- mask = (1 << IDXD_MMIO_BAR);
- rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
- if (rc)
- return rc;
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev);
+ if (!idxd)
+ return -ENOMEM;
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
+ dev_dbg(dev, "Mapping BARs\n");
+ idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base)
return -ENOMEM;
dev_dbg(dev, "Set DMA masks\n");
@@ -355,13 +422,10 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev, iomap);
- if (!idxd)
- return -ENOMEM;
-
idxd_set_type(idxd);
+ idxd_type_init(idxd);
+
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
pci_set_drvdata(pdev, idxd);
@@ -447,6 +511,8 @@ static void idxd_remove(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "%s called\n", __func__);
idxd_cleanup_sysfs(idxd);
idxd_shutdown(pdev);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
mutex_lock(&idxd_idr_lock);
idr_remove(&idxd_idrs[idxd->type], idxd->id);
mutex_unlock(&idxd_idr_lock);
@@ -465,7 +531,7 @@ static int __init idxd_init_module(void)
int err, i;
/*
- * If the CPU does not support write512, there's no point in
+ * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
* enumerating the device. We can not utilize it.
*/
if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
@@ -473,8 +539,10 @@ static int __init idxd_init_module(void)
return -ENODEV;
}
- pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
- DRV_NAME, IDXD_DRIVER_VERSION);
+ if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+ pr_warn("Platform does not have ENQCMD(S) support.\n");
+ else
+ support_enqcmd = true;
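
support_enqcmd only records the CPU capability here; submission paths elsewhere would pick the instruction per work-queue type. A sketch of the intended split (helper names are assumptions, not introduced by this patch):

	/* Sketch (assumption): dedicated WQs can use a posted MOVDIR64B write;
	 * shared WQs need ENQCMDS, which reports a full queue back to software.
	 */
	if (wq_dedicated(wq))
		iosubmit_cmds512(portal, desc, 1);
	else if (enqcmds(portal, desc))
		return -EAGAIN;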
mutex_init(&idxd_idr_lock);
for (i = 0; i < IDXD_TYPE_MAX; i++)