Diffstat (limited to 'drivers/dma/idxd/device.c')
-rw-r--r-- | drivers/dma/idxd/device.c | 435
1 file changed, 294 insertions(+), 141 deletions(-)
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index fab412349f7f..6f44fa8f78a5 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -19,30 +19,6 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
 
 /* Interrupt control bits */
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
-	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
-
-	pci_msi_mask_irq(data);
-}
-
-void idxd_mask_msix_vectors(struct idxd_device *idxd)
-{
-	struct pci_dev *pdev = idxd->pdev;
-	int msixcnt = pci_msix_vec_count(pdev);
-	int i;
-
-	for (i = 0; i < msixcnt; i++)
-		idxd_mask_msix_vector(idxd, i);
-}
-
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
-	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
-
-	pci_msi_unmask_irq(data);
-}
-
 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
 {
 	union genctrl_reg genctrl;
@@ -208,7 +184,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
 
 	if (wq->state == IDXD_WQ_ENABLED) {
 		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
-		return -ENXIO;
+		return 0;
 	}
 
 	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
@@ -220,6 +196,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
 	}
 
 	wq->state = IDXD_WQ_ENABLED;
+	set_bit(wq->id, idxd->wq_enable_map);
 	dev_dbg(dev, "WQ %d enabled\n", wq->id);
 	return 0;
 }
@@ -247,6 +224,7 @@ int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
 
 	if (reset_config)
 		idxd_wq_disable_cleanup(wq);
+	clear_bit(wq->id, idxd->wq_enable_map);
 	wq->state = IDXD_WQ_DISABLED;
 	dev_dbg(dev, "WQ %d disabled\n", wq->id);
 	return 0;
@@ -282,7 +260,6 @@ void idxd_wq_reset(struct idxd_wq *wq)
 	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
 	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
 	idxd_wq_disable_cleanup(wq);
-	wq->state = IDXD_WQ_DISABLED;
 }
 
 int idxd_wq_map_portal(struct idxd_wq *wq)
@@ -323,24 +300,46 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
 	}
 }
 
-int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
 {
 	struct idxd_device *idxd = wq->idxd;
-	int rc;
 	union wqcfg wqcfg;
 	unsigned int offset;
 
-	rc = idxd_wq_disable(wq, false);
-	if (rc < 0)
-		return rc;
+	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
+	spin_lock(&idxd->dev_lock);
+	wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
+	wqcfg.priv = priv;
+	wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
+	iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
+	spin_unlock(&idxd->dev_lock);
+}
+
+static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
+{
+	struct idxd_device *idxd = wq->idxd;
+	union wqcfg wqcfg;
+	unsigned int offset;
 
 	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
 	spin_lock(&idxd->dev_lock);
 	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
 	wqcfg.pasid_en = 1;
 	wqcfg.pasid = pasid;
+	wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
 	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
 	spin_unlock(&idxd->dev_lock);
+}
+
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+{
+	int rc;
+
+	rc = idxd_wq_disable(wq, false);
+	if (rc < 0)
+		return rc;
+
+	__idxd_wq_set_pasid_locked(wq, pasid);
 
 	rc = idxd_wq_enable(wq);
 	if (rc < 0)
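The new __idxd_wq_set_pasid_locked() and __idxd_wq_set_priv_locked() helpers above share one pattern: read a single 32-bit word of the live WQCFG register under dev_lock, update the field, mirror the word into the driver's shadow copy, and write it back. The standalone sketch below models that read-modify-write flow; the field positions and names are illustrative only, not the real WQCFG layout.

/*
 * Standalone model of the locked read-modify-write used by
 * __idxd_wq_set_pasid_locked(): read one 32-bit config word, update the
 * field, mirror it into the shadow copy, write it back. Bit positions
 * below are illustrative, not the driver's real register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define PASID_EN_BIT	(1u << 30)	/* illustrative enable bit */
#define PASID_MASK	0xfffffu	/* illustrative 20-bit PASID field */

static uint32_t mmio_word;		/* stands in for the ioread32/iowrite32 target */
static uint32_t shadow_word;		/* stands in for wq->wqcfg->bits[idx] */

static void set_pasid_locked(uint32_t pasid)
{
	/* the driver holds spin_lock(&idxd->dev_lock) across this block */
	uint32_t w = mmio_word;				/* ioread32() */

	w &= ~PASID_MASK;
	w |= (pasid & PASID_MASK) | PASID_EN_BIT;
	shadow_word = w;				/* keep shadow copy coherent */
	mmio_word = w;					/* iowrite32() */
}

int main(void)
{
	mmio_word = 0x0c000000;		/* pretend unrelated fields are already set */
	set_pasid_locked(0x42);
	printf("mmio=%#x shadow=%#x\n", (unsigned int)mmio_word, (unsigned int)shadow_word);
	return 0;
}

Mirroring the word into the shadow copy at write time is what keeps the later config-write path free of stale values.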
@@ -380,16 +379,28 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 	struct idxd_device *idxd = wq->idxd;
 
 	lockdep_assert_held(&wq->wq_lock);
+	wq->state = IDXD_WQ_DISABLED;
 	memset(wq->wqcfg, 0, idxd->wqcfg_size);
 	wq->type = IDXD_WQT_NONE;
-	wq->size = 0;
-	wq->group = NULL;
 	wq->threshold = 0;
 	wq->priority = 0;
-	wq->ats_dis = 0;
+	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
 	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+	clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
 	memset(wq->name, 0, WQ_NAME_SIZE);
+	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
+	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
+	if (wq->opcap_bmap)
+		bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+}
+
+static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
+{
+	lockdep_assert_held(&wq->wq_lock);
+
+	wq->size = 0;
+	wq->group = NULL;
 }
 
 static void idxd_wq_ref_release(struct percpu_ref *ref)
@@ -404,19 +415,31 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
 	int rc;
 
 	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
-	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
+	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
+			     PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
 	if (rc < 0)
 		return rc;
 	reinit_completion(&wq->wq_dead);
+	reinit_completion(&wq->wq_resurrect);
 	return 0;
 }
 
-void idxd_wq_quiesce(struct idxd_wq *wq)
+void __idxd_wq_quiesce(struct idxd_wq *wq)
 {
+	lockdep_assert_held(&wq->wq_lock);
+	reinit_completion(&wq->wq_resurrect);
 	percpu_ref_kill(&wq->wq_active);
+	complete_all(&wq->wq_resurrect);
 	wait_for_completion(&wq->wq_dead);
 }
 
+void idxd_wq_quiesce(struct idxd_wq *wq)
+{
+	mutex_lock(&wq->wq_lock);
+	__idxd_wq_quiesce(wq);
+	mutex_unlock(&wq->wq_lock);
+}
+
 /* Device control bits */
 static inline bool idxd_is_enabled(struct idxd_device *idxd)
 {
@@ -558,21 +581,16 @@ int idxd_device_disable(struct idxd_device *idxd)
 		return -ENXIO;
 	}
 
-	spin_lock(&idxd->dev_lock);
 	idxd_device_clear_state(idxd);
-	idxd->state = IDXD_DEV_DISABLED;
-	spin_unlock(&idxd->dev_lock);
 	return 0;
 }
 
 void idxd_device_reset(struct idxd_device *idxd)
 {
 	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
-	spin_lock(&idxd->dev_lock);
 	idxd_device_clear_state(idxd);
-	idxd->state = IDXD_DEV_DISABLED;
+	spin_lock(&idxd->dev_lock);
 	idxd_unmask_error_interrupts(idxd);
-	idxd_msix_perm_setup(idxd);
 	spin_unlock(&idxd->dev_lock);
 }
 
@@ -681,11 +699,18 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
 		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
 		group->num_engines = 0;
 		group->num_wqs = 0;
-		group->use_token_limit = false;
-		group->tokens_allowed = 0;
-		group->tokens_reserved = 0;
-		group->tc_a = -1;
-		group->tc_b = -1;
+		group->use_rdbuf_limit = false;
+		group->rdbufs_allowed = 0;
+		group->rdbufs_reserved = 0;
+		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+			group->tc_a = 1;
+			group->tc_b = 1;
+		} else {
+			group->tc_a = -1;
+			group->tc_b = -1;
+		}
+		group->desc_progress_limit = 0;
+		group->batch_progress_limit = 0;
 	}
 }
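The quiesce rework above is an instance of the common locked/unlocked split: __idxd_wq_quiesce() asserts that the caller already holds wq_lock, while idxd_wq_quiesce() is the self-locking wrapper for external callers. A minimal pthread model of the idiom, with illustrative names:

/*
 * Minimal model of the __locked/unlocked pairing adopted above for
 * __idxd_wq_quiesce()/idxd_wq_quiesce(). The double-underscore variant
 * assumes the lock is held; the plain variant takes it. Names are
 * illustrative, not the driver's.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;

static void __quiesce_locked(void)
{
	/* caller must hold wq_lock (lockdep_assert_held() in the kernel) */
	puts("quiescing while locked");
}

void quiesce(void)
{
	pthread_mutex_lock(&wq_lock);
	__quiesce_locked();
	pthread_mutex_unlock(&wq_lock);
}

int main(void)
{
	quiesce();	/* external callers go through the locking wrapper */
	return 0;
}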
@@ -693,52 +718,35 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 {
 	int i;
 
-	lockdep_assert_held(&idxd->dev_lock);
 	for (i = 0; i < idxd->max_wqs; i++) {
 		struct idxd_wq *wq = idxd->wqs[i];
 
-		if (wq->state == IDXD_WQ_ENABLED) {
-			idxd_wq_disable_cleanup(wq);
-			wq->state = IDXD_WQ_DISABLED;
-		}
+		mutex_lock(&wq->wq_lock);
+		idxd_wq_disable_cleanup(wq);
+		idxd_wq_device_reset_cleanup(wq);
+		mutex_unlock(&wq->wq_lock);
 	}
 }
 
 void idxd_device_clear_state(struct idxd_device *idxd)
 {
-	idxd_groups_clear_state(idxd);
-	idxd_engines_clear_state(idxd);
-	idxd_device_wqs_clear_state(idxd);
-}
-
-void idxd_msix_perm_setup(struct idxd_device *idxd)
-{
-	union msix_perm mperm;
-	int i, msixcnt;
-
-	msixcnt = pci_msix_vec_count(idxd->pdev);
-	if (msixcnt < 0)
-		return;
-
-	mperm.bits = 0;
-	mperm.pasid = idxd->pasid;
-	mperm.pasid_en = device_pasid_enabled(idxd);
-	for (i = 1; i < msixcnt; i++)
-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
-void idxd_msix_perm_clear(struct idxd_device *idxd)
-{
-	union msix_perm mperm;
-	int i, msixcnt;
+	/* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
+	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+		/*
+		 * Clearing wq state is protected by wq lock.
+		 * So no need to be protected by device lock.
+		 */
+		idxd_device_wqs_clear_state(idxd);
 
-	msixcnt = pci_msix_vec_count(idxd->pdev);
-	if (msixcnt < 0)
-		return;
+		spin_lock(&idxd->dev_lock);
+		idxd_groups_clear_state(idxd);
+		idxd_engines_clear_state(idxd);
+	} else {
+		spin_lock(&idxd->dev_lock);
+	}
 
-	mperm.bits = 0;
-	for (i = 1; i < msixcnt; i++)
-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+	idxd->state = IDXD_DEV_DISABLED;
+	spin_unlock(&idxd->dev_lock);
 }
 
 static void idxd_group_config_write(struct idxd_group *group)
@@ -767,10 +775,10 @@ static void idxd_group_config_write(struct idxd_group *group)
 
 	/* setup GRPFLAGS */
 	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
-	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
-	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+	iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
 		group->id, grpcfg_offset,
-		ioread32(idxd->reg_base + grpcfg_offset));
+		ioread64(idxd->reg_base + grpcfg_offset));
 }
 
 static int idxd_groups_config_write(struct idxd_device *idxd)
@@ -780,10 +788,10 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
 	int i;
 	struct device *dev = &idxd->pdev->dev;
 
-	/* Setup bandwidth token limit */
-	if (idxd->hw.gen_cap.config_en && idxd->token_limit) {
+	/* Setup bandwidth rdbuf limit */
+	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
 		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
-		reg.token_limit = idxd->token_limit;
+		reg.rdbuf_limit = idxd->rdbuf_limit;
 		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 	}
 
@@ -813,7 +821,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
 	u32 wq_offset;
-	int i;
+	int i, n;
 
 	if (!wq->group)
 		return 0;
@@ -824,18 +832,15 @@
 	 */
 	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
 		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
-		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+		wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
 	}
 
+	if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
+		wq->size = WQ_DEFAULT_QUEUE_DEPTH;
+
 	/* byte 0-3 */
 	wq->wqcfg->wq_size = wq->size;
 
-	if (wq->size == 0) {
-		idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
-		dev_warn(dev, "Incorrect work queue size: 0\n");
-		return -EINVAL;
-	}
-
 	/* bytes 4-7 */
 	wq->wqcfg->wq_thresh = wq->threshold;
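Two hunks above widen GRPFLGCFG access from ioread32()/iowrite32() to the 64-bit accessors and switch the dev_dbg() format from %#x to %#llx: once fields live above bit 31, a 32-bit read silently truncates them. The generic sketch below shows the lo/hi composition that a 64-bit MMIO read reduces to on 32-bit builds; it is an illustration of the width problem, not the driver's code.

/*
 * Illustration of why the GRPFLGCFG accessors change width: a 32-bit
 * read loses everything in the upper word. read64() is the usual lo/hi
 * composition; the register contents here are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t reg[2] = { 0x80000003u, 0x00000012u };	/* lo word, hi word */

static uint64_t read64(const volatile uint32_t *r)
{
	uint64_t lo = r[0];
	uint64_t hi = r[1];

	return lo | (hi << 32);
}

int main(void)
{
	printf("32-bit read: %#x\n", (unsigned int)reg[0]);	/* upper fields lost */
	printf("64-bit read: %#llx\n", (unsigned long long)read64(reg));
	return 0;
}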
@@ -843,14 +848,8 @@
 	if (wq_dedicated(wq))
 		wq->wqcfg->mode = 1;
 
-	if (device_pasid_enabled(idxd)) {
-		wq->wqcfg->pasid_en = 1;
-		if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
-			wq->wqcfg->pasid = idxd->pasid;
-	}
-
 	/*
-	 * Here the priv bit is set depending on the WQ type. priv = 1 if the
+	 * The WQ priv bit is set depending on the WQ type. priv = 1 if the
 	 * WQ type is kernel to indicate privileged access. This setting only
 	 * matters for dedicated WQ. According to the DSA spec:
 	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
@@ -860,7 +859,6 @@
 	 * Privileged Mode Enable field of the PCI Express PASID capability
 	 * is 0, this field must be 0.
 	 *
 	 * In the case of a dedicated kernel WQ that is not able to support
 	 * the PASID cap, then the configuration will be rejected.
 	 */
-	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
 	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
 	    !idxd_device_pasid_priv_enabled(idxd) &&
 	    wq->type == IDXD_WQT_KERNEL) {
@@ -875,11 +873,22 @@
 		wq->wqcfg->bof = 1;
 
 	if (idxd->hw.wq_cap.wq_ats_support)
-		wq->wqcfg->wq_ats_disable = wq->ats_dis;
+		wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
 
 	/* bytes 12-15 */
 	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
-	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
+	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
+
+	/* bytes 32-63 */
+	if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
+		memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
+		for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
+			int pos = n % BITS_PER_LONG_LONG;
+			int idx = n / BITS_PER_LONG_LONG;
+
+			wq->wqcfg->op_config[idx] |= BIT(pos);
+		}
+	}
 
 	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
 	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
@@ -924,13 +933,15 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
 			group->tc_b = group->grpcfg.flags.tc_b = 1;
 		else
 			group->grpcfg.flags.tc_b = group->tc_b;
-		group->grpcfg.flags.use_token_limit = group->use_token_limit;
-		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
-		if (group->tokens_allowed)
-			group->grpcfg.flags.tokens_allowed =
-				group->tokens_allowed;
+		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
+		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
+		if (group->rdbufs_allowed)
+			group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
 		else
-			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+			group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
+
+		group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
+		group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
 	}
 }
@@ -981,10 +992,8 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
 		if (!wq->group)
 			continue;
 
-		if (!wq->size)
-			continue;
-
-		if (wq_shared(wq) && !device_swq_supported(idxd)) {
+		if (wq_shared(wq) && !wq_shared_supported(wq)) {
 			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
 			dev_warn(dev, "No shared wq support but configured.\n");
 			return -EINVAL;
@@ -1049,6 +1058,9 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
 
 	wq->priority = wq->wqcfg->priority;
 
+	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
+	idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
+
 	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
 		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
 		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
@@ -1112,8 +1124,8 @@ static void idxd_group_load_config(struct idxd_group *group)
 	}
 
 	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
-	group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
-	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+	group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
+	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
 		group->id, grpcfg_offset, group->grpcfg.flags.bits);
 }
 
@@ -1123,7 +1135,7 @@ int idxd_device_load_config(struct idxd_device *idxd)
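The op_config loop added to idxd_wq_config_write() above packs a Linux bitmap into the 64-bit words of the WQ operation-capability registers: set bit n lands in word n / 64 at bit position n % 64. A plain-C model of the same packing, with illustrative sizes:

/*
 * Plain-C model of the op_config packing loop: each set bit n in the
 * capability bitmap lands in 64-bit word n / 64 at bit n % 64. The
 * bitmap contents and sizes here are examples only.
 */
#include <stdint.h>
#include <stdio.h>

#define OPCAP_BITS	256
#define OPCAP_WORDS	(OPCAP_BITS / 64)

int main(void)
{
	uint64_t op_config[OPCAP_WORDS] = { 0 };
	int set_bits[] = { 0, 3, 70, 200 };	/* example enabled opcodes */
	unsigned int i;

	for (i = 0; i < sizeof(set_bits) / sizeof(set_bits[0]); i++) {
		int n = set_bits[i];

		op_config[n / 64] |= 1ULL << (n % 64);
	}

	for (i = 0; i < OPCAP_WORDS; i++)
		printf("op_config[%u] = %#llx\n", i, (unsigned long long)op_config[i]);
	return 0;
}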
 	int i, rc;
 
 	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
-	idxd->token_limit = reg.token_limit;
+	idxd->rdbuf_limit = reg.rdbuf_limit;
 
 	for (i = 0; i < idxd->max_groups; i++) {
 		struct idxd_group *group = idxd->groups[i];
@@ -1142,7 +1154,112 @@ int idxd_device_load_config(struct idxd_device *idxd)
 	return 0;
 }
 
-int __drv_enable_wq(struct idxd_wq *wq)
+static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
+{
+	struct idxd_desc *desc, *itr;
+	struct llist_node *head;
+	LIST_HEAD(flist);
+	enum idxd_complete_type ctype;
+
+	spin_lock(&ie->list_lock);
+	head = llist_del_all(&ie->pending_llist);
+	if (head) {
+		llist_for_each_entry_safe(desc, itr, head, llnode)
+			list_add_tail(&desc->list, &ie->work_list);
+	}
+
+	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
+		list_move_tail(&desc->list, &flist);
+	spin_unlock(&ie->list_lock);
+
+	list_for_each_entry_safe(desc, itr, &flist, list) {
+		list_del(&desc->list);
+		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+		idxd_dma_complete_txd(desc, ctype, true);
+	}
+}
+
+static void idxd_device_set_perm_entry(struct idxd_device *idxd,
+				       struct idxd_irq_entry *ie)
+{
+	union msix_perm mperm;
+
+	if (ie->pasid == INVALID_IOASID)
+		return;
+
+	mperm.bits = 0;
+	mperm.pasid = ie->pasid;
+	mperm.pasid_en = 1;
+	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
+					 struct idxd_irq_entry *ie)
+{
+	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+void idxd_wq_free_irq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct idxd_irq_entry *ie = &wq->ie;
+
+	if (wq->type != IDXD_WQT_KERNEL)
+		return;
+
+	free_irq(ie->vector, ie);
+	idxd_flush_pending_descs(ie);
+	if (idxd->request_int_handles)
+		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->vector = -1;
+	ie->int_handle = INVALID_INT_HANDLE;
+	ie->pasid = INVALID_IOASID;
+}
+
+int idxd_wq_request_irq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct pci_dev *pdev = idxd->pdev;
+	struct device *dev = &pdev->dev;
+	struct idxd_irq_entry *ie;
+	int rc;
+
+	if (wq->type != IDXD_WQT_KERNEL)
+		return 0;
+
+	ie = &wq->ie;
+	ie->vector = pci_irq_vector(pdev, ie->id);
+	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
+	idxd_device_set_perm_entry(idxd, ie);
+
+	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
+	if (rc < 0) {
+		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
+		goto err_irq;
+	}
+
+	if (idxd->request_int_handles) {
+		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
+						    IDXD_IRQ_MSIX);
+		if (rc < 0)
+			goto err_int_handle;
+	} else {
+		ie->int_handle = ie->id;
+	}
+
+	return 0;
+
+err_int_handle:
+	ie->int_handle = INVALID_INT_HANDLE;
+	free_irq(ie->vector, ie);
+err_irq:
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->pasid = INVALID_IOASID;
+	return rc;
+}
+
+int drv_enable_wq(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
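idxd_flush_pending_descs() above drains descriptors in two phases: splice everything out of the shared lists inside a short list_lock critical section, then run the completion callbacks on a private list with no lock held. The simplified sketch below keeps that shape, substituting an ordinary mutex and singly linked list for the kernel's lock-free llist and intrusive list_head.

/*
 * Sketch of the two-phase drain: steal the whole pending list under a
 * short critical section, then complete each entry with no lock held.
 * Simplified stand-in for the kernel's llist/list_head machinery.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct desc {
	int id;
	struct desc *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *pending;		/* stands in for ie->pending_llist */

static void flush_pending(void)
{
	struct desc *flist, *d;

	pthread_mutex_lock(&list_lock);
	flist = pending;		/* phase 1: steal the whole list */
	pending = NULL;
	pthread_mutex_unlock(&list_lock);

	while ((d = flist)) {		/* phase 2: complete without the lock */
		flist = d->next;
		printf("aborting desc %d\n", d->id);
		free(d);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct desc *d = malloc(sizeof(*d));

		d->id = i;
		d->next = pending;
		pending = d;
	}
	flush_pending();
	return 0;
}

Keeping the callbacks outside the lock matters because completion handlers can be slow or can themselves try to submit work.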
@@ -1176,7 +1293,7 @@ int __drv_enable_wq(struct idxd_wq *wq)
 
 	/* Shared WQ checks */
 	if (wq_shared(wq)) {
-		if (!device_swq_supported(idxd)) {
+		if (!wq_shared_supported(wq)) {
 			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
 			dev_dbg(dev, "PASID not enabled and shared wq.\n");
 			goto err;
@@ -1196,6 +1313,29 @@ int __drv_enable_wq(struct idxd_wq *wq)
 		}
 	}
 
+	/*
+	 * In the event that the WQ is configurable for pasid and priv bits.
+	 * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
+	 * However, for non-kernel wq, the driver should only set the pasid_en bit for
+	 * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
+	 * pasid_en later on so there is no need to setup.
+	 */
+	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+		int priv = 0;
+
+		if (wq_pasid_enabled(wq)) {
+			if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
+				u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
+
+				__idxd_wq_set_pasid_locked(wq, pasid);
+			}
+		}
+
+		if (is_idxd_wq_kernel(wq))
+			priv = 1;
+		__idxd_wq_set_priv_locked(wq, priv);
+	}
+
 	rc = 0;
 	spin_lock(&idxd->dev_lock);
 	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -1220,8 +1360,36 @@
 	}
 
 	wq->client_count = 0;
+
+	rc = idxd_wq_request_irq(wq);
+	if (rc < 0) {
+		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+		dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+		goto err_irq;
+	}
+
+	rc = idxd_wq_alloc_resources(wq);
+	if (rc < 0) {
+		idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
+		dev_dbg(dev, "WQ resource alloc failed\n");
+		goto err_res_alloc;
+	}
+
+	rc = idxd_wq_init_percpu_ref(wq);
+	if (rc < 0) {
+		idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
+		dev_dbg(dev, "percpu_ref setup failed\n");
+		goto err_ref;
+	}
+
 	return 0;
 
+err_ref:
+	idxd_wq_free_resources(wq);
+err_res_alloc:
+	idxd_wq_free_irq(wq);
+err_irq:
+	idxd_wq_unmap_portal(wq);
 err_map_portal:
 	rc = idxd_wq_disable(wq, false);
 	if (rc < 0)
@@ -1230,17 +1398,7 @@ err:
 	return rc;
 }
 
-int drv_enable_wq(struct idxd_wq *wq)
-{
-	int rc;
-
-	mutex_lock(&wq->wq_lock);
-	rc = __drv_enable_wq(wq);
-	mutex_unlock(&wq->wq_lock);
-	return rc;
-}
-
-void __drv_disable_wq(struct idxd_wq *wq)
+void drv_disable_wq(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
@@ -1251,21 +1409,16 @@ void drv_disable_wq(struct idxd_wq *wq)
 		dev_warn(dev, "Clients has claim on wq %d: %d\n",
 			 wq->id, idxd_wq_refcount(wq));
 
+	idxd_wq_free_resources(wq);
 	idxd_wq_unmap_portal(wq);
-	idxd_wq_drain(wq);
+	idxd_wq_free_irq(wq);
 	idxd_wq_reset(wq);
-
+	percpu_ref_exit(&wq->wq_active);
+	wq->type = IDXD_WQT_NONE;
 	wq->client_count = 0;
 }
 
-void drv_disable_wq(struct idxd_wq *wq)
-{
-	mutex_lock(&wq->wq_lock);
-	__drv_disable_wq(wq);
-	mutex_unlock(&wq->wq_lock);
-}
-
 int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
 {
 	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
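The error path grown by drv_enable_wq() above follows the kernel's chained-goto unwind: each successfully acquired resource gets a label, and a failure at step N jumps to the label that releases steps N-1 down to 1 in reverse order. A compilable skeleton of the idiom, with placeholder steps rather than the driver's real resources:

/*
 * Shape of the chained-goto unwind in drv_enable_wq(): a failure at any
 * step falls through the labels that undo every earlier step, in
 * reverse order. The steps and messages here are placeholders.
 */
#include <stdio.h>

static int setup(void)
{
	if (/* step 1: request irq */ 0)
		goto err_irq;
	if (/* step 2: alloc resources */ 0)
		goto err_res_alloc;
	if (/* step 3: init percpu ref */ 1)	/* pretend the last step fails */
		goto err_ref;

	return 0;

err_ref:
	puts("undo step 2");	/* idxd_wq_free_resources() in the driver */
err_res_alloc:
	puts("undo step 1");	/* idxd_wq_free_irq() */
err_irq:
	puts("undo step 0");	/* idxd_wq_unmap_portal() */
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}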