Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c')
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 82
1 file changed, 57 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 596ee5cd3b84..4bb7f4223762 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -45,7 +45,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
-static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int destroy_queues_cpsch(struct device_queue_manager *dqm,
+ bool preempt_static_queues, bool lock);
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
@@ -523,6 +524,17 @@ int init_pipelines(struct device_queue_manager *dqm,
return 0;
}
+static void init_interrupts(struct device_queue_manager *dqm)
+{
+ unsigned int i;
+
+ BUG_ON(dqm == NULL);
+
+ for (i = 0 ; i < get_pipes_num(dqm) ; i++)
+ dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd,
+ i + get_first_pipe(dqm));
+}
+
static int init_scheduler(struct device_queue_manager *dqm)
{
int retval;
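
The new init_interrupts() helper above walks every pipe owned by this device queue manager and asks the kgd (radeon) side to enable interrupt delivery on it. A minimal sketch of the hook shape this implies; the signature is inferred from the call site above and is not shown in this diff:

struct kgd_dev;	/* opaque device handle owned by the kgd side */

struct kfd2kgd_calls {
	/* ...other kfd->kgd hooks elided... */
	void (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
};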
@@ -582,6 +594,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
+ init_interrupts(dqm);
return 0;
}
@@ -615,19 +628,6 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}
-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd)
-{
- uint32_t value = SDMA_ATC;
-
- if (q->process->is_32bit_user_mode)
- value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
- else
- value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
- qpd_to_pdd(qpd)));
- q->properties.sdma_vm_addr = value;
-}
-
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
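
init_sdma_vm() leaves this file because the SDMA ATC/VA configuration is ASIC-specific; the call sites below now route through dqm->ops_asic_specific. As a sketch, a CIK-era implementation behind that hook could keep the deleted body as-is (placing it in device_queue_manager_cik.c is an assumption):

static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	uint32_t value = SDMA_ATC;

	if (q->process->is_32bit_user_mode)
		value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
	else
		value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
				qpd_to_pdd(qpd)));
	q->properties.sdma_vm_addr = value;
}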
@@ -650,7 +650,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
- init_sdma_vm(dqm, q, qpd);
+ dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval != 0) {
@@ -751,6 +751,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
+
+ init_interrupts(dqm);
+
list_for_each_entry(node, &dqm->queues, list)
if (node->qpd->pqm->process && dqm->dev)
kfd_bind_process_to_device(dqm->dev,
@@ -773,7 +776,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
BUG_ON(!dqm);
- destroy_queues_cpsch(dqm, true);
+ destroy_queues_cpsch(dqm, true, true);
list_for_each_entry(node, &dqm->queues, list) {
pdd = qpd_to_pdd(node->qpd);
@@ -827,7 +830,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In %s\n", __func__);
mutex_lock(&dqm->lock);
- destroy_queues_cpsch(dqm, false);
+ /* here we actually preempt the DIQ */
+ destroy_queues_cpsch(dqm, true, false);
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
@@ -883,8 +887,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
return -ENOMEM;
}
- init_sdma_vm(dqm, q, qpd);
-
+ dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval != 0)
@@ -912,7 +915,7 @@ out:
return retval;
}
-static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
unsigned long timeout)
{
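
Dropping the static qualifier implies a matching declaration in a shared header so other compilation units can wait on the scheduler fence; naming kfd_priv.h as the location is an assumption:

/* assumed to live in a shared header such as kfd_priv.h */
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout);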
@@ -934,13 +937,16 @@ static int destroy_sdma_queues(struct device_queue_manager *dqm,
unsigned int sdma_engine)
{
return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
+ KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
sdma_engine);
}
-static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+static int destroy_queues_cpsch(struct device_queue_manager *dqm,
+ bool preempt_static_queues, bool lock)
{
int retval;
+ enum kfd_preempt_type_filter preempt_type;
+ struct kfd_process_device *pdd;
BUG_ON(!dqm);
@@ -959,8 +965,12 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
destroy_sdma_queues(dqm, 1);
}
+ preempt_type = preempt_static_queues ?
+ KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
+ KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;
+
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
- KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
+ preempt_type, 0, false, 0);
if (retval != 0)
goto out;
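
The preempt_static_queues flag maps onto the packet manager's preemption filters: the DYNAMIC_QUEUES filter leaves static queues (notably the debug interface queue, DIQ) mapped, while ALL_QUEUES takes everything down, which is why destroy_kernel_queue_cpsch() above passes true. Only the two values used in this patch are confirmed by the diff; the rest of the enum sketched here is an assumption:

enum kfd_preempt_type_filter {
	KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,	/* assumed */
	KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,	/* static + dynamic queues */
	KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES,	/* skip static/debug queues */
	KFD_PREEMPT_TYPE_FILTER_BY_PASID,	/* assumed */
};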
@@ -968,8 +978,14 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
- amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+ retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
+ if (retval != 0) {
+ pdd = kfd_get_process_device_data(dqm->dev,
+ kfd_get_process(current));
+ pdd->reset_wavefronts = true;
+ goto out;
+ }
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
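
The hunk above starts honoring the fence-wait return value: if the CP never signals KFD_FENCE_COMPLETED, the current process device is flagged for a wavefront reset instead of the driver silently releasing the runlist IB. For reference, a minimal sketch of what a wait with this signature does; the polling details are assumptions, not the driver's exact code:

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	/* the _MS suffix at the call site suggests milliseconds (assumed) */
	unsigned long end = jiffies + msecs_to_jiffies(timeout);

	/* fence_addr is CPU-visible write-back memory updated by the CP */
	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end))
			return -ETIME;	/* preemption did not complete */
		schedule();
	}
	return 0;
}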
@@ -988,7 +1004,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
if (lock)
mutex_lock(&dqm->lock);
- retval = destroy_queues_cpsch(dqm, false);
+ retval = destroy_queues_cpsch(dqm, false, false);
if (retval != 0) {
pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
goto out;
@@ -1023,13 +1039,27 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
{
int retval;
struct mqd_manager *mqd;
+ bool preempt_all_queues;
BUG_ON(!dqm || !qpd || !q);
+ preempt_all_queues = false;
+
retval = 0;
/* remove queue from list to prevent rescheduling after preemption */
mutex_lock(&dqm->lock);
+
+ if (qpd->is_debug) {
+ /*
+ * error, currently we do not allow to destroy a queue
+ * of a currently debugged process
+ */
+ retval = -EBUSY;
+ goto failed_try_destroy_debugged_queue;
+
+ }
+
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (!mqd) {
@@ -1061,6 +1091,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
return 0;
failed:
+failed_try_destroy_debugged_queue:
+
mutex_unlock(&dqm->lock);
return retval;
}
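
With the new is_debug guard, destroying a queue that belongs to a process under debug now fails with -EBUSY instead of pulling a queue out from under the debugger. A hypothetical caller-side view (the function name is invented; the ops wiring mirrors dqm->ops.get_mqd_manager used above, and the exact destroy_queue signature is an assumption):

static int pqm_destroy_one_queue(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval = dqm->ops.destroy_queue(dqm, qpd, q);

	if (retval == -EBUSY)
		pr_debug("kfd: queue belongs to a debugged process, not destroyed\n");
	return retval;
}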