Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c  | 446
1 file changed, 433 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 3627e7ac161b..5137476ec18e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -42,6 +43,20 @@ static inline struct process_queue_node *get_queue_by_qid(
return NULL;
}
+static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
+ unsigned int qid)
+{
+ if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+ return -EINVAL;
+
+ if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
+ pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
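[Note: the helper above leans on __test_and_set_bit() so that checking for a collision and claiming the slot happen in one step. Below is a minimal userspace sketch of the same reserve-by-qid idiom, with a single uint64_t standing in for the driver's bitmap; the names and sizes are illustrative, not KFD's.]

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUE_SLOTS 64	/* stand-in for KFD_MAX_NUM_OF_QUEUES_PER_PROCESS */

static uint64_t slot_bitmap;	/* bit n set => qid n is in use */

/* Reserve a caller-chosen qid, failing if it is out of range or taken. */
static int reserve_slot(unsigned int qid)
{
	if (qid >= MAX_QUEUE_SLOTS)
		return -EINVAL;
	if (slot_bitmap & (1ULL << qid))	/* test... */
		return -ENOSPC;
	slot_bitmap |= 1ULL << qid;		/* ...and set */
	return 0;
}

int main(void)
{
	printf("first reserve of qid 5: %d\n", reserve_slot(5));	/* 0 */
	printf("second reserve of qid 5: %d\n", reserve_slot(5));	/* -ENOSPC */
	return 0;
}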
static int find_available_queue_slot(struct process_queue_manager *pqm,
unsigned int *qid)
{
@@ -118,7 +133,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
return ret;
pqn->q->gws = mem;
- pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0;
+ pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, NULL);
@@ -135,9 +150,8 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
INIT_LIST_HEAD(&pqm->queues);
- pqm->queue_slot_bitmap =
- kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
- BITS_PER_BYTE), GFP_KERNEL);
+ pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ GFP_KERNEL);
if (!pqm->queue_slot_bitmap)
return -ENOMEM;
pqm->process = p;
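[Note: the bitmap_zalloc() conversion here, and the matching bitmap_free() in the next hunk, is slightly more than cosmetic: the old kzalloc() rounded the allocation up to whole bytes, whereas bitmap_zalloc() sizes storage in unsigned long units, which is the granularity the bitops above index into. A userspace sketch contrasting the two computations; the bit count is an arbitrary demo value.]

#include <limits.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_PER_BYTE      CHAR_BIT
#define BITS_TO_LONGS(n)   DIV_ROUND_UP(n, BITS_PER_BYTE * sizeof(unsigned long))

int main(void)
{
	unsigned int nbits = 100;	/* arbitrary demo value */

	/* what the old kzalloc(DIV_ROUND_UP(nbits, BITS_PER_BYTE)) requested */
	size_t byte_rounded = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	/* what bitmap_zalloc(nbits, ...) allocates under the hood */
	size_t long_rounded = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

	printf("byte-rounded: %zu, long-rounded: %zu\n",
	       byte_rounded, long_rounded);	/* 13 vs 16 on LP64 */
	return 0;
}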
@@ -159,14 +173,15 @@ void pqm_uninit(struct process_queue_manager *pqm)
kfree(pqn);
}
- kfree(pqm->queue_slot_bitmap);
+ bitmap_free(pqm->queue_slot_bitmap);
pqm->queue_slot_bitmap = NULL;
}
static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev, struct queue **q,
struct queue_properties *q_properties,
- struct file *f, unsigned int qid)
+ struct file *f, struct amdgpu_bo *wptr_bo,
+ unsigned int qid)
{
int retval;
@@ -184,8 +199,27 @@ static int init_user_queue(struct process_queue_manager *pqm,
(*q)->device = dev;
(*q)->process = pqm->process;
+ if (dev->shared_resources.enable_mes) {
+ retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
+ AMDGPU_MES_GANG_CTX_SIZE,
+ &(*q)->gang_ctx_bo,
+ &(*q)->gang_ctx_gpu_addr,
+ &(*q)->gang_ctx_cpu_ptr,
+ false);
+ if (retval) {
+ pr_err("failed to allocate gang context bo\n");
+ goto cleanup;
+ }
+ memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
+ (*q)->wptr_bo = wptr_bo;
+ }
+
pr_debug("PQM After init queue");
+ return 0;
+cleanup:
+ if (dev->shared_resources.enable_mes)
+ uninit_queue(*q);
return retval;
}
@@ -194,6 +228,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
+ struct amdgpu_bo *wptr_bo,
+ const struct kfd_criu_queue_priv_data *q_data,
+ const void *restore_mqd,
+ const void *restore_ctl_stack,
uint32_t *p_doorbell_offset_in_process)
{
int retval;
@@ -220,12 +258,17 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* Hence we also check the type as well
*/
if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
- max_queues = dev->device_info->max_no_of_hqd/2;
+ max_queues = dev->device_info.max_no_of_hqd/2;
if (pdd->qpd.queue_count >= max_queues)
return -ENOSPC;
- retval = find_available_queue_slot(pqm, qid);
+ if (q_data) {
+ retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
+ *qid = q_data->q_id;
+ } else
+ retval = find_available_queue_slot(pqm, qid);
+
if (retval != 0)
return retval;
@@ -248,12 +291,13 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* allocate_sdma_queue() in create_queue() has the
* corresponding check logic.
*/
- retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
- retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
+ restore_mqd, restore_ctl_stack);
print_queue(q);
break;
@@ -268,12 +312,13 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
- retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
+ restore_mqd, restore_ctl_stack);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -394,6 +439,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
pdd->qpd.num_gws = 0;
}
+ if (dev->shared_resources.enable_mes) {
+ amdgpu_amdkfd_free_gtt_mem(dev->adev,
+ pqn->q->gang_ctx_bo);
+ if (pqn->q->wptr_bo)
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
+
+ }
uninit_queue(pqn->q);
}
@@ -446,6 +498,21 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
return -EFAULT;
}
+ /* ASICs that have WGPs must enforce pairwise enabled mask checks. */
+ if (minfo && minfo->update_flag == UPDATE_FLAG_CU_MASK && minfo->cu_mask.ptr &&
+ KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
+ int i;
+
+ for (i = 0; i < minfo->cu_mask.count; i += 2) {
+ uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;
+
+ if (cu_pair && cu_pair != 0x3) {
+ pr_debug("CUs must be adjacent pairwise enabled.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
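[Note: the pairwise rule enforced above reflects gfx10+ hardware, where CUs come in WGP pairs and a WGP cannot run with only one of its two CUs enabled. Below is a standalone, runnable userspace sketch of the same check; the function name and signature are simplified stand-ins, not the driver's.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bits 2k and 2k+1 of the CU mask must be enabled together. */
static bool cu_mask_pairs_valid(const uint32_t *mask, unsigned int count)
{
	for (unsigned int i = 0; i < count; i += 2) {
		uint32_t pair = (mask[i / 32] >> (i % 32)) & 0x3;

		if (pair && pair != 0x3)	/* only one CU of the WGP set */
			return false;
	}
	return true;
}

int main(void)
{
	uint32_t ok[]  = { 0x0000000f };	/* CUs 0-3: two full pairs */
	uint32_t bad[] = { 0x00000007 };	/* CU 2 enabled without CU 3 */

	printf("ok:  %d\n", cu_mask_pairs_valid(ok, 32));	/* 1 */
	printf("bad: %d\n", cu_mask_pairs_valid(bad, 32));	/* 0 */
	return 0;
}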
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, minfo);
if (retval != 0)
@@ -498,6 +565,359 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
save_area_used_size);
}
+static int get_queue_data_sizes(struct kfd_process_device *pdd,
+ struct queue *q,
+ uint32_t *mqd_size,
+ uint32_t *ctl_stack_size)
+{
+ int ret;
+
+ ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
+ q->properties.queue_id,
+ mqd_size,
+ ctl_stack_size);
+ if (ret)
+ pr_err("Failed to get queue dump info (%d)\n", ret);
+
+ return ret;
+}
+
+int kfd_process_get_queue_info(struct kfd_process *p,
+ uint32_t *num_queues,
+ uint64_t *priv_data_sizes)
+{
+ uint32_t extra_data_sizes = 0;
+ struct queue *q;
+ int i;
+ int ret;
+
+ *num_queues = 0;
+
+ /* Run over all PDDs of the process */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ list_for_each_entry(q, &pdd->qpd.queues_list, list) {
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ uint32_t mqd_size, ctl_stack_size;
+
+ *num_queues = *num_queues + 1;
+
+ ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
+ if (ret)
+ return ret;
+
+ extra_data_sizes += mqd_size + ctl_stack_size;
+ } else {
+ pr_err("Unsupported queue type (%d)\n", q->properties.type);
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+ *priv_data_sizes = extra_data_sizes +
+ (*num_queues * sizeof(struct kfd_criu_queue_priv_data));
+
+ return 0;
+}
+
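[Note: kfd_process_get_queue_info() is the sizing half of a two-pass serialization: CRIU first asks how large the private blob will be, then the checkpoint pass fills it. A toy version of the same accounting, with every size invented for the example.]

#include <stdint.h>
#include <stdio.h>

struct toy_queue { uint32_t mqd_size, ctl_stack_size; };

int main(void)
{
	struct toy_queue queues[] = { { 512, 256 }, { 512, 128 } };
	uint32_t hdr_size = 184;	/* stands in for sizeof(struct kfd_criu_queue_priv_data) */
	uint32_t num_queues = 0;
	uint64_t extra = 0;

	for (unsigned int i = 0; i < 2; i++) {
		num_queues++;
		extra += queues[i].mqd_size + queues[i].ctl_stack_size;
	}
	/* matches: priv_data_sizes = extra_data_sizes + num_queues * sizeof(hdr) */
	printf("blob size: %llu\n",
	       (unsigned long long)(extra + (uint64_t)num_queues * hdr_size));
	return 0;
}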
+static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
+ unsigned int qid,
+ void *mqd,
+ void *ctl_stack)
+{
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("amdkfd: No queue %d exists for operation\n", qid);
+ return -EFAULT;
+ }
+
+ if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
+ pr_err("amdkfd: queue dumping not supported on this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
+ pqn->q, mqd, ctl_stack);
+}
+
+static int criu_checkpoint_queue(struct kfd_process_device *pdd,
+ struct queue *q,
+ struct kfd_criu_queue_priv_data *q_data)
+{
+ uint8_t *mqd, *ctl_stack;
+ int ret;
+
+ mqd = (void *)(q_data + 1);
+ ctl_stack = mqd + q_data->mqd_size;
+
+ q_data->gpu_id = pdd->user_gpu_id;
+ q_data->type = q->properties.type;
+ q_data->format = q->properties.format;
+ q_data->q_id = q->properties.queue_id;
+ q_data->q_address = q->properties.queue_address;
+ q_data->q_size = q->properties.queue_size;
+ q_data->priority = q->properties.priority;
+ q_data->q_percent = q->properties.queue_percent;
+ q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
+ q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
+ q_data->doorbell_id = q->doorbell_id;
+
+ q_data->sdma_id = q->sdma_id;
+
+ q_data->eop_ring_buffer_address =
+ q->properties.eop_ring_buffer_address;
+
+ q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;
+
+ q_data->ctx_save_restore_area_address =
+ q->properties.ctx_save_restore_area_address;
+
+ q_data->ctx_save_restore_area_size =
+ q->properties.ctx_save_restore_area_size;
+
+ q_data->gws = !!q->gws;
+
+ ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
+ if (ret) {
+ pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
+ return ret;
+ }
+
+ pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
+ return ret;
+}
+
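[Note: criu_checkpoint_queue() relies on the caller laying out each record as a fixed header followed immediately by two variable-sized blobs, which is what the (q_data + 1) arithmetic encodes. A self-contained sketch of that layout; demo_hdr is a stand-in, not the real struct.]

/* [ header ][ mqd bytes ][ ctl_stack bytes ] */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct demo_hdr {
	uint32_t mqd_size;
	uint32_t ctl_stack_size;
};

int main(void)
{
	uint32_t mqd_size = 64, ctl_stack_size = 32;
	struct demo_hdr *hdr = calloc(1, sizeof(*hdr) + mqd_size + ctl_stack_size);

	if (!hdr)
		return 1;
	hdr->mqd_size = mqd_size;
	hdr->ctl_stack_size = ctl_stack_size;

	/* (hdr + 1) points just past the header, exactly as
	 * mqd = (void *)(q_data + 1) does in the patch. */
	uint8_t *mqd = (uint8_t *)(hdr + 1);
	uint8_t *ctl_stack = mqd + hdr->mqd_size;

	memset(mqd, 0xaa, hdr->mqd_size);
	memset(ctl_stack, 0x55, hdr->ctl_stack_size);

	free(hdr);
	return 0;
}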
+static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
+ uint8_t __user *user_priv,
+ unsigned int *q_index,
+ uint64_t *queues_priv_data_offset)
+{
+ unsigned int q_private_data_size = 0;
+ uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
+ struct queue *q;
+ int ret = 0;
+
+ list_for_each_entry(q, &pdd->qpd.queues_list, list) {
+ struct kfd_criu_queue_priv_data *q_data;
+ uint64_t q_data_size;
+ uint32_t mqd_size;
+ uint32_t ctl_stack_size;
+
+ if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
+ q->properties.type != KFD_QUEUE_TYPE_SDMA &&
+ q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {
+
+ pr_err("Unsupported queue type (%d)\n", q->properties.type);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
+ if (ret)
+ break;
+
+ q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;
+
+ /* Increase local buffer space if needed */
+ if (q_private_data_size < q_data_size) {
+ kfree(q_private_data);
+
+ q_private_data = kzalloc(q_data_size, GFP_KERNEL);
+ if (!q_private_data) {
+ ret = -ENOMEM;
+ break;
+ }
+ q_private_data_size = q_data_size;
+ }
+
+ q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
+
+ /* data stored in this order: priv_data, mqd, ctl_stack */
+ q_data->mqd_size = mqd_size;
+ q_data->ctl_stack_size = ctl_stack_size;
+
+ ret = criu_checkpoint_queue(pdd, q, q_data);
+ if (ret)
+ break;
+
+ q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;
+
+ ret = copy_to_user(user_priv + *queues_priv_data_offset,
+ q_data, q_data_size);
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+ *queues_priv_data_offset += q_data_size;
+ *q_index = *q_index + 1;
+ }
+
+ kfree(q_private_data);
+
+ return ret;
+}
+
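[Note: the q_private_data buffer in the loop above is a grow-only scratch allocation: it is reallocated only when a queue needs more room than any seen so far, and freed once after the loop. A minimal userspace sketch of that reuse pattern, with invented item sizes.]

#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t sizes[] = { 128, 64, 256, 32 };	/* per-item payload sizes */
	size_t cap = 0;
	unsigned char *scratch = NULL;

	for (unsigned int i = 0; i < 4; i++) {
		/* grow only when the current item would not fit */
		if (cap < sizes[i]) {
			free(scratch);
			scratch = calloc(1, sizes[i]);
			if (!scratch)
				return 1;
			cap = sizes[i];
		}
		memset(scratch, 0, sizes[i]);	/* build this item's blob */
	}

	free(scratch);	/* single free after the loop, as in the patch */
	return 0;
}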
+int kfd_criu_checkpoint_queues(struct kfd_process *p,
+ uint8_t __user *user_priv_data,
+ uint64_t *priv_data_offset)
+{
+ int ret = 0, pdd_index, q_index = 0;
+
+ for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
+ struct kfd_process_device *pdd = p->pdds[pdd_index];
+
+ /*
+ * criu_checkpoint_queues_device will copy data to user and update q_index and
+ * queues_priv_data_offset
+ */
+ ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
+ priv_data_offset);
+
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void set_queue_properties_from_criu(struct queue_properties *qp,
+ struct kfd_criu_queue_priv_data *q_data)
+{
+ qp->is_interop = false;
+ qp->queue_percent = q_data->q_percent;
+ qp->priority = q_data->priority;
+ qp->queue_address = q_data->q_address;
+ qp->queue_size = q_data->q_size;
+ qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
+ qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
+ qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
+ qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
+ qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
+ qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
+ qp->ctl_stack_size = q_data->ctl_stack_size;
+ qp->type = q_data->type;
+ qp->format = q_data->format;
+}
+
+int kfd_criu_restore_queue(struct kfd_process *p,
+ uint8_t __user *user_priv_ptr,
+ uint64_t *priv_data_offset,
+ uint64_t max_priv_data_size)
+{
+ uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
+ struct kfd_criu_queue_priv_data *q_data;
+ struct kfd_process_device *pdd;
+ uint64_t q_extra_data_size;
+ struct queue_properties qp;
+ unsigned int queue_id;
+ int ret = 0;
+
+ if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
+ return -EINVAL;
+
+ q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
+ if (!q_data)
+ return -ENOMEM;
+
+ ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ *priv_data_offset += sizeof(*q_data);
+ q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;
+
+ if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
+ if (!q_extra_data) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ *priv_data_offset += q_extra_data_size;
+
+ pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
+ if (!pdd) {
+ pr_err("Failed to get pdd\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* data stored in this order: mqd, ctl_stack */
+ mqd = q_extra_data;
+ ctl_stack = mqd + q_data->mqd_size;
+
+ memset(&qp, 0, sizeof(qp));
+ set_queue_properties_from_criu(&qp, q_data);
+
+ print_queue_properties(&qp);
+
+ ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack,
+ NULL);
+ if (ret) {
+ pr_err("Failed to create new queue err:%d\n", ret);
+ goto exit;
+ }
+
+ if (q_data->gws)
+ ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);
+
+exit:
+ if (ret)
+ pr_err("Failed to restore queue (%d)\n", ret);
+ else
+ pr_debug("Queue id %d was restored successfully\n", queue_id);
+
+ kfree(q_data);
+
+ return ret;
+}
+
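[Note: the restore path validates *priv_data_offset against max_priv_data_size before each copy_from_user(), so a corrupt header cannot steer the second copy past the end of the checkpoint image. A userspace sketch of the same guarded-cursor pattern, with memcpy() standing in for copy_from_user() and invented record types.]

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct toy_hdr { uint64_t payload_size; };

/* Consume one record from image[0..image_size); advance *offset only
 * after the corresponding bounds check passes. */
static int read_record(const uint8_t *image, uint64_t image_size,
		       uint64_t *offset, struct toy_hdr *hdr,
		       uint8_t *payload, uint64_t payload_cap)
{
	if (*offset + sizeof(*hdr) > image_size)
		return -EINVAL;			/* header would overrun */
	memcpy(hdr, image + *offset, sizeof(*hdr));
	*offset += sizeof(*hdr);

	if (hdr->payload_size > payload_cap ||
	    *offset + hdr->payload_size > image_size)
		return -EINVAL;			/* payload would overrun */
	memcpy(payload, image + *offset, hdr->payload_size);
	*offset += hdr->payload_size;
	return 0;
}

int main(void)
{
	uint8_t image[64] = { 0 };
	struct toy_hdr hdr = { .payload_size = 16 };
	uint8_t payload[32];
	uint64_t off = 0;

	memcpy(image, &hdr, sizeof(hdr));
	return read_record(image, sizeof(image), &off, &hdr,
			   payload, sizeof(payload));
}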
+int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
+ unsigned int qid,
+ uint32_t *mqd_size,
+ uint32_t *ctl_stack_size)
+{
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("amdkfd: No queue %d exists for operation\n", qid);
+ return -EFAULT;
+ }
+
+ if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
+ pr_err("amdkfd: queue dumping not supported on this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
+ pqn->q, mqd_size,
+ ctl_stack_size);
+ return 0;
+}
+
#if defined(CONFIG_DEBUG_FS)
int pqm_debugfs_mqds(struct seq_file *m, void *data)