Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2798-drm-amdkfd-Refactor-create_queue_nocpsch.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2798-drm-amdkfd-Refactor-create_queue_nocpsch.patch  254
1 file changed, 254 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2798-drm-amdkfd-Refactor-create_queue_nocpsch.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2798-drm-amdkfd-Refactor-create_queue_nocpsch.patch
new file mode 100644
index 00000000..92acdba9
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2798-drm-amdkfd-Refactor-create_queue_nocpsch.patch
@@ -0,0 +1,254 @@
+From e9d0739af4cda00b936caa2ffc6e7ee37b03b157 Mon Sep 17 00:00:00 2001
+From: Oak Zeng <Oak.Zeng@amd.com>
+Date: Fri, 31 May 2019 16:05:59 -0500
+Subject: [PATCH 2798/2940] drm/amdkfd: Refactor create_queue_nocpsch
+
+This is preparation work to fix a circular lock dependency.
+No logic change.
+
+Change-Id: I219633bf740255b0caaf7d41bcc889544ff8a885
+Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ .../drm/amd/amdkfd/kfd_device_queue_manager.c | 171 ++++++------------
+ 1 file changed, 57 insertions(+), 114 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 188ef938d5d9..700b34d6caee 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -43,10 +43,6 @@
+ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
+ unsigned int pasid, unsigned int vmid);
+
+-static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+- struct queue *q,
+- struct qcm_process_device *qpd);
+-
+ static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param);
+@@ -56,13 +52,14 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+
+ static int map_queues_cpsch(struct device_queue_manager *dqm);
+
+-static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+- struct queue *q,
+- struct qcm_process_device *qpd);
+-
+ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ struct queue *q);
+
++static inline void deallocate_hqd(struct device_queue_manager *dqm,
++ struct queue *q);
++static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
++static int allocate_sdma_queue(struct device_queue_manager *dqm,
++ struct queue *q);
+ static void kfd_process_hw_exception(struct work_struct *work);
+
+ static inline
+@@ -249,6 +246,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
+ /* invalidate the VM context after pasid and vmid mapping is set up */
+ kfd_flush_tlb(qpd_to_pdd(qpd));
+
++ dqm->dev->kfd2kgd->set_scratch_backing_va(
++ dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
++
+ return 0;
+ }
+
+@@ -295,6 +295,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+ {
++ struct mqd_manager *mqd_mgr;
+ int retval;
+
+ print_queue(q);
+@@ -324,18 +325,41 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
+
+- if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
+- retval = create_compute_queue_nocpsch(dqm, q, qpd);
+- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+- q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+- retval = create_sdma_queue_nocpsch(dqm, q, qpd);
+- else
+- retval = -EINVAL;
++ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
++ q->properties.type)];
++ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
++ retval = allocate_hqd(dqm, q);
++ if (retval)
++ goto deallocate_vmid;
++ pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
++ q->pipe, q->queue);
++ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
++ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
++ retval = allocate_sdma_queue(dqm, q);
++ if (retval)
++ goto deallocate_vmid;
++ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
++ }
+
+- if (retval) {
+- if (list_empty(&qpd->queues_list))
+- deallocate_vmid(dqm, qpd, q);
+- goto out_unlock;
++ retval = allocate_doorbell(qpd, q);
++ if (retval)
++ goto out_deallocate_hqd;
++
++ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
++ &q->gart_mqd_addr, &q->properties);
++ if (retval)
++ goto out_deallocate_doorbell;
++
++ if (q->properties.is_active) {
++
++ if (WARN(q->process->mm != current->mm,
++ "should only run in user thread"))
++ retval = -EFAULT;
++ else
++ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
++ q->queue, &q->properties, current->mm);
++ if (retval)
++ goto out_uninit_mqd;
+ }
+
+ list_add(&q->list, &qpd->queues_list);
+@@ -355,7 +379,21 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
++ goto out_unlock;
+
++out_uninit_mqd:
++ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
++out_deallocate_doorbell:
++ deallocate_doorbell(qpd, q);
++out_deallocate_hqd:
++ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
++ deallocate_hqd(dqm, q);
++ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
++ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
++ deallocate_sdma_queue(dqm, q);
++deallocate_vmid:
++ if (list_empty(&qpd->queues_list))
++ deallocate_vmid(dqm, qpd, q);
+ out_unlock:
+ dqm_unlock(dqm);
+ return retval;
+@@ -401,58 +439,6 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
+ dqm->allocated_queues[q->pipe] |= (1 << q->queue);
+ }
+
+-static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+- struct queue *q,
+- struct qcm_process_device *qpd)
+-{
+- struct mqd_manager *mqd_mgr;
+- int retval;
+-
+- mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
+-
+- retval = allocate_hqd(dqm, q);
+- if (retval)
+- return retval;
+-
+- retval = allocate_doorbell(qpd, q);
+- if (retval)
+- goto out_deallocate_hqd;
+-
+- retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
+- &q->gart_mqd_addr, &q->properties);
+- if (retval)
+- goto out_deallocate_doorbell;
+-
+- pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
+- q->pipe, q->queue);
+-
+- dqm->dev->kfd2kgd->set_scratch_backing_va(
+- dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+-
+- if (!q->properties.is_active)
+- return 0;
+-
+- if (WARN(q->process->mm != current->mm,
+- "should only run in user thread"))
+- retval = -EFAULT;
+- else
+- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+- &q->properties, current->mm);
+- if (retval)
+- goto out_uninit_mqd;
+-
+- return 0;
+-
+-out_uninit_mqd:
+- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+-out_deallocate_doorbell:
+- deallocate_doorbell(qpd, q);
+-out_deallocate_hqd:
+- deallocate_hqd(dqm, q);
+-
+- return retval;
+-}
+-
+ /* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
+ * to avoid asynchronized access
+ */
+@@ -1049,49 +1035,6 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
+ }
+ }
+
+-static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+- struct queue *q,
+- struct qcm_process_device *qpd)
+-{
+- struct mqd_manager *mqd_mgr;
+- int retval;
+-
+- mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA];
+-
+- retval = allocate_sdma_queue(dqm, q);
+- if (retval)
+- return retval;
+-
+- retval = allocate_doorbell(qpd, q);
+- if (retval)
+- goto out_deallocate_sdma_queue;
+-
+- dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
+- retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
+- &q->gart_mqd_addr, &q->properties);
+- if (retval)
+- goto out_deallocate_doorbell;
+-
+- if (!q->properties.is_active)
+- return 0;
+-
+- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
+- current->mm);
+- if (retval)
+- goto out_uninit_mqd;
+-
+- return 0;
+-
+-out_uninit_mqd:
+- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+-out_deallocate_doorbell:
+- deallocate_doorbell(qpd, q);
+-out_deallocate_sdma_queue:
+- deallocate_sdma_queue(dqm, q);
+-
+- return retval;
+-}
+-
+ /*
+ * Device Queue Manager implementation for cp scheduler
+ */
+--
+2.17.1
+
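The patch above folds create_compute_queue_nocpsch() and create_sdma_queue_nocpsch() into create_queue_nocpsch(), so every resource (HQD or SDMA queue, doorbell, MQD) is acquired in one place and, on failure, released through a single goto-based unwind chain. The following minimal user-space sketch illustrates that pattern only; the types, helper signatures, and stub bodies are simplified stand-ins and are not the actual amdkfd driver code.

/*
 * Minimal standalone sketch of the consolidated error-unwind pattern used by
 * the refactored create_queue_nocpsch(): each allocation step that fails
 * jumps to a label that releases everything acquired before it.
 * All names below are illustrative stand-ins, not the amdkfd API.
 */
#include <stdio.h>
#include <stdbool.h>

enum queue_type { QUEUE_COMPUTE, QUEUE_SDMA };

struct queue {
	enum queue_type type;
	bool is_active;
};

/* Stubbed resource helpers; return 0 on success, negative on failure. */
static int allocate_hqd(struct queue *q)           { puts("alloc hqd");      return 0; }
static void deallocate_hqd(struct queue *q)        { puts("free hqd"); }
static int allocate_sdma_queue(struct queue *q)    { puts("alloc sdma");     return 0; }
static void deallocate_sdma_queue(struct queue *q) { puts("free sdma"); }
static int allocate_doorbell(struct queue *q)      { puts("alloc doorbell"); return 0; }
static void deallocate_doorbell(struct queue *q)   { puts("free doorbell"); }
static int init_mqd(struct queue *q)               { puts("init mqd");       return 0; }
static void uninit_mqd(struct queue *q)            { puts("uninit mqd"); }
static int load_mqd(struct queue *q)               { puts("load mqd");       return -1; /* force unwind */ }

static int create_queue(struct queue *q)
{
	int retval;

	/* Step 1: per-type hardware queue slot. */
	if (q->type == QUEUE_COMPUTE)
		retval = allocate_hqd(q);
	else
		retval = allocate_sdma_queue(q);
	if (retval)
		goto out;

	/* Step 2: doorbell. */
	retval = allocate_doorbell(q);
	if (retval)
		goto out_deallocate_hqd;

	/* Step 3: MQD (memory queue descriptor). */
	retval = init_mqd(q);
	if (retval)
		goto out_deallocate_doorbell;

	/* Step 4: load onto hardware only if the queue is active. */
	if (q->is_active) {
		retval = load_mqd(q);
		if (retval)
			goto out_uninit_mqd;
	}
	return 0;

out_uninit_mqd:
	uninit_mqd(q);
out_deallocate_doorbell:
	deallocate_doorbell(q);
out_deallocate_hqd:
	if (q->type == QUEUE_COMPUTE)
		deallocate_hqd(q);
	else
		deallocate_sdma_queue(q);
out:
	return retval;
}

int main(void)
{
	struct queue q = { .type = QUEUE_COMPUTE, .is_active = true };

	/* load_mqd() is stubbed to fail, so this prints the unwind sequence. */
	printf("create_queue returned %d\n", create_queue(&q));
	return 0;
}

Because the load step is stubbed to fail, running the sketch prints the releases in reverse acquisition order, which is the invariant the refactor preserves while removing the two per-type helper functions.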