diff options
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/1466-drm-amdkfd-Add-support-for-resuming-SDMA-queues-w-o-.patch')
-rw-r--r-- | common/recipes-kernel/linux/linux-yocto-4.14.71/1466-drm-amdkfd-Add-support-for-resuming-SDMA-queues-w-o-.patch | 156 |
1 files changed, 156 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/1466-drm-amdkfd-Add-support-for-resuming-SDMA-queues-w-o-.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/1466-drm-amdkfd-Add-support-for-resuming-SDMA-queues-w-o-.patch new file mode 100644 index 00000000..8706b80c --- /dev/null +++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/1466-drm-amdkfd-Add-support-for-resuming-SDMA-queues-w-o-.patch @@ -0,0 +1,156 @@ +From 27a044ad049618cd14de6d5701e51ff2c17dac13 Mon Sep 17 00:00:00 2001 +From: Felix Kuehling <Felix.Kuehling@amd.com> +Date: Thu, 30 Jun 2016 16:42:36 -0400 +Subject: [PATCH 1466/4131] drm/amdkfd: Add support for resuming SDMA queues + w/o HWS +
+Add support for resuming SDMA queues w/o HWS in update_queue_nocpsch +and process_restore_queues. +
+Remote process memory access in hqd_sdma_load requires the mmap_sem +to be locked. mmap_sem locking is done before locking dqm->lock to +avoid a circular lock dependency. There is still a circular lock +dependency warning between the process lock and mmap_sem. However, +the process locks involved are all read-locks, so there is no +possibility of a deadlock. 
+ +Change-Id: Iae2c1d05de7c7ff732cf000de91535ab73656c3c +Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com> +--- + .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 59 +++++++++++++--------- + 1 file changed, 34 insertions(+), 25 deletions(-) + +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +index 9fbbeee..aacc4dc 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +@@ -389,18 +389,20 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) + + BUG_ON(!dqm || !q || !q->mqd); + ++ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) ++ down_read(&q->process->mm->mmap_sem); + mutex_lock(&dqm->lock); + + pdd = kfd_get_process_device_data(q->device, q->process); + if (!pdd) { +- mutex_unlock(&dqm->lock); +- return -ENODEV; ++ retval = -ENODEV; ++ goto out_unlock; + } + mqd = dqm->ops.get_mqd_manager(dqm, + get_mqd_type_from_queue_type(q->properties.type)); + if (mqd == NULL) { +- mutex_unlock(&dqm->lock); +- return -ENOMEM; ++ retval = -ENOMEM; ++ goto out_unlock; + } + /* + * Eviction state logic: we only mark active queues as evicted +@@ -418,8 +420,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) + + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && +- q->properties.type == KFD_QUEUE_TYPE_COMPUTE) { +- /* FIXME: Handle SDMA queues as well */ ++ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || ++ q->properties.type == KFD_QUEUE_TYPE_SDMA)) { + if (q->properties.is_active) + retval = mqd->load_mqd(mqd, q->mqd, q->pipe, + q->queue, +@@ -442,7 +444,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) + if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) + retval = execute_queues_cpsch(dqm, false); + ++out_unlock: + mutex_unlock(&dqm->lock); ++ if (dqm->sched_policy == 
KFD_SCHED_POLICY_NO_HWS) ++ up_read(&q->process->mm->mmap_sem); ++ + return retval; + } + +@@ -494,8 +500,9 @@ int process_evict_queues(struct device_queue_manager *dqm, + + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && +- q->properties.type == KFD_QUEUE_TYPE_COMPUTE && +- q->properties.is_evicted) ++ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || ++ q->properties.type == KFD_QUEUE_TYPE_SDMA) && ++ q->properties.is_evicted) + retval = mqd->destroy_mqd(mqd, q->mqd, + KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN, + KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); +@@ -515,21 +522,21 @@ int process_restore_queues(struct device_queue_manager *dqm, + { + struct queue *q, *next; + struct mqd_manager *mqd; ++ struct kfd_process_device *pdd = ++ container_of(qpd, struct kfd_process_device, qpd); + int retval = 0; + +- + BUG_ON(!dqm || !qpd); + ++ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) ++ down_read(&pdd->process->mm->mmap_sem); + mutex_lock(&dqm->lock); +- if (qpd->evicted == 0) { /* already restored, do nothing */ +- mutex_unlock(&dqm->lock); +- return 0; +- } ++ if (qpd->evicted == 0) /* already restored, do nothing */ ++ goto out_unlock; + + if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */ + qpd->evicted--; +- mutex_unlock(&dqm->lock); +- return 0; ++ goto out_unlock; + } + + /* activate all active queues on the qpd */ +@@ -544,15 +551,13 @@ int process_restore_queues(struct device_queue_manager *dqm, + q->properties.is_evicted = false; + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && +- q->properties.type == KFD_QUEUE_TYPE_COMPUTE) +- retval = +- mqd->load_mqd( +- mqd, +- q->mqd, +- q->pipe, +- q->queue, +- (uint32_t __user *)q->properties.write_ptr, +- q->process->mm); ++ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || ++ q->properties.type == KFD_QUEUE_TYPE_SDMA)) ++ retval = mqd->load_mqd( ++ mqd, q->mqd, q->pipe, q->queue, ++ 
(uint32_t __user *) ++ q->properties.write_ptr, ++ q->process->mm); + dqm->queue_count++; + } + } +@@ -561,9 +566,13 @@ int process_restore_queues(struct device_queue_manager *dqm, + + if (retval == 0) + qpd->evicted = 0; ++ ++out_unlock: + mutex_unlock(&dqm->lock); +- return retval; ++ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) ++ up_read(&pdd->process->mm->mmap_sem); + ++ return retval; + } + + static int register_process_nocpsch(struct device_queue_manager *dqm, +-- +2.7.4 + |