path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1432-drm-amdkfd-Avoid-submitting-an-unnecessary-packet-to.patch
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1432-drm-amdkfd-Avoid-submitting-an-unnecessary-packet-to.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1432-drm-amdkfd-Avoid-submitting-an-unnecessary-packet-to.patch  153
1 file changed, 153 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1432-drm-amdkfd-Avoid-submitting-an-unnecessary-packet-to.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1432-drm-amdkfd-Avoid-submitting-an-unnecessary-packet-to.patch
new file mode 100644
index 00000000..5dfd0071
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1432-drm-amdkfd-Avoid-submitting-an-unnecessary-packet-to.patch
@@ -0,0 +1,146 @@
+From c2aaf0e942b702ff89ac090216159fe46bc66fa1 Mon Sep 17 00:00:00 2001
+From: Yong Zhao <yong.zhao@amd.com>
+Date: Fri, 13 May 2016 20:46:29 -0400
+Subject: [PATCH 1432/4131] drm/amdkfd: Avoid submitting an unnecessary packet
+ to HWS
+
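+destroy_kernel_queue_cpsch() used to unmap all queues itself and then
+call execute_queues_cpsch(), which sent a second unmap packet for the
+dynamic queues before remapping. Instead, pass a
+static_queues_included flag into execute_queues_cpsch() so it can
+choose the right unmap filter and submit only one unmap packet to the
+HWS.
+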
+Change-Id: Iff2c4f9eb5e0cd39deb1f290feab4f2467921f8b
+Signed-off-by: Yong Zhao <yong.zhao@amd.com>
+---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 35 ++++++++++++----------
+ 1 file changed, 20 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index c5724ba..ea47e8f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -44,7 +44,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
+
+-static int execute_queues_cpsch(struct device_queue_manager *dqm);
++static int execute_queues_cpsch(struct device_queue_manager *dqm,
++ bool static_queues_included);
+ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param, bool reset);
+@@ -429,7 +430,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ dqm->queue_count--;
+
+ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
+- retval = execute_queues_cpsch(dqm);
++ retval = execute_queues_cpsch(dqm, false);
+
+ mutex_unlock(&dqm->lock);
+ return retval;
+@@ -491,7 +492,7 @@ int process_evict_queues(struct device_queue_manager *dqm,
+ dqm->queue_count--;
+ }
+ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
+- retval = execute_queues_cpsch(dqm);
++ retval = execute_queues_cpsch(dqm, false);
+
+ mutex_unlock(&dqm->lock);
+ return retval;
+@@ -545,7 +546,7 @@ int process_restore_queues(struct device_queue_manager *dqm,
+ }
+ }
+ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
+- retval = execute_queues_cpsch(dqm);
++ retval = execute_queues_cpsch(dqm, false);
+
+ if (retval == 0)
+ qpd->evicted = 0;
+@@ -888,7 +889,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ node->qpd->pqm->process);
+
+ mutex_lock(&dqm->lock);
+- execute_queues_cpsch(dqm);
++ execute_queues_cpsch(dqm, false);
+ mutex_unlock(&dqm->lock);
+
+ return 0;
+@@ -949,7 +950,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
+ list_add(&kq->list, &qpd->priv_queue_list);
+ dqm->queue_count++;
+ qpd->is_debug = true;
+- execute_queues_cpsch(dqm);
++ execute_queues_cpsch(dqm, false);
+ mutex_unlock(&dqm->lock);
+
+ return 0;
+@@ -965,11 +966,10 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
+
+ mutex_lock(&dqm->lock);
+ /* here we actually preempt the DIQ */
+- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
+ list_del(&kq->list);
+ dqm->queue_count--;
+ qpd->is_debug = false;
+- execute_queues_cpsch(dqm);
++ execute_queues_cpsch(dqm, true);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type.
+@@ -1039,7 +1039,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ list_add(&q->list, &qpd->queues_list);
+ if (q->properties.is_active) {
+ dqm->queue_count++;
+- retval = execute_queues_cpsch(dqm);
++ retval = execute_queues_cpsch(dqm, false);
+ }
+
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+@@ -1132,14 +1132,19 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ }
+
+ /* dqm->lock mutex has to be locked before calling this function */
+-static int execute_queues_cpsch(struct device_queue_manager *dqm)
++static int execute_queues_cpsch(struct device_queue_manager *dqm,
++ bool static_queues_included)
+ {
+ int retval;
++ enum kfd_unmap_queues_filter filter;
+
+ BUG_ON(!dqm);
+
+- retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+- 0, false);
++ filter = static_queues_included ?
++ KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
++ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
++
++ retval = unmap_queues_cpsch(dqm, filter, 0, false);
+ if (retval != 0) {
+ pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+ return retval;
+@@ -1208,7 +1213,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ if (q->properties.is_active)
+ dqm->queue_count--;
+
+- retval = execute_queues_cpsch(dqm);
++ retval = execute_queues_cpsch(dqm, false);
+
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+@@ -1349,7 +1354,7 @@ static int set_page_directory_base(struct device_queue_manager *dqm,
+ * will have the update PD base address
+ */
+ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
+- retval = execute_queues_cpsch(dqm);
++ retval = execute_queues_cpsch(dqm, false);
+
+ out:
+ mutex_unlock(&dqm->lock);
+@@ -1454,7 +1459,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+ }
+ }
+
+- retval = execute_queues_cpsch(dqm);
++ retval = execute_queues_cpsch(dqm, false);
+
+ /* lastly, free mqd resources */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+--
+2.7.4
+