aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2769-drm-amdkfd-Simplify-eviction-state-logic.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2769-drm-amdkfd-Simplify-eviction-state-logic.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2769-drm-amdkfd-Simplify-eviction-state-logic.patch333
1 files changed, 333 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2769-drm-amdkfd-Simplify-eviction-state-logic.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2769-drm-amdkfd-Simplify-eviction-state-logic.patch
new file mode 100644
index 00000000..31f9cdb5
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2769-drm-amdkfd-Simplify-eviction-state-logic.patch
@@ -0,0 +1,333 @@
+From e3c3b0ae12d33c006141cd43c143d61bd29a540f Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Wed, 1 May 2019 18:20:13 -0400
+Subject: [PATCH 2769/2940] drm/amdkfd: Simplify eviction state logic
+
+Always mark evicted queues with q->properties.is_evicted = true, even
+queues that are inactive for other reasons. This simplifies maintaining
+the eviction state as it doesn't require updating is_evicted when other
+queue activation conditions change.
+
+On the other hand, we now need to check those other queue activation
+conditions whenever an evicted queue is restored. To minimize code
+duplication, move the queue activation check into a macro so it can be
+maintained in one central place.
+
+Change-Id: I9398978a228cd148fb62151ee33d0ec20d93e365
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ .../drm/amd/amdkfd/kfd_device_queue_manager.c | 84 ++++++++++---------
+ .../gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 15 +---
+ .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 10 +--
+ .../gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 10 +--
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 ++
+ 5 files changed, 56 insertions(+), 68 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 43cba6f4a0f3..d5d94f6e60ab 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -315,13 +315,11 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
+ }
+ q->properties.vmid = qpd->vmid;
+ /*
+- * Eviction state logic: we only mark active queues as evicted
+- * to avoid the overhead of restoring inactive queues later
++ * Eviction state logic: mark all queues as evicted, even ones
++ * not currently active. Restoring inactive queues later only
++ * updates the is_evicted flag but is a no-op otherwise.
+ */
+- if (qpd->evicted)
+- q->properties.is_evicted = (q->properties.queue_size > 0 &&
+- q->properties.queue_percent > 0 &&
+- q->properties.queue_address != 0);
++ q->properties.is_evicted = !!qpd->evicted;
+
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
+@@ -544,14 +542,6 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ }
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
+- /*
+- * Eviction state logic: we only mark active queues as evicted
+- * to avoid the overhead of restoring inactive queues later
+- */
+- if (pdd->qpd.evicted)
+- q->properties.is_evicted = (q->properties.queue_size > 0 &&
+- q->properties.queue_percent > 0 &&
+- q->properties.queue_address != 0);
+
+ /* Save previous activity state for counters */
+ prev_active = q->properties.is_active;
+@@ -616,7 +606,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct queue *q;
+ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+- int retval = 0;
++ int retval, ret = 0;
+
+ dqm_lock(dqm);
+ if (qpd->evicted++ > 0) /* already evicted, do nothing */
+@@ -626,25 +616,31 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ pr_info_ratelimited("Evicting PASID %u queues\n",
+ pdd->process->pasid);
+
+- /* unactivate all active queues on the qpd */
++ /* Mark all queues as evicted. Deactivate all active queues on
++ * the qpd.
++ */
+ list_for_each_entry(q, &qpd->queues_list, list) {
++ q->properties.is_evicted = true;
+ if (!q->properties.is_active)
+ continue;
++
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
+- q->properties.is_evicted = true;
+ q->properties.is_active = false;
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+- if (retval)
+- goto out;
++ if (retval && !ret)
++ /* Return the first error, but keep going to
++ * maintain a consistent eviction state
++ */
++ ret = retval;
+ dqm->queue_count--;
+ }
+
+ out:
+ dqm_unlock(dqm);
+- return retval;
++ return ret;
+ }
+
+ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
+@@ -662,11 +658,14 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
+ pr_info_ratelimited("Evicting PASID %u queues\n",
+ pdd->process->pasid);
+
+- /* unactivate all active queues on the qpd */
++ /* Mark all queues as evicted. Deactivate all active queues on
++ * the qpd.
++ */
+ list_for_each_entry(q, &qpd->queues_list, list) {
++ q->properties.is_evicted = true;
+ if (!q->properties.is_active)
+ continue;
+- q->properties.is_evicted = true;
++
+ q->properties.is_active = false;
+ dqm->queue_count--;
+ }
+@@ -688,7 +687,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct mqd_manager *mqd_mgr;
+ struct kfd_process_device *pdd;
+ uint64_t pd_base;
+- int retval = 0;
++ int retval, ret = 0;
+
+ pdd = qpd_to_pdd(qpd);
+ /* Retrieve PD base */
+@@ -722,22 +721,28 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ */
+ mm = get_task_mm(pdd->process->lead_thread);
+ if (!mm) {
+- retval = -EFAULT;
++ ret = -EFAULT;
+ goto out;
+ }
+
+- /* activate all active queues on the qpd */
++ /* Remove the eviction flags. Activate queues that are not
++ * inactive for other reasons.
++ */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+- if (!q->properties.is_evicted)
++ q->properties.is_evicted = false;
++ if (!QUEUE_IS_ACTIVE(q->properties))
+ continue;
++
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
+- q->properties.is_evicted = false;
+ q->properties.is_active = true;
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
+ q->queue, &q->properties, mm);
+- if (retval)
+- goto out;
++ if (retval && !ret)
++ /* Return the first error, but keep going to
++ * maintain a consistent eviction state
++ */
++ ret = retval;
+ dqm->queue_count++;
+ }
+ qpd->evicted = 0;
+@@ -745,7 +750,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ if (mm)
+ mmput(mm);
+ dqm_unlock(dqm);
+- return retval;
++ return ret;
+ }
+
+ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+@@ -777,16 +782,16 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+
+ /* activate all active queues on the qpd */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+- if (!q->properties.is_evicted)
+- continue;
+ q->properties.is_evicted = false;
++ if (!QUEUE_IS_ACTIVE(q->properties))
++ continue;
++
+ q->properties.is_active = true;
+ dqm->queue_count++;
+ }
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+- if (!retval)
+- qpd->evicted = 0;
++ qpd->evicted = 0;
+ out:
+ dqm_unlock(dqm);
+ return retval;
+@@ -1217,13 +1222,12 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
+ /*
+- * Eviction state logic: we only mark active queues as evicted
+- * to avoid the overhead of restoring inactive queues later
++ * Eviction state logic: mark all queues as evicted, even ones
++ * not currently active. Restoring inactive queues later only
++ * updates the is_evicted flag but is a no-op otherwise.
+ */
+- if (qpd->evicted)
+- q->properties.is_evicted = (q->properties.queue_size > 0 &&
+- q->properties.queue_percent > 0 &&
+- q->properties.queue_address != 0);
++ q->properties.is_evicted = !!qpd->evicted;
++
+ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index c44ede9c3945..ea42dea790f8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -266,10 +266,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ update_cu_mask(mm, mqd, q);
+ set_priority(m, q);
+
+- q->is_active = (q->queue_size > 0 &&
+- q->queue_address != 0 &&
+- q->queue_percent > 0 &&
+- !q->is_evicted);
++ q->is_active = QUEUE_IS_ACTIVE(*q);
+
+ return 0;
+ }
+@@ -310,10 +307,7 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ m->sdma_engine_id = q->sdma_engine_id;
+ m->sdma_queue_id = q->sdma_queue_id;
+
+- q->is_active = (q->queue_size > 0 &&
+- q->queue_address != 0 &&
+- q->queue_percent > 0 &&
+- !q->is_evicted);
++ q->is_active = QUEUE_IS_ACTIVE(*q);
+
+ return 0;
+ }
+@@ -393,10 +387,7 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+
+ m->cp_hqd_vmid = q->vmid;
+
+- q->is_active = (q->queue_size > 0 &&
+- q->queue_address != 0 &&
+- q->queue_percent > 0 &&
+- !q->is_evicted);
++ q->is_active = QUEUE_IS_ACTIVE(*q);
+
+ set_priority(m, q);
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index ee42cf87b58d..c53c7e5ce608 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -297,10 +297,7 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
+ update_cu_mask(mm, mqd, q);
+ set_priority(m, q);
+
+- q->is_active = (q->queue_size > 0 &&
+- q->queue_address != 0 &&
+- q->queue_percent > 0 &&
+- !q->is_evicted);
++ q->is_active = QUEUE_IS_ACTIVE(*q);
+
+ return 0;
+ }
+@@ -474,10 +471,7 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ m->sdma_queue_id = q->sdma_queue_id;
+ m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+
+- q->is_active = (q->queue_size > 0 &&
+- q->queue_address != 0 &&
+- q->queue_percent > 0 &&
+- !q->is_evicted);
++ q->is_active = QUEUE_IS_ACTIVE(*q);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index 1ec1a3c06af3..da9d965bca26 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -285,10 +285,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ update_cu_mask(mm, mqd, q);
+ set_priority(m, q);
+
+- q->is_active = (q->queue_size > 0 &&
+- q->queue_address != 0 &&
+- q->queue_percent > 0 &&
+- !q->is_evicted);
++ q->is_active = QUEUE_IS_ACTIVE(*q);
+
+ return 0;
+ }
+@@ -443,10 +440,7 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ m->sdma_engine_id = q->sdma_engine_id;
+ m->sdma_queue_id = q->sdma_queue_id;
+
+- q->is_active = (q->queue_size > 0 &&
+- q->queue_address != 0 &&
+- q->queue_percent > 0 &&
+- !q->is_evicted);
++ q->is_active = QUEUE_IS_ACTIVE(*q);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index ce113dc5bda5..a0311b2ed5d6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -518,6 +518,11 @@ struct queue_properties {
+ uint32_t *cu_mask;
+ };
+
++#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
++ (q).queue_address != 0 && \
++ (q).queue_percent > 0 && \
++ !(q).is_evicted)
++
+ /**
+ * struct queue
+ *
+--
+2.17.1
+