Diffstat (limited to 'meta-amdfalconx86/recipes-kernel/linux/files/0001-PATCH-amdgpu-get-maximum-and-used-UVD-handles.patch')
-rw-r--r--  meta-amdfalconx86/recipes-kernel/linux/files/0001-PATCH-amdgpu-get-maximum-and-used-UVD-handles.patch  158
1 file changed, 158 insertions, 0 deletions
diff --git a/meta-amdfalconx86/recipes-kernel/linux/files/0001-PATCH-amdgpu-get-maximum-and-used-UVD-handles.patch b/meta-amdfalconx86/recipes-kernel/linux/files/0001-PATCH-amdgpu-get-maximum-and-used-UVD-handles.patch
new file mode 100644
index 00000000..506f127f
--- /dev/null
+++ b/meta-amdfalconx86/recipes-kernel/linux/files/0001-PATCH-amdgpu-get-maximum-and-used-UVD-handles.patch
@@ -0,0 +1,158 @@
+From 195d9672a2766fca3c0495884e2ef6a4f133ebb7 Mon Sep 17 00:00:00 2001
+From: Ahsan Hussain <ahsan_hussain@mentor.com>
+Date: Wed, 19 Apr 2017 21:01:50 +0500
+Subject: [PATCH] amdgpu: get maximum and used UVD handles
+
+Change History
+--------------
+
+v4: Changes suggested by Emil, Christian
+- return -ENODATA for asics with unlimited sessions
+
+v3: changes suggested by Christian
+- Add a check for UVD IP block using AMDGPU_HW_IP_UVD
+ query type.
+- Add a check for asic_type to be less than
+ CHIP_POLARIS10 since, starting with Polaris, we
+ support unlimited UVD instances.
+- Add kerneldoc style comment for
+ amdgpu_uvd_used_handles().
+
+v2: as suggested by Christian
+- Add a new query AMDGPU_INFO_NUM_HANDLES
+- Create a helper function to return the number
+ of currently used UVD handles.
+- Modify the logic to count the number of used
+ UVD handles since handles can be freed in
+ non-linear fashion.
+
+v1:
+- A user might want to query the maximum number of UVD
+ instances supported by the firmware. In addition,
+ if there are multiple applications using UVD handles
+ at the same time, they might also want to query the
+ number of handles currently in use.
+
+ For this we add two variables, max_handles and
+ used_handles, inside drm_amdgpu_info_hw_ip, so that
+ an application (or libdrm) can use the AMDGPU_INFO
+ IOCTL with the AMDGPU_INFO_HW_IP_INFO query type to
+ get these values.
+
+Signed-off-by: Ahsan Hussain <ahsan_hussain@mentor.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 21 +++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 25 +++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 1 +
+ drivers/gpu/drm/amd/include/uapi/drm/amdgpu_drm.h | 9 ++++++++
+ 4 files changed, 56 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index a48783e50..e7ab49d49 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -505,6 +505,27 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ return copy_to_user(out, &dev_info,
+ min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
+ }
++ case AMDGPU_INFO_NUM_HANDLES: {
++ struct drm_amdgpu_info_num_handles handle;
++
++ switch (info->query_hw_ip.type) {
++ case AMDGPU_HW_IP_UVD:
++ /* Starting Polaris, we support unlimited UVD handles */
++ if (adev->asic_type < CHIP_POLARIS10) {
++ handle.uvd_max_handles = adev->uvd.max_handles;
++ handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
++
++ return copy_to_user(out, &handle,
++ min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
++ } else {
++ return -ENODATA;
++ }
++
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
+ default:
+ DRM_DEBUG_KMS("Invalid request %d\n", info->query);
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 849c7959c..20960e82a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -940,6 +940,31 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
+ return 0;
+ }
+
++/**
++ * amdgpu_uvd_used_handles - returns used UVD handles
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Returns the number of UVD handles in use
++ */
++uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
++{
++ unsigned i;
++ uint32_t used_handles = 0;
++
++ for (i = 0; i < adev->uvd.max_handles; ++i) {
++ /*
++ * Handles can be freed in any order, and not
++ * necessarily linear. So we need to count
++ * all non-zero handles.
++ */
++ if (atomic_read(&adev->uvd.handles[i]))
++ used_handles++;
++ }
++
++ return used_handles;
++}
++
+ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ bool direct, struct fence **fence)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index 9a3b44908..19250d69d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -35,5 +35,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
+ struct drm_file *filp);
+ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
++uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/include/uapi/drm/amdgpu_drm.h b/drivers/gpu/drm/amd/include/uapi/drm/amdgpu_drm.h
+index c2f06eba3..c4f903d61 100644
+--- a/drivers/gpu/drm/amd/include/uapi/drm/amdgpu_drm.h
++++ b/drivers/gpu/drm/amd/include/uapi/drm/amdgpu_drm.h
+@@ -549,6 +549,8 @@ struct drm_amdgpu_cs_chunk_data {
+ #define AMDGPU_INFO_CAPABILITY 0x50
+ /* query pin memory capability */
+ #define AMDGPU_CAPABILITY_PIN_MEM_FLAG (1 << 0)
++/* Query UVD handles */
++#define AMDGPU_INFO_NUM_HANDLES 0x1C
+
+ #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
+ #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
+@@ -710,6 +712,13 @@ struct drm_amdgpu_info_hw_ip {
+ __u32 _pad;
+ };
+
++struct drm_amdgpu_info_num_handles {
++ /** Max handles as supported by firmware for UVD */
++ __u32 uvd_max_handles;
++ /** Handles currently in use for UVD */
++ __u32 uvd_used_handles;
++};
++
+ struct drm_amdgpu_heap_info {
+ /** max. physical memory */
+ __u64 total_heap_size;
+--
+2.11.1
+
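The patch above exposes the new counters through the existing AMDGPU_INFO ioctl: userspace sets query to AMDGPU_INFO_NUM_HANDLES and query_hw_ip.type to AMDGPU_HW_IP_UVD, and the kernel either fills a struct drm_amdgpu_info_num_handles or returns -ENODATA on Polaris and newer ASICs, where UVD sessions are unlimited. Below is a minimal userspace sketch of that call sequence; it is not part of the patch. The render-node path /dev/dri/renderD128 and the <drm/amdgpu_drm.h> include location are assumptions about the target, and a robust caller would go through libdrm's drmIoctl() to retry on EINTR.

/*
 * Editor's sketch, not part of the patch: query maximum and used UVD
 * handles through the AMDGPU_INFO ioctl extended above. The render-node
 * path and the <drm/amdgpu_drm.h> include location are assumptions.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/amdgpu_drm.h>   /* must be the patched header */

int main(void)
{
    struct drm_amdgpu_info request;
    struct drm_amdgpu_info_num_handles handles;
    int fd = open("/dev/dri/renderD128", O_RDWR);

    if (fd < 0) {
        perror("open render node");
        return 1;
    }

    memset(&request, 0, sizeof(request));
    memset(&handles, 0, sizeof(handles));
    request.return_pointer = (uintptr_t)&handles;     /* kernel copies the result here */
    request.return_size = sizeof(handles);
    request.query = AMDGPU_INFO_NUM_HANDLES;
    request.query_hw_ip.type = AMDGPU_HW_IP_UVD;      /* only UVD is handled by this query */

    /* Fails with ENODATA on Polaris and newer, where UVD sessions are unlimited. */
    if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) != 0) {
        perror("DRM_IOCTL_AMDGPU_INFO");
        close(fd);
        return 1;
    }

    printf("UVD handles: %u used of %u max\n",
           handles.uvd_used_handles, handles.uvd_max_handles);
    close(fd);
    return 0;
}

Note that this example only builds against the patched UAPI header from this tree: AMDGPU_INFO_NUM_HANDLES (0x1C here) and struct drm_amdgpu_info_num_handles are introduced by this patch and are not guaranteed to be present, or to have the same value, in other amdgpu headers.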