path: root/meta-amdfalconx86/recipes-graphics/drm/files/0044-amdgpu-support-alloc-va-from-range.patch
Diffstat (limited to 'meta-amdfalconx86/recipes-graphics/drm/files/0044-amdgpu-support-alloc-va-from-range.patch')
-rw-r--r--  meta-amdfalconx86/recipes-graphics/drm/files/0044-amdgpu-support-alloc-va-from-range.patch  |  265
1 file changed, 0 insertions(+), 265 deletions(-)
diff --git a/meta-amdfalconx86/recipes-graphics/drm/files/0044-amdgpu-support-alloc-va-from-range.patch b/meta-amdfalconx86/recipes-graphics/drm/files/0044-amdgpu-support-alloc-va-from-range.patch
deleted file mode 100644
index 61ce6f88..00000000
--- a/meta-amdfalconx86/recipes-graphics/drm/files/0044-amdgpu-support-alloc-va-from-range.patch
+++ /dev/null
@@ -1,265 +0,0 @@
-From 6cac1ca7faba752980ae58e4b10aef5b89c097dd Mon Sep 17 00:00:00 2001
-From: Flora Cui <Flora.Cui@amd.com>
-Date: Thu, 4 Feb 2016 09:42:45 +0800
-Subject: [PATCH 044/117] amdgpu: support alloc va from range
-
-Change-Id: Ib41ca6a99ce500fe783a1b1650f25be9cebec83a
-Signed-off-by: Flora Cui <Flora.Cui@amd.com>
-Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
----
- amdgpu/amdgpu.h | 51 +++++++++++++++
- amdgpu/amdgpu_vamgr.c | 169 ++++++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 220 insertions(+)
-
-diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
-index 4925056..455f388 100644
---- a/amdgpu/amdgpu.h
-+++ b/amdgpu/amdgpu.h
-@@ -1226,6 +1226,57 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
- uint64_t flags);
-
- /**
-+ * Allocate a virtual address range within a client-defined range
-+ *
-+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
-+ * \param va_range_type - \c [in] Type of MC va range from which to allocate
-+ * \param size - \c [in] Size of range. Size must be correctly aligned.
-+ * It is the client's responsibility to correctly align the size based on the
-+ * future usage of the allocated range.
-+ * \param va_base_alignment - \c [in] Override the base address alignment
-+ * requirement for GPU VM MC virtual
-+ * address assignment. Must be a multiple of the size alignments received as
-+ * 'amdgpu_buffer_size_alignments'.
-+ * If 0, the default alignment is used.
-+ * \param va_base_required - \c [in] Specified required VA base address.
-+ * If 0, the library chooses an available one within [va_range_min, va_range_max].
-+ * If a non-zero value is passed and that value is already "in use", the
-+ * corresponding error status will be returned.
-+ * \param va_range_min - \c [in] Minimum address of the required VA range.
-+ * Valid only if va_base_required is 0.
-+ * \param va_range_max - \c [in] Maximum address of the required VA range.
-+ * Valid only if va_base_required is 0.
-+ * \param va_base_allocated - \c [out] On return: Allocated VA base to be used
-+ * by client.
-+ * \param va_range_handle - \c [out] On return: Handle assigned to allocation
-+ * \param flags - \c [in] flags for special VA range
-+ *
-+ * \return 0 on success\n
-+ * >0 - AMD specific error code\n
-+ * <0 - Negative POSIX Error code
-+ *
-+ * \notes \n
-+ * It is the client's responsibility to correctly handle VA assignments and usage.
-+ * Neither the kernel driver nor libdrm_amdgpu is able to prevent or
-+ * detect a wrong VA assignment.
-+ *
-+ * It is the client's responsibility to correctly handle multi-GPU cases and to
-+ * pass the corresponding arrays of all device handles where the corresponding
-+ * VA will be used.
-+ *
-+*/
-+int amdgpu_va_range_alloc_in_range(amdgpu_device_handle dev,
-+				   enum amdgpu_gpu_va_range va_range_type,
-+				   uint64_t size,
-+				   uint64_t va_base_alignment,
-+				   uint64_t va_base_required,
-+				   uint64_t va_range_min,
-+				   uint64_t va_range_max,
-+				   uint64_t *va_base_allocated,
-+				   amdgpu_va_handle *va_range_handle,
-+				   uint64_t flags);
-+
-+/**
- * Free previously allocated virtual address range
- *
- *
-diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
-index 973274d..82653e9 100644
---- a/amdgpu/amdgpu_vamgr.c
-+++ b/amdgpu/amdgpu_vamgr.c
-@@ -169,6 +169,94 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
- return offset;
- }
-
-+static uint64_t amdgpu_vamgr_find_va_in_range(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
-+		uint64_t alignment, uint64_t range_min, uint64_t range_max)
-+{
-+	struct amdgpu_bo_va_hole *hole, *n;
-+	uint64_t offset = 0, waste = 0;
-+
-+	if (mgr->va_min >= range_max ||
-+	    mgr->va_max <= range_min)
-+		return AMDGPU_INVALID_VA_ADDRESS;
-+
-+	alignment = MAX2(alignment, mgr->va_alignment);
-+	size = ALIGN(size, mgr->va_alignment);
-+
-+	pthread_mutex_lock(&mgr->bo_va_mutex);
-+	/* TODO: using more appropriate way to track the holes */
-+	/* first look for a hole */
-+	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
-+		if (hole->offset > range_max ||
-+		    hole->offset + hole->size < range_min ||
-+		    (hole->offset > range_min && hole->offset + size > range_max) ||
-+		    (hole->offset < range_min && range_min + size > hole->offset + hole->size) ||
-+		    hole->size < size)
-+			continue;
-+		offset = hole->offset;
-+		waste = offset % alignment;
-+		waste = waste ? alignment - waste : 0;
-+		offset += waste;
-+		if (offset >= (hole->offset + hole->size)) {
-+			continue;
-+		}
-+
-+		if (!waste && hole->size == size) {
-+			offset = hole->offset;
-+			list_del(&hole->list);
-+			free(hole);
-+			pthread_mutex_unlock(&mgr->bo_va_mutex);
-+			return offset;
-+		}
-+		if ((hole->size - waste) > size) {
-+			if (waste) {
-+				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-+				n->size = waste;
-+				n->offset = hole->offset;
-+				list_add(&n->list, &hole->list);
-+			}
-+			hole->size -= (size + waste);
-+			hole->offset += size + waste;
-+			pthread_mutex_unlock(&mgr->bo_va_mutex);
-+			return offset;
-+		}
-+		if ((hole->size - waste) == size) {
-+			hole->size = waste;
-+			pthread_mutex_unlock(&mgr->bo_va_mutex);
-+			return offset;
-+		}
-+	}
-+
-+	if (mgr->va_offset > range_max) {
-+		pthread_mutex_unlock(&mgr->bo_va_mutex);
-+		return AMDGPU_INVALID_VA_ADDRESS;
-+	} else if (mgr->va_offset > range_min) {
-+		offset = mgr->va_offset;
-+		waste = offset % alignment;
-+		waste = waste ? alignment - waste : 0;
-+		if (offset + waste + size > range_max) {
-+			pthread_mutex_unlock(&mgr->bo_va_mutex);
-+			return AMDGPU_INVALID_VA_ADDRESS;
-+		}
-+	} else {
-+		offset = mgr->va_offset;
-+		waste = range_min % alignment;
-+		waste = waste ? alignment - waste : 0;
-+		waste += range_min - offset;
-+	}
-+
-+	if (waste) {
-+		n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-+		n->size = waste;
-+		n->offset = offset;
-+		list_add(&n->list, &mgr->va_holes);
-+	}
-+
-+	offset += waste;
-+	mgr->va_offset = size + offset;
-+	pthread_mutex_unlock(&mgr->bo_va_mutex);
-+	return offset;
-+}
-+
- drm_private void
- amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
- {
-@@ -294,6 +382,87 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
- return 0;
- }
-
-+static int _amdgpu_va_range_alloc_in_range(amdgpu_device_handle dev,
-+					   enum amdgpu_gpu_va_range va_range_type,
-+					   uint64_t size,
-+					   uint64_t va_base_alignment,
-+					   uint64_t va_range_min,
-+					   uint64_t va_range_max,
-+					   uint64_t *va_base_allocated,
-+					   amdgpu_va_handle *va_range_handle,
-+					   uint64_t flags)
-+{
-+	struct amdgpu_bo_va_mgr *vamgr;
-+
-+	if (amdgpu_gpu_va_range_svm == va_range_type) {
-+		vamgr = &vamgr_svm;
-+		if (!vamgr->valid)
-+			return -EINVAL;
-+	}
-+	else if (flags & AMDGPU_VA_RANGE_32_BIT)
-+		vamgr = &dev->vamgr_32;
-+	else
-+		vamgr = &dev->vamgr;
-+
-+	va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
-+	size = ALIGN(size, vamgr->va_alignment);
-+
-+	*va_base_allocated = amdgpu_vamgr_find_va_in_range(vamgr, size,
-+			va_base_alignment, va_range_min, va_range_max);
-+
-+	if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
-+	    (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
-+		/* fallback to 32bit address */
-+		vamgr = &dev->vamgr_32;
-+		*va_base_allocated = amdgpu_vamgr_find_va_in_range(vamgr, size,
-+				va_base_alignment, va_range_min, va_range_max);
-+	}
-+
-+	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
-+		struct amdgpu_va* va;
-+		va = calloc(1, sizeof(struct amdgpu_va));
-+		if(!va){
-+			amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
-+			return -ENOMEM;
-+		}
-+		va->dev = dev;
-+		va->address = *va_base_allocated;
-+		va->size = size;
-+		va->range = va_range_type;
-+		va->vamgr = vamgr;
-+		*va_range_handle = va;
-+	} else {
-+		return -EINVAL;
-+	}
-+
-+	return 0;
-+}
-+
-+int amdgpu_va_range_alloc_in_range(amdgpu_device_handle dev,
-+				   enum amdgpu_gpu_va_range va_range_type,
-+				   uint64_t size,
-+				   uint64_t va_base_alignment,
-+				   uint64_t va_base_required,
-+				   uint64_t va_range_min,
-+				   uint64_t va_range_max,
-+				   uint64_t *va_base_allocated,
-+				   amdgpu_va_handle *va_range_handle,
-+				   uint64_t flags)
-+{
-+	if (va_base_required)
-+		return amdgpu_va_range_alloc(dev, va_range_type,
-+					     size, va_base_alignment,
-+					     va_base_required, va_base_allocated,
-+					     va_range_handle, flags);
-+	else
-+		return _amdgpu_va_range_alloc_in_range(dev,
-+				va_range_type, size,
-+				va_base_alignment,
-+				va_range_min, va_range_max,
-+				va_base_allocated,
-+				va_range_handle, flags);
-+}
-+
- int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
- {
- if(!va_range_handle || !va_range_handle->address)
---
-2.7.4
-
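For reference, below is a minimal usage sketch of the amdgpu_va_range_alloc_in_range() API that the deleted patch above provided. It assumes a libdrm_amdgpu build with this patch applied; the helper name alloc_va_in_window, the 1 MiB size, and the 4-8 GiB window are illustrative values chosen for the example, not taken from the patch.

/* Illustrative sketch only: carve a 1 MiB GPU VA block out of a
 * caller-chosen window, print it, and free it again.  Requires the
 * amdgpu_va_range_alloc_in_range() API added by the patch above. */
#include <inttypes.h>
#include <stdio.h>
#include <amdgpu.h>

static int alloc_va_in_window(amdgpu_device_handle dev)
{
	/* Hypothetical window; real bounds depend on the GPU VM layout. */
	const uint64_t range_min = 0x100000000ULL;	/* 4 GiB */
	const uint64_t range_max = 0x200000000ULL;	/* 8 GiB */
	uint64_t va_base = 0;
	amdgpu_va_handle va_handle = NULL;
	int r;

	r = amdgpu_va_range_alloc_in_range(dev,
					   amdgpu_gpu_va_range_general,
					   1ULL << 20,	/* size: 1 MiB */
					   0,		/* default alignment */
					   0,		/* no fixed base, pick from window */
					   range_min, range_max,
					   &va_base, &va_handle, 0);
	if (r)
		return r;

	printf("VA base allocated at 0x%" PRIx64 "\n", va_base);

	/* Release the range once the client no longer needs it. */
	return amdgpu_va_range_free(va_handle);
}

Because va_base_required is 0, the call falls through to _amdgpu_va_range_alloc_in_range(), which searches the hole list for a fit inside [range_min, range_max]; passing a non-zero va_base_required instead delegates to the pre-existing amdgpu_va_range_alloc() path.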