Diffstat (limited to 'meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/0826-drm-amdgpu-implement-vm_operations_struct.access.patch')
-rw-r--r--  meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/0826-drm-amdgpu-implement-vm_operations_struct.access.patch | 170
1 file changed, 0 insertions, 170 deletions
diff --git a/meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/0826-drm-amdgpu-implement-vm_operations_struct.access.patch b/meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/0826-drm-amdgpu-implement-vm_operations_struct.access.patch
deleted file mode 100644
index a9a761f1..00000000
--- a/meta-v1000/recipes-kernel/linux/linux-yocto-4.14.71/0826-drm-amdgpu-implement-vm_operations_struct.access.patch
+++ /dev/null
@@ -1,170 +0,0 @@
-From e400626a0f8d4cdb867c0f60742f8db5c0fbbe5d Mon Sep 17 00:00:00 2001
-From: Felix Kuehling <Felix.Kuehling@amd.com>
-Date: Mon, 1 Feb 2016 16:11:37 -0500
-Subject: [PATCH 0826/4131] drm/amdgpu: implement vm_operations_struct.access
-
-Allows gdb to access contents of user mode mapped BOs.
-
-Change-Id: Ice34fd17c914369172e2d30db97c36ab013a0e82
-Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
-
- Conflicts:
- drivers/gpu/drm/amd/amdgpu/amdgpu.h
----
- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 119 +++++++++++++++++++++++++++++++-
- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2 +
- 2 files changed, 120 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
-index f81146b..834b8fb 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
-@@ -1559,6 +1559,123 @@ void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
- man->size = size >> PAGE_SHIFT;
- }
-
-+static struct vm_operations_struct amdgpu_ttm_vm_ops;
-+static const struct vm_operations_struct *ttm_vm_ops /* = NULL;
-+ * (appease checkpatch) */;
-+static int amdgpu_ttm_bo_access_vram(struct amdgpu_bo *abo,
-+ unsigned long offset,
-+ void *buf, int len, int write)
-+{
-+ struct amdgpu_device *adev = abo->adev;
-+ uint64_t pos = amdgpu_bo_gpu_offset(abo) + offset;
-+ uint32_t value = 0;
-+ unsigned long flags;
-+ int result = 0;
-+
-+ while (len && pos < adev->mc.mc_vram_size) {
-+ uint64_t aligned_pos = pos & ~(uint64_t)3;
-+ uint32_t bytes = 4 - (pos & 3);
-+ uint32_t shift = (pos & 3) * 8;
-+ uint32_t mask = 0xffffffff << shift;
-+
-+ if (len < bytes) {
-+ mask &= 0xffffffff >> (bytes - len) * 8;
-+ bytes = len;
-+ }
-+
-+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-+ WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
-+ WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
-+ if (!write || mask != 0xffffffff)
-+ value = RREG32(mmMM_DATA);
-+ if (write) {
-+ value &= ~mask;
-+ value |= (*(uint32_t *)buf << shift) & mask;
-+ WREG32(mmMM_DATA, value);
-+ }
-+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-+ if (!write) {
-+ value = (value & mask) >> shift;
-+ memcpy(buf, &value, bytes);
-+ }
-+
-+ result += bytes;
-+ buf = (uint8_t *)buf + bytes;
-+ pos += bytes;
-+ len -= bytes;
-+ }
-+
-+ return result;
-+}
-+
-+static int amdgpu_ttm_bo_access_kmap(struct amdgpu_bo *abo,
-+ unsigned long offset,
-+ void *buf, int len, int write)
-+{
-+ struct ttm_buffer_object *bo = &abo->tbo;
-+ struct ttm_bo_kmap_obj map;
-+ void *ptr;
-+ bool is_iomem;
-+ int r;
-+
-+ r = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
-+ if (r)
-+ return r;
-+ ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
-+ WARN_ON(is_iomem);
-+ if (write)
-+ memcpy(ptr, buf, len);
-+ else
-+ memcpy(buf, ptr, len);
-+ ttm_bo_kunmap(&map);
-+
-+ return len;
-+}
-+
-+static int amdgpu_ttm_vm_access(struct vm_area_struct *vma, unsigned long addr,
-+ void *buf, int len, int write)
-+{
-+ unsigned long offset = (addr) - vma->vm_start;
-+ struct ttm_buffer_object *bo = vma->vm_private_data;
-+ struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
-+ unsigned domain;
-+ int result;
-+
-+ result = amdgpu_bo_reserve(abo, false);
-+ if (result != 0)
-+ return result;
-+
-+ domain = amdgpu_mem_type_to_domain(bo->mem.mem_type);
-+ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
-+ result = amdgpu_ttm_bo_access_vram(abo, offset,
-+ buf, len, write);
-+ else
-+ result = amdgpu_ttm_bo_access_kmap(abo, offset,
-+ buf, len, write);
-+ amdgpu_bo_unreserve(abo);
-+
-+ return len;
-+}
-+
-+int amdgpu_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-+ struct ttm_bo_device *bdev)
-+{
-+ int r;
-+
-+ r = ttm_bo_mmap(filp, vma, bdev);
-+ if (unlikely(r != 0))
-+ return r;
-+
-+ if (unlikely(ttm_vm_ops == NULL)) {
-+ ttm_vm_ops = vma->vm_ops;
-+ amdgpu_ttm_vm_ops = *ttm_vm_ops;
-+ amdgpu_ttm_vm_ops.access = &amdgpu_ttm_vm_access;
-+ }
-+ vma->vm_ops = &amdgpu_ttm_vm_ops;
-+
-+ return 0;
-+}
-+
- int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
- {
- struct drm_file *file_priv;
-@@ -1572,7 +1689,7 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
- if (adev == NULL)
- return -EINVAL;
-
-- return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
-+ return amdgpu_bo_mmap(filp, vma, &adev->mman.bdev);
- }
-
- static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
-index be4c98a..07dc856 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
-@@ -83,6 +83,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- struct dma_fence **fence);
-
- int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-+int amdgpu_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-+ struct ttm_bo_device *bdev);
- bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
- int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
- int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
---
-2.7.4
-
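
For context, the `.access` hook that the deleted patch wired up is what the kernel's `access_process_vm()` falls back to for VM_IO/VM_PFNMAP mappings that `get_user_pages()` cannot follow; both ptrace and reads of `/proc/<pid>/mem` go through that path, which is how gdb ends up reaching `amdgpu_ttm_vm_access()` for user-mapped BOs. The sketch below is not part of the patch: it is a minimal userspace illustration, assuming the target pid and a BO mapping address have already been found in `/proc/<pid>/maps`, of how such a read can be issued from another process.

/*
 * Minimal sketch: read another process's memory via /proc/<pid>/mem.
 * For an amdgpu BO mapping, the kernel services this read through
 * access_process_vm(), which calls vma->vm_ops->access() for mappings
 * without struct pages -- i.e. the hook added by the patch above.
 *
 * Usage: ./peek <pid> <hex-address> <length>
 * The caller needs ptrace permission for the target (e.g. run as root,
 * or attach with ptrace first).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[64];
	unsigned char buf[256];
	pid_t pid;
	uint64_t addr;
	size_t len;
	ssize_t n, i;
	int fd;

	if (argc != 4) {
		fprintf(stderr, "usage: %s <pid> <hex-address> <length>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)strtol(argv[1], NULL, 10);
	addr = strtoull(argv[2], NULL, 16);
	len = strtoul(argv[3], NULL, 10);
	if (len > sizeof(buf))
		len = sizeof(buf);

	snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* pread() at the target's virtual address; for a VRAM- or GTT-backed
	 * amdgpu mapping this is the request that lands in the .access hook. */
	n = pread(fd, buf, len, (off_t)addr);
	if (n < 0) {
		perror("pread");
		close(fd);
		return 1;
	}

	for (i = 0; i < n; i++)
		printf("%02x%s", buf[i], (i % 16 == 15) ? "\n" : " ");
	printf("\n");
	close(fd);
	return 0;
}

Without an `.access` implementation, such reads of VRAM-backed amdgpu mappings come back empty and gdb reports the memory as inaccessible; with the hook in place, the driver routes the request either through the MM_INDEX/MM_DATA indirect register window (VRAM) or through a ttm_bo_kmap() of the buffer (other domains), as the patch body shows.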