From 7e298768abac419327d0ef5a1c7fa3ac3dc9cbf6 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Mon, 1 Feb 2016 16:11:37 -0500
Subject: [PATCH 1382/4131] drm/amdgpu: implement vm_operations_struct.access

Allows gdb to access contents of user mode mapped BOs.

Change-Id: Ice34fd17c914369172e2d30db97c36ab013a0e82
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: kalyan alle <kalyan.alle@amd.com>

 Conflicts:
        drivers/gpu/drm/amd/amdgpu/amdgpu.h
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 98 ++++++++++++++++++++++++++++++++-
 1 file changed, 97 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2b866be..0e77e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1625,7 +1625,102 @@ void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
 }
 
 static struct vm_operations_struct amdgpu_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops /* = NULL;
+                                                      * (appease checkpatch) */;
+static int amdgpu_ttm_bo_access_vram(struct amdgpu_bo *abo,
+                                     unsigned long offset,
+                                     void *buf, int len, int write)
+{
+        struct amdgpu_device *adev = abo->adev;
+        uint64_t pos = amdgpu_bo_gpu_offset(abo) + offset;
+        uint32_t value = 0;
+        unsigned long flags;
+        int result = 0;
+
+        while (len && pos < adev->mc.mc_vram_size) {
+                uint64_t aligned_pos = pos & ~(uint64_t)3;
+                uint32_t bytes = 4 - (pos & 3);
+                uint32_t shift = (pos & 3) * 8;
+                uint32_t mask = 0xffffffff << shift;
+
+                if (len < bytes) {
+                        mask &= 0xffffffff >> (bytes - len) * 8;
+                        bytes = len;
+                }
+
+                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+                WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+                WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
+                if (!write || mask != 0xffffffff)
+                        value = RREG32(mmMM_DATA);
+                if (write) {
+                        value &= ~mask;
+                        value |= (*(uint32_t *)buf << shift) & mask;
+                        WREG32(mmMM_DATA, value);
+                }
+                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+                if (!write) {
+                        value = (value & mask) >> shift;
+                        memcpy(buf, &value, bytes);
+                }
+
+                result += bytes;
+                buf = (uint8_t *)buf + bytes;
+                pos += bytes;
+                len -= bytes;
+        }
+
+        return result;
+}
+
+static int amdgpu_ttm_bo_access_kmap(struct amdgpu_bo *abo,
+                                     unsigned long offset,
+                                     void *buf, int len, int write)
+{
+        struct ttm_buffer_object *bo = &abo->tbo;
+        struct ttm_bo_kmap_obj map;
+        void *ptr;
+        bool is_iomem;
+        int r;
+
+        r = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
+        if (r)
+                return r;
+        ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
+        WARN_ON(is_iomem);
+        if (write)
+                memcpy(ptr, buf, len);
+        else
+                memcpy(buf, ptr, len);
+        ttm_bo_kunmap(&map);
+
+        return len;
+}
+
+static int amdgpu_ttm_vm_access(struct vm_area_struct *vma, unsigned long addr,
+                                void *buf, int len, int write)
+{
+        unsigned long offset = (addr) - vma->vm_start;
+        struct ttm_buffer_object *bo = vma->vm_private_data;
+        struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+        unsigned domain;
+        int result;
+
+        result = amdgpu_bo_reserve(abo, false);
+        if (result != 0)
+                return result;
+
+        domain = amdgpu_mem_type_to_domain(bo->mem.mem_type);
+        if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+                result = amdgpu_ttm_bo_access_vram(abo, offset,
+                                                   buf, len, write);
+        else
+                result = amdgpu_ttm_bo_access_kmap(abo, offset,
+                                                   buf, len, write);
+        amdgpu_bo_unreserve(abo);
+
+        return len;
+}
 
 int amdgpu_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 		   struct ttm_bo_device *bdev)
@@ -1639,6 +1734,7 @@ int amdgpu_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 	if (unlikely(ttm_vm_ops == NULL)) {
 		ttm_vm_ops = vma->vm_ops;
 		amdgpu_ttm_vm_ops = *ttm_vm_ops;
+		amdgpu_ttm_vm_ops.access = &amdgpu_ttm_vm_access;
 	}
 	vma->vm_ops = &amdgpu_ttm_vm_ops;
 
-- 
2.7.4
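
Not part of the patch itself, but for illustration of how the new .access callback gets exercised: gdb reaches it through ptrace/access_process_vm(), and reads of /proc/<pid>/mem take essentially the same path, falling back to vma->vm_ops->access when get_user_pages() cannot service a VM_IO/VM_PFNMAP mapping such as a TTM BO. The sketch below is a hypothetical userspace test, not an AMD tool: the target pid and the mapped BO address (taken from /proc/<pid>/maps) are assumptions, and ptrace permission on the target process is required.

/*
 * Hypothetical illustration: read a few words from a BO that another
 * process has mmapped, via /proc/<pid>/mem.  With this patch applied,
 * the pread() below ends up in amdgpu_ttm_vm_access(); without it, the
 * read fails because get_user_pages() cannot handle the mapping.
 */
#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <hex-address>\n", argv[0]);
		return 1;
	}

	char path[64];
	snprintf(path, sizeof(path), "/proc/%s/mem", argv[1]);

	/* Needs ptrace permission on the target (e.g. run as root). */
	int fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	uint64_t addr = strtoull(argv[2], NULL, 16);
	uint32_t words[4];

	/* Read 16 bytes starting at the BO mapping address. */
	ssize_t n = pread(fd, words, sizeof(words), (off_t)addr);
	if (n < 0) {
		perror("pread");
		close(fd);
		return 1;
	}

	for (int i = 0; i < n / 4; i++)
		printf("0x%016" PRIx64 ": 0x%08" PRIx32 "\n",
		       addr + 4u * i, words[i]);

	close(fd);
	return 0;
}

The same mechanism is what lets gdb attach with "gdb -p <pid>" and dump a mapped BO with "x/4xw <address>", which is the use case the commit message refers to.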