aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/0782-drm-amdgpu-add-new-semaphore-object-in-kernel-side.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/0782-drm-amdgpu-add-new-semaphore-object-in-kernel-side.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/0782-drm-amdgpu-add-new-semaphore-object-in-kernel-side.patch493
1 files changed, 493 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/0782-drm-amdgpu-add-new-semaphore-object-in-kernel-side.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/0782-drm-amdgpu-add-new-semaphore-object-in-kernel-side.patch
new file mode 100644
index 00000000..8d4c6e15
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/0782-drm-amdgpu-add-new-semaphore-object-in-kernel-side.patch
@@ -0,0 +1,493 @@
+From 23cae7836e9150e09687b453e04f2b16fa9dc8eb Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <David1.Zhou@amd.com>
+Date: Fri, 23 Sep 2016 10:22:22 +0800
+Subject: [PATCH 0782/4131] drm/amdgpu: add new semaphore object in kernel side
+
+So that semaphore can be shared across process across devices.
+
+Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
+Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+
+Change-Id: I88e2168328d005a42b41eb7b0c60530a92126829
+
+ Conflicts:
+ drivers/gpu/drm/amd/amdgpu/Makefile
+---
+ drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 6 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c | 267 ++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sem.h | 44 ++++++
+ include/uapi/drm/amdgpu_drm.h | 23 +++
+ 8 files changed, 351 insertions(+), 4 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c
+ create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sem.h
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 93fc29d..0713138 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -32,7 +32,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
+ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
+- amdgpu_queue_mgr.o amdgpu_vf_error.o
++ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sem.o
+
+ # add asic specific block
+ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index ceb1223..6cf1620 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -710,6 +710,8 @@ struct amdgpu_ctx_ring {
+ uint64_t sequence;
+ struct dma_fence **fences;
+ struct amd_sched_entity entity;
++ struct list_head sem_list;
++ struct mutex sem_lock;
+ };
+
+ struct amdgpu_ctx {
+@@ -1319,6 +1321,12 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
+ int amdgpu_freesync_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
++int amdgpu_sem_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp);
++
++int amdgpu_sem_add_cs(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
++ struct amdgpu_sync *sync);
++
+ int amdgpu_gem_dgma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 5f892ad..91aac4a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1120,7 +1120,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+ }
+ }
+
+- return 0;
++ return amdgpu_sem_add_cs(p->ctx, p->job->ring, &p->job->sync);
+ }
+
+ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index a11e443..6a2a1ce 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -42,6 +42,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ ctx->rings[i].sequence = 1;
+ ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
++ INIT_LIST_HEAD(&ctx->rings[i].sem_list);
++ mutex_init(&ctx->rings[i].sem_lock);
+ }
+
+ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+@@ -86,8 +88,10 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+ return;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+- for (j = 0; j < amdgpu_sched_jobs; ++j)
++ for (j = 0; j < amdgpu_sched_jobs; ++j) {
+ dma_fence_put(ctx->rings[i].fences[j]);
++ mutex_destroy(&ctx->rings[i].sem_lock);
++ }
+ kfree(ctx->fences);
+ ctx->fences = NULL;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index bc4c55d..a5507b5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -1064,7 +1064,8 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_FREESYNC, amdgpu_freesync_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_FIND_BO, amdgpu_gem_find_bo_by_cpu_mapping_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_DGMA, amdgpu_gem_dgma_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
++ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_DGMA, amdgpu_gem_dgma_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_SEM, amdgpu_sem_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ };
+ const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c
+new file mode 100644
+index 0000000..9fae1ef
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c
+@@ -0,0 +1,267 @@
++/*
++ * Copyright 2016 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Chunming Zhou <david1.zhou@amd.com>
++ */
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/poll.h>
++#include <linux/seq_file.h>
++#include <linux/export.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/anon_inodes.h>
++#include "amdgpu_sem.h"
++#include "amdgpu.h"
++#include <drm/drmP.h>
++
++static int amdgpu_sem_cring_add(struct amdgpu_fpriv *fpriv,
++ struct drm_amdgpu_sem_in *in,
++ struct amdgpu_sem *sem);
++
++static const struct file_operations amdgpu_sem_fops;
++
++static struct amdgpu_sem *amdgpu_sem_alloc(struct dma_fence *fence)
++{
++ struct amdgpu_sem *sem;
++
++ sem = kzalloc(sizeof(struct amdgpu_sem), GFP_KERNEL);
++ if (!sem)
++ return NULL;
++
++ sem->file = anon_inode_getfile("sem_file",
++ &amdgpu_sem_fops,
++ sem, 0);
++ if (IS_ERR(sem->file))
++ goto err;
++
++ kref_init(&sem->kref);
++ INIT_LIST_HEAD(&sem->list);
++ /* fence should be get before passing here */
++ sem->fence = fence;
++
++ return sem;
++err:
++ kfree(sem);
++ return NULL;
++}
++
++static void amdgpu_sem_free(struct kref *kref)
++{
++ struct amdgpu_sem *sem = container_of(
++ kref, struct amdgpu_sem, kref);
++
++ dma_fence_put(sem->fence);
++ kfree(sem);
++}
++
++static int amdgpu_sem_release(struct inode *inode, struct file *file)
++{
++ struct amdgpu_sem *sem = file->private_data;
++
++ kref_put(&sem->kref, amdgpu_sem_free);
++ return 0;
++}
++
++static unsigned int amdgpu_sem_poll(struct file *file, poll_table *wait)
++{
++ return 0;
++}
++
++static long amdgpu_sem_file_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ return 0;
++}
++
++static const struct file_operations amdgpu_sem_fops = {
++ .release = amdgpu_sem_release,
++ .poll = amdgpu_sem_poll,
++ .unlocked_ioctl = amdgpu_sem_file_ioctl,
++ .compat_ioctl = amdgpu_sem_file_ioctl,
++};
++
++static int amdgpu_sem_create(void)
++{
++ return get_unused_fd_flags(O_CLOEXEC);
++}
++
++static int amdgpu_sem_signal(int fd, struct dma_fence *fence)
++{
++ struct amdgpu_sem *sem;
++
++ sem = amdgpu_sem_alloc(fence);
++ if (!sem)
++ return -ENOMEM;
++ fd_install(fd, sem->file);
++
++ return 0;
++}
++
++static int amdgpu_sem_wait(int fd, struct amdgpu_fpriv *fpriv,
++ struct drm_amdgpu_sem_in *in)
++{
++ struct file *file = fget(fd);
++ struct amdgpu_sem *sem;
++ int r;
++
++ if (!file)
++ return -EINVAL;
++
++ sem = file->private_data;
++ if (!sem) {
++ r = -EINVAL;
++ goto err;
++ }
++ r = amdgpu_sem_cring_add(fpriv, in, sem);
++err:
++ fput(file);
++ return r;
++}
++
++static void amdgpu_sem_destroy(void)
++{
++ /* userspace should close fd when they try to destroy sem,
++ * closing fd will free semaphore object.
++ */
++}
++
++static struct dma_fence *amdgpu_sem_get_fence(struct amdgpu_fpriv *fpriv,
++ struct drm_amdgpu_sem_in *in)
++{
++ struct amdgpu_ring *out_ring;
++ struct amdgpu_ctx *ctx;
++ struct dma_fence *fence;
++ uint32_t ctx_id, ip_type, ip_instance, ring;
++ int r;
++
++ ctx_id = in->ctx_id;
++ ip_type = in->ip_type;
++ ip_instance = in->ip_instance;
++ ring = in->ring;
++ ctx = amdgpu_ctx_get(fpriv, ctx_id);
++ if (!ctx)
++ return NULL;
++ r = amdgpu_cs_get_ring(ctx->adev, ip_type, ip_instance, ring,
++ &out_ring);
++ if (r) {
++ amdgpu_ctx_put(ctx);
++ return NULL;
++ }
++ /* get the last fence of this entity */
++ fence = amdgpu_ctx_get_fence(ctx, out_ring,
++ in->seq ? in->seq :
++ ctx->rings[out_ring->idx].sequence - 1);
++ amdgpu_ctx_put(ctx);
++
++ return fence;
++}
++
++static int amdgpu_sem_cring_add(struct amdgpu_fpriv *fpriv,
++ struct drm_amdgpu_sem_in *in,
++ struct amdgpu_sem *sem)
++{
++ struct amdgpu_ring *out_ring;
++ struct amdgpu_ctx *ctx;
++ uint32_t ctx_id, ip_type, ip_instance, ring;
++ int r;
++
++ ctx_id = in->ctx_id;
++ ip_type = in->ip_type;
++ ip_instance = in->ip_instance;
++ ring = in->ring;
++ ctx = amdgpu_ctx_get(fpriv, ctx_id);
++ if (!ctx)
++ return -EINVAL;
++ r = amdgpu_cs_get_ring(ctx->adev, ip_type, ip_instance, ring,
++ &out_ring);
++ if (r)
++ goto err;
++ mutex_lock(&ctx->rings[out_ring->idx].sem_lock);
++ list_add(&sem->list, &ctx->rings[out_ring->idx].sem_list);
++ mutex_unlock(&ctx->rings[out_ring->idx].sem_lock);
++
++err:
++ amdgpu_ctx_put(ctx);
++ return r;
++}
++
++int amdgpu_sem_add_cs(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
++ struct amdgpu_sync *sync)
++{
++ struct amdgpu_sem *sem, *tmp;
++ int r = 0;
++
++ if (list_empty(&ctx->rings[ring->idx].sem_list))
++ return 0;
++
++ mutex_lock(&ctx->rings[ring->idx].sem_lock);
++ list_for_each_entry_safe(sem, tmp, &ctx->rings[ring->idx].sem_list,
++ list) {
++ r = amdgpu_sync_fence(ctx->adev, sync, sem->fence);
++ dma_fence_put(sem->fence);
++ if (r)
++ goto err;
++ list_del(&sem->list);
++ kfree(sem);
++ }
++err:
++ mutex_unlock(&ctx->rings[ring->idx].sem_lock);
++ return r;
++}
++
++int amdgpu_sem_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ union drm_amdgpu_sem *args = data;
++ struct amdgpu_fpriv *fpriv = filp->driver_priv;
++ struct dma_fence *fence;
++ int r = 0;
++ int fd = args->in.fd;
++
++ switch (args->in.op) {
++ case AMDGPU_SEM_OP_CREATE_SEM:
++ args->out.fd = amdgpu_sem_create();
++ break;
++ case AMDGPU_SEM_OP_WAIT_SEM:
++ r = amdgpu_sem_wait(fd, fpriv, &args->in);
++ break;
++ case AMDGPU_SEM_OP_SIGNAL_SEM:
++ fence = amdgpu_sem_get_fence(fpriv, &args->in);
++ if (IS_ERR(fence)) {
++ r = PTR_ERR(fence);
++ return r;
++ }
++ r = amdgpu_sem_signal(fd, fence);
++ dma_fence_put(fence);
++ break;
++ case AMDGPU_SEM_OP_DESTROY_SEM:
++ amdgpu_sem_destroy();
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return r;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.h
+new file mode 100644
+index 0000000..10ec58e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.h
+@@ -0,0 +1,44 @@
++/*
++ * Copyright 2016 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Chunming Zhou <david1.zhou@amd.com>
++ *
++ */
++
++
++#ifndef _LINUX_AMDGPU_SEM_H
++#define _LINUX_AMDGPU_SEM_H
++
++#include <linux/types.h>
++#include <linux/kref.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/dma-fence.h>
++
++struct amdgpu_sem {
++ struct file *file;
++ struct kref kref;
++	struct dma_fence *fence;
++ struct list_head list;
++};
++
++#endif /* _LINUX_AMDGPU_SEM_H */
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index 3d2454b..9e3e615 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -55,6 +55,7 @@ extern "C" {
+
+ /* hybrid specific ioctls */
+ #define DRM_AMDGPU_FREESYNC 0x5d
++#define DRM_AMDGPU_SEM 0x5b
+ #define DRM_AMDGPU_GEM_DGMA 0x5c
+ #define DRM_AMDGPU_GEM_FIND_BO 0x5f
+
+@@ -77,6 +78,7 @@ extern "C" {
+ #define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
+ #define DRM_IOCTL_AMDGPU_GEM_DGMA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_DGMA, struct drm_amdgpu_gem_dgma)
+ #define DRM_IOCTL_AMDGPU_GEM_FIND_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_FIND_BO, struct drm_amdgpu_gem_find_bo)
++#define DRM_IOCTL_AMDGPU_SEM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_SEM, union drm_amdgpu_sem)
+
+ #define AMDGPU_GEM_DOMAIN_CPU 0x1
+ #define AMDGPU_GEM_DOMAIN_GTT 0x2
+@@ -210,6 +212,27 @@ union drm_amdgpu_ctx {
+ union drm_amdgpu_ctx_out out;
+ };
+
++struct drm_amdgpu_sem_in {
++ /** AMDGPU_SEM_OP_* */
++ uint32_t op;
++ int32_t fd;
++ uint32_t ctx_id;
++ uint32_t ip_type;
++ uint32_t ip_instance;
++ uint32_t ring;
++ uint64_t seq;
++};
++
++union drm_amdgpu_sem_out {
++ int32_t fd;
++ uint32_t _pad;
++};
++
++union drm_amdgpu_sem {
++ struct drm_amdgpu_sem_in in;
++ union drm_amdgpu_sem_out out;
++};
++
+ /* vm ioctl */
+ #define AMDGPU_VM_OP_RESERVE_VMID 1
+ #define AMDGPU_VM_OP_UNRESERVE_VMID 2
+--
+2.7.4
+