Diffstat (limited to 'meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch')
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch | 2396
 1 file changed, 2396 insertions(+), 0 deletions(-)
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch
new file mode 100644
index 00000000..88914a74
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch
@@ -0,0 +1,2396 @@
+From c0e94dfc8abc3ec25c0a6342f9872a9e71aa7864 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 22:43:23 +0200
+Subject: [PATCH 08/29] winsys/amdgpu: add a new winsys for the new kernel
+ driver
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ configure.ac | 5 +
+ src/gallium/Makefile.am | 1 +
+ src/gallium/drivers/r300/Automake.inc | 6 +-
+ src/gallium/drivers/r600/Automake.inc | 6 +-
+ src/gallium/drivers/radeonsi/Automake.inc | 6 +-
+ src/gallium/targets/pipe-loader/Makefile.am | 12 +-
+ src/gallium/winsys/radeon/amdgpu/Android.mk | 40 ++
+ src/gallium/winsys/radeon/amdgpu/Makefile.am | 12 +
+ src/gallium/winsys/radeon/amdgpu/Makefile.sources | 8 +
+ src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c | 643 ++++++++++++++++++++++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h | 75 +++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c | 578 +++++++++++++++++++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h | 149 +++++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_public.h | 14 +
+ src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c | 491 +++++++++++++++++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h | 80 +++
+ src/gallium/winsys/radeon/drm/radeon_drm_winsys.c | 8 +
+ src/gallium/winsys/radeon/radeon_winsys.h | 4 +
+ 18 files changed, 2129 insertions(+), 9 deletions(-)
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/Android.mk
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/Makefile.am
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/Makefile.sources
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_public.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h
+
+diff --git a/configure.ac b/configure.ac
+index 095e23e..f22975f 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -68,6 +68,7 @@ AC_SUBST([OSMESA_VERSION])
+ dnl Versions for external dependencies
+ LIBDRM_REQUIRED=2.4.38
+ LIBDRM_RADEON_REQUIRED=2.4.56
++LIBDRM_AMDGPU_REQUIRED=2.4.60
+ LIBDRM_INTEL_REQUIRED=2.4.60
+ LIBDRM_NVVIEUX_REQUIRED=2.4.33
+ LIBDRM_NOUVEAU_REQUIRED="2.4.33 libdrm >= 2.4.41"
+@@ -2091,6 +2092,7 @@ if test -n "$with_gallium_drivers"; then
+ xr300)
+ HAVE_GALLIUM_R300=yes
+ PKG_CHECK_MODULES([RADEON], [libdrm_radeon >= $LIBDRM_RADEON_REQUIRED])
++ PKG_CHECK_MODULES([AMDGPU], [libdrm_amdgpu >= $LIBDRM_AMDGPU_REQUIRED])
+ gallium_require_drm "Gallium R300"
+ gallium_require_drm_loader
+ gallium_require_llvm "Gallium R300"
+@@ -2098,6 +2100,7 @@ if test -n "$with_gallium_drivers"; then
+ xr600)
+ HAVE_GALLIUM_R600=yes
+ PKG_CHECK_MODULES([RADEON], [libdrm_radeon >= $LIBDRM_RADEON_REQUIRED])
++ PKG_CHECK_MODULES([AMDGPU], [libdrm_amdgpu >= $LIBDRM_AMDGPU_REQUIRED])
+ gallium_require_drm "Gallium R600"
+ gallium_require_drm_loader
+ if test "x$enable_r600_llvm" = xyes -o "x$enable_opencl" = xyes; then
+@@ -2114,6 +2117,7 @@ if test -n "$with_gallium_drivers"; then
+ xradeonsi)
+ HAVE_GALLIUM_RADEONSI=yes
+ PKG_CHECK_MODULES([RADEON], [libdrm_radeon >= $LIBDRM_RADEON_REQUIRED])
++ PKG_CHECK_MODULES([AMDGPU], [libdrm_amdgpu >= $LIBDRM_AMDGPU_REQUIRED])
+ gallium_require_drm "radeonsi"
+ gallium_require_drm_loader
+ radeon_llvm_check "radeonsi"
+@@ -2384,6 +2388,7 @@ AC_CONFIG_FILES([Makefile
+ src/gallium/winsys/intel/drm/Makefile
+ src/gallium/winsys/nouveau/drm/Makefile
+ src/gallium/winsys/radeon/drm/Makefile
++ src/gallium/winsys/radeon/amdgpu/Makefile
+ src/gallium/winsys/svga/drm/Makefile
+ src/gallium/winsys/sw/dri/Makefile
+ src/gallium/winsys/sw/kms-dri/Makefile
+diff --git a/src/gallium/Makefile.am b/src/gallium/Makefile.am
+index ede6e21..fa526d4 100644
+--- a/src/gallium/Makefile.am
++++ b/src/gallium/Makefile.am
+@@ -63,6 +63,7 @@ endif
+ ## the radeon winsys - linked in by r300, r600 and radeonsi
+ if NEED_RADEON_DRM_WINSYS
+ SUBDIRS += winsys/radeon/drm
++SUBDIRS += winsys/radeon/amdgpu
+ endif
+
+ ## swrast/softpipe
+diff --git a/src/gallium/drivers/r300/Automake.inc b/src/gallium/drivers/r300/Automake.inc
+index 9334973..cfcd61c 100644
+--- a/src/gallium/drivers/r300/Automake.inc
++++ b/src/gallium/drivers/r300/Automake.inc
+@@ -5,9 +5,11 @@ TARGET_CPPFLAGS += -DGALLIUM_R300
+ TARGET_LIB_DEPS += \
+ $(top_builddir)/src/gallium/drivers/r300/libr300.la \
+ $(RADEON_LIBS) \
+- $(INTEL_LIBS)
++ $(LIBDRM_LIBS) \
++ $(AMDGPU_LIBS)
+
+ TARGET_RADEON_WINSYS = \
+- $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la
++ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la
+
+ endif
+diff --git a/src/gallium/drivers/r600/Automake.inc b/src/gallium/drivers/r600/Automake.inc
+index 914eea3..2bb34b0 100644
+--- a/src/gallium/drivers/r600/Automake.inc
++++ b/src/gallium/drivers/r600/Automake.inc
+@@ -5,10 +5,12 @@ TARGET_CPPFLAGS += -DGALLIUM_R600
+ TARGET_LIB_DEPS += \
+ $(top_builddir)/src/gallium/drivers/r600/libr600.la \
+ $(RADEON_LIBS) \
+- $(LIBDRM_LIBS)
++ $(LIBDRM_LIBS) \
++ $(AMDGPU_LIBS)
+
+ TARGET_RADEON_WINSYS = \
+- $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la
++ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la
+
+ TARGET_RADEON_COMMON = \
+ $(top_builddir)/src/gallium/drivers/radeon/libradeon.la
+diff --git a/src/gallium/drivers/radeonsi/Automake.inc b/src/gallium/drivers/radeonsi/Automake.inc
+index 8686fff..200a254 100644
+--- a/src/gallium/drivers/radeonsi/Automake.inc
++++ b/src/gallium/drivers/radeonsi/Automake.inc
+@@ -5,10 +5,12 @@ TARGET_CPPFLAGS += -DGALLIUM_RADEONSI
+ TARGET_LIB_DEPS += \
+ $(top_builddir)/src/gallium/drivers/radeonsi/libradeonsi.la \
+ $(RADEON_LIBS) \
+- $(LIBDRM_LIBS)
++ $(LIBDRM_LIBS) \
++ $(AMDGPU_LIBS)
+
+ TARGET_RADEON_WINSYS = \
+- $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la
++ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la
+
+ TARGET_RADEON_COMMON = \
+ $(top_builddir)/src/gallium/drivers/radeon/libradeon.la
+diff --git a/src/gallium/targets/pipe-loader/Makefile.am b/src/gallium/targets/pipe-loader/Makefile.am
+index 967cdb7..3527090 100644
+--- a/src/gallium/targets/pipe-loader/Makefile.am
++++ b/src/gallium/targets/pipe-loader/Makefile.am
+@@ -124,9 +124,11 @@ nodist_EXTRA_pipe_r300_la_SOURCES = dummy.cpp
+ pipe_r300_la_LIBADD = \
+ $(PIPE_LIBS) \
+ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la \
+ $(top_builddir)/src/gallium/drivers/r300/libr300.la \
+ $(LIBDRM_LIBS) \
+- $(RADEON_LIBS)
++ $(RADEON_LIBS) \
++ $(AMDGPU_LIBS)
+
+ endif
+
+@@ -138,10 +140,12 @@ nodist_EXTRA_pipe_r600_la_SOURCES = dummy.cpp
+ pipe_r600_la_LIBADD = \
+ $(PIPE_LIBS) \
+ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la \
+ $(top_builddir)/src/gallium/drivers/radeon/libradeon.la \
+ $(top_builddir)/src/gallium/drivers/r600/libr600.la \
+ $(LIBDRM_LIBS) \
+- $(RADEON_LIBS)
++ $(RADEON_LIBS) \
++ $(AMDGPU_LIBS)
+
+ endif
+
+@@ -153,10 +157,12 @@ nodist_EXTRA_pipe_radeonsi_la_SOURCES = dummy.cpp
+ pipe_radeonsi_la_LIBADD = \
+ $(PIPE_LIBS) \
+ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la \
+ $(top_builddir)/src/gallium/drivers/radeon/libradeon.la \
+ $(top_builddir)/src/gallium/drivers/radeonsi/libradeonsi.la \
+ $(LIBDRM_LIBS) \
+- $(RADEON_LIBS)
++ $(RADEON_LIBS) \
++ $(AMDGPU_LIBS)
+
+ endif
+
+diff --git a/src/gallium/winsys/radeon/amdgpu/Android.mk b/src/gallium/winsys/radeon/amdgpu/Android.mk
+new file mode 100644
+index 0000000..a10312f
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/Android.mk
+@@ -0,0 +1,40 @@
++# Mesa 3-D graphics library
++#
++# Copyright (C) 2011 Chia-I Wu <olvaffe@gmail.com>
++# Copyright (C) 2011 LunarG Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included
++# in all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++# DEALINGS IN THE SOFTWARE.
++
++LOCAL_PATH := $(call my-dir)
++
++# get C_SOURCES
++include $(LOCAL_PATH)/Makefile.sources
++
++include $(CLEAR_VARS)
++
++LOCAL_SRC_FILES := $(C_SOURCES)
++
++LOCAL_C_INCLUDES := \
++ $(DRM_TOP) \
++ $(DRM_TOP)/include/drm
++
++LOCAL_MODULE := libmesa_winsys_amdgpu
++
++include $(GALLIUM_COMMON_MK)
++include $(BUILD_STATIC_LIBRARY)
+diff --git a/src/gallium/winsys/radeon/amdgpu/Makefile.am b/src/gallium/winsys/radeon/amdgpu/Makefile.am
+new file mode 100644
+index 0000000..80ecb75
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/Makefile.am
+@@ -0,0 +1,12 @@
++include Makefile.sources
++include $(top_srcdir)/src/gallium/Automake.inc
++
++AM_CFLAGS = \
++ $(GALLIUM_WINSYS_CFLAGS) \
++ $(AMDGPU_CFLAGS)
++
++AM_CXXFLAGS = $(AM_CFLAGS)
++
++noinst_LTLIBRARIES = libamdgpuwinsys.la
++
++libamdgpuwinsys_la_SOURCES = $(C_SOURCES)
+diff --git a/src/gallium/winsys/radeon/amdgpu/Makefile.sources b/src/gallium/winsys/radeon/amdgpu/Makefile.sources
+new file mode 100644
+index 0000000..0f55010
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/Makefile.sources
+@@ -0,0 +1,8 @@
++C_SOURCES := \
++ amdgpu_bo.c \
++ amdgpu_bo.h \
++ amdgpu_cs.c \
++ amdgpu_cs.h \
++ amdgpu_public.h \
++ amdgpu_winsys.c \
++ amdgpu_winsys.h
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c b/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c
+new file mode 100644
+index 0000000..de9548e
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c
+@@ -0,0 +1,643 @@
++/*
++ * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++
++#include "amdgpu_cs.h"
++
++#include "os/os_time.h"
++#include "state_tracker/drm_driver.h"
++#include <amdgpu_drm.h>
++#include <xf86drm.h>
++#include <stdio.h>
++
++static const struct pb_vtbl amdgpu_winsys_bo_vtbl;
++
++static INLINE struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
++{
++ assert(bo->vtbl == &amdgpu_winsys_bo_vtbl);
++ return (struct amdgpu_winsys_bo *)bo;
++}
++
++struct amdgpu_bomgr {
++ struct pb_manager base;
++ struct amdgpu_winsys *rws;
++};
++
++static struct amdgpu_winsys *get_winsys(struct pb_manager *mgr)
++{
++ return ((struct amdgpu_bomgr*)mgr)->rws;
++}
++
++static struct amdgpu_winsys_bo *get_amdgpu_winsys_bo(struct pb_buffer *_buf)
++{
++ struct amdgpu_winsys_bo *bo = NULL;
++
++ if (_buf->vtbl == &amdgpu_winsys_bo_vtbl) {
++ bo = amdgpu_winsys_bo(_buf);
++ } else {
++ struct pb_buffer *base_buf;
++ pb_size offset;
++ pb_get_base_buffer(_buf, &base_buf, &offset);
++
++ if (base_buf->vtbl == &amdgpu_winsys_bo_vtbl)
++ bo = amdgpu_winsys_bo(base_buf);
++ }
++
++ return bo;
++}
++
++static void amdgpu_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
++ struct radeon_winsys *ws = &bo->rws->base;
++
++ while (p_atomic_read(&bo->num_active_ioctls)) {
++ sched_yield();
++ }
++
++ if (bo->fence) {
++ ws->fence_wait(ws, bo->fence, PIPE_TIMEOUT_INFINITE);
++ }
++}
++
++static boolean amdgpu_bo_is_busy(struct pb_buffer *_buf,
++ enum radeon_bo_usage usage)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
++ struct radeon_winsys *ws = &bo->rws->base;
++
++ if (p_atomic_read(&bo->num_active_ioctls)) {
++ return TRUE;
++ }
++
++ return bo->fence && !ws->fence_wait(ws, bo->fence, 0);
++}
++
++static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
++ struct radeon_winsys_cs_handle *buf)
++{
++ return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
++}
++
++static void amdgpu_bo_destroy(struct pb_buffer *_buf)
++{
++ struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
++
++ amdgpu_bo_free(bo->bo);
++ amdgpu_fence_reference(&bo->fence, NULL);
++
++ if (bo->initial_domain & RADEON_DOMAIN_VRAM)
++ bo->rws->allocated_vram -= align(bo->base.size, 4096);
++ else if (bo->initial_domain & RADEON_DOMAIN_GTT)
++ bo->rws->allocated_gtt -= align(bo->base.size, 4096);
++ FREE(bo);
++}
++
++static void *amdgpu_bo_map(struct radeon_winsys_cs_handle *buf,
++ struct radeon_winsys_cs *rcs,
++ enum pipe_transfer_usage usage)
++{
++ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
++ struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
++ int r;
++ void *cpu = NULL;
++
++ /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
++ if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
++ /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
++ if (usage & PIPE_TRANSFER_DONTBLOCK) {
++ if (!(usage & PIPE_TRANSFER_WRITE)) {
++ /* Mapping for read.
++ *
++ * Since we are mapping for read, we don't need to wait
++ * if the GPU is using the buffer for read too
++ * (neither one is changing it).
++ *
++ * Only check whether the buffer is being used for write. */
++ if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
++ RADEON_USAGE_WRITE)) {
++ cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
++ return NULL;
++ }
++
++ if (amdgpu_bo_is_busy((struct pb_buffer*)bo,
++ RADEON_USAGE_WRITE)) {
++ return NULL;
++ }
++ } else {
++ if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
++ cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
++ return NULL;
++ }
++
++ if (amdgpu_bo_is_busy((struct pb_buffer*)bo,
++ RADEON_USAGE_READWRITE)) {
++ return NULL;
++ }
++ }
++ } else {
++ uint64_t time = os_time_get_nano();
++
++ if (!(usage & PIPE_TRANSFER_WRITE)) {
++ /* Mapping for read.
++ *
++ * Since we are mapping for read, we don't need to wait
++ * if the GPU is using the buffer for read too
++ * (neither one is changing it).
++ *
++ * Only check whether the buffer is being used for write. */
++ if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
++ RADEON_USAGE_WRITE)) {
++ cs->flush_cs(cs->flush_data, 0, NULL);
++ }
++ amdgpu_bo_wait((struct pb_buffer*)bo,
++ RADEON_USAGE_WRITE);
++ } else {
++ /* Mapping for write. */
++ if (cs) {
++ if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
++ cs->flush_cs(cs->flush_data, 0, NULL);
++ } else {
++ /* Try to avoid busy-waiting in amdgpu_bo_wait. */
++ if (p_atomic_read(&bo->num_active_ioctls))
++ amdgpu_cs_sync_flush(rcs);
++ }
++ }
++
++ amdgpu_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
++ }
++
++ bo->rws->buffer_wait_time += os_time_get_nano() - time;
++ }
++ }
++
++ r = amdgpu_bo_cpu_map(bo->bo, &cpu);
++ return r ? NULL : cpu;
++}
++
++static void amdgpu_bo_unmap(struct radeon_winsys_cs_handle *buf)
++{
++ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
++
++ amdgpu_bo_cpu_unmap(bo->bo);
++}
++
++static void amdgpu_bo_get_base_buffer(struct pb_buffer *buf,
++ struct pb_buffer **base_buf,
++ unsigned *offset)
++{
++ *base_buf = buf;
++ *offset = 0;
++}
++
++static enum pipe_error amdgpu_bo_validate(struct pb_buffer *_buf,
++ struct pb_validate *vl,
++ unsigned flags)
++{
++ /* Always pinned */
++ return PIPE_OK;
++}
++
++static void amdgpu_bo_fence(struct pb_buffer *buf,
++ struct pipe_fence_handle *fence)
++{
++}
++
++static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
++ amdgpu_bo_destroy,
++ NULL, /* never called */
++ NULL, /* never called */
++ amdgpu_bo_validate,
++ amdgpu_bo_fence,
++ amdgpu_bo_get_base_buffer,
++};
++
++static struct pb_buffer *amdgpu_bomgr_create_bo(struct pb_manager *_mgr,
++ pb_size size,
++ const struct pb_desc *desc)
++{
++ struct amdgpu_winsys *rws = get_winsys(_mgr);
++ struct amdgpu_bo_desc *rdesc = (struct amdgpu_bo_desc*)desc;
++ struct amdgpu_bo_alloc_request request = {0};
++ struct amdgpu_bo_alloc_result result = {0};
++ struct amdgpu_winsys_bo *bo;
++ int r;
++
++ assert(rdesc->initial_domain & RADEON_DOMAIN_VRAM_GTT);
++
++ request.alloc_size = size;
++ request.phys_alignment = desc->alignment;
++
++ if (rdesc->initial_domain & RADEON_DOMAIN_VRAM) {
++ request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
++ if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
++ request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
++ }
++ if (rdesc->initial_domain & RADEON_DOMAIN_GTT) {
++ request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
++ if (rdesc->flags & RADEON_FLAG_GTT_WC)
++ request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_WC;
++ }
++
++ r = amdgpu_bo_alloc(rws->dev, &request, &result);
++ if (r) {
++ fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
++ fprintf(stderr, "amdgpu: size : %d bytes\n", size);
++ fprintf(stderr, "amdgpu: alignment : %d bytes\n", desc->alignment);
++ fprintf(stderr, "amdgpu: domains : %d\n", rdesc->initial_domain);
++ return NULL;
++ }
++
++ bo = CALLOC_STRUCT(amdgpu_winsys_bo);
++ if (!bo)
++ return NULL;
++
++ pipe_reference_init(&bo->base.reference, 1);
++ bo->base.alignment = desc->alignment;
++ bo->base.usage = desc->usage;
++ bo->base.size = size;
++ bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
++ bo->rws = rws;
++ bo->bo = result.buf_handle;
++ bo->va = result.virtual_mc_base_address;
++ bo->initial_domain = rdesc->initial_domain;
++
++ if (amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->handle)) {
++ amdgpu_bo_free(bo->bo);
++ FREE(bo);
++ return NULL;
++ }
++
++ if (rdesc->initial_domain & RADEON_DOMAIN_VRAM)
++ rws->allocated_vram += align(size, 4096);
++ else if (rdesc->initial_domain & RADEON_DOMAIN_GTT)
++ rws->allocated_gtt += align(size, 4096);
++
++ return &bo->base;
++}
++
++static void amdgpu_bomgr_flush(struct pb_manager *mgr)
++{
++ /* NOP */
++}
++
++/* This is for the cache bufmgr. */
++static boolean amdgpu_bomgr_is_buffer_busy(struct pb_manager *_mgr,
++ struct pb_buffer *_buf)
++{
++ struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
++
++ if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
++ return TRUE;
++ }
++
++ if (amdgpu_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++static void amdgpu_bomgr_destroy(struct pb_manager *mgr)
++{
++ FREE(mgr);
++}
++
++struct pb_manager *amdgpu_bomgr_create(struct amdgpu_winsys *rws)
++{
++ struct amdgpu_bomgr *mgr;
++
++ mgr = CALLOC_STRUCT(amdgpu_bomgr);
++ if (!mgr)
++ return NULL;
++
++ mgr->base.destroy = amdgpu_bomgr_destroy;
++ mgr->base.create_buffer = amdgpu_bomgr_create_bo;
++ mgr->base.flush = amdgpu_bomgr_flush;
++ mgr->base.is_buffer_busy = amdgpu_bomgr_is_buffer_busy;
++
++ mgr->rws = rws;
++ return &mgr->base;
++}
++
++static unsigned eg_tile_split(unsigned tile_split)
++{
++ switch (tile_split) {
++ case 0: tile_split = 64; break;
++ case 1: tile_split = 128; break;
++ case 2: tile_split = 256; break;
++ case 3: tile_split = 512; break;
++ default:
++ case 4: tile_split = 1024; break;
++ case 5: tile_split = 2048; break;
++ case 6: tile_split = 4096; break;
++ }
++ return tile_split;
++}
++
++static unsigned eg_tile_split_rev(unsigned eg_tile_split)
++{
++ switch (eg_tile_split) {
++ case 64: return 0;
++ case 128: return 1;
++ case 256: return 2;
++ case 512: return 3;
++ default:
++ case 1024: return 4;
++ case 2048: return 5;
++ case 4096: return 6;
++ }
++}
++
++static void amdgpu_bo_get_tiling(struct pb_buffer *_buf,
++ enum radeon_bo_layout *microtiled,
++ enum radeon_bo_layout *macrotiled,
++ unsigned *bankw, unsigned *bankh,
++ unsigned *tile_split,
++ unsigned *stencil_tile_split,
++ unsigned *mtilea,
++ bool *scanout)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
++ struct amdgpu_bo_info info = {0};
++ uint32_t tiling_flags;
++ int r;
++
++ r = amdgpu_bo_query_info(bo->bo, &info);
++ if (r)
++ return;
++
++ tiling_flags = info.metadata.tiling_info;
++
++ *microtiled = RADEON_LAYOUT_LINEAR;
++ *macrotiled = RADEON_LAYOUT_LINEAR;
++ if (tiling_flags & AMDGPU_TILING_MICRO)
++ *microtiled = RADEON_LAYOUT_TILED;
++ else if (tiling_flags & AMDGPU_TILING_MICRO_SQUARE)
++ *microtiled = RADEON_LAYOUT_SQUARETILED;
++
++ if (tiling_flags & AMDGPU_TILING_MACRO)
++ *macrotiled = RADEON_LAYOUT_TILED;
++ if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
++ *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
++ *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
++ *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
++ *stencil_tile_split = (tiling_flags >> AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK;
++ *mtilea = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
++ *tile_split = eg_tile_split(*tile_split);
++ }
++ if (scanout)
++ *scanout = !(tiling_flags & AMDGPU_TILING_R600_NO_SCANOUT);
++}
++
++static void amdgpu_bo_set_tiling(struct pb_buffer *_buf,
++ struct radeon_winsys_cs *rcs,
++ enum radeon_bo_layout microtiled,
++ enum radeon_bo_layout macrotiled,
++ unsigned bankw, unsigned bankh,
++ unsigned tile_split,
++ unsigned stencil_tile_split,
++ unsigned mtilea,
++ uint32_t pitch,
++ bool scanout)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ struct amdgpu_bo_metadata metadata = {0};
++ uint32_t tiling_flags = 0;
++
++
++ /* Tiling determines how DRM treats the buffer data.
++ * We must flush CS when changing it if the buffer is referenced. */
++ if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
++ cs->flush_cs(cs->flush_data, 0, NULL);
++ }
++
++ while (p_atomic_read(&bo->num_active_ioctls)) {
++ sched_yield();
++ }
++
++ if (microtiled == RADEON_LAYOUT_TILED)
++ tiling_flags |= AMDGPU_TILING_MICRO;
++ else if (microtiled == RADEON_LAYOUT_SQUARETILED)
++ tiling_flags |= AMDGPU_TILING_MICRO_SQUARE;
++
++ if (macrotiled == RADEON_LAYOUT_TILED)
++ tiling_flags |= AMDGPU_TILING_MACRO;
++
++ tiling_flags |= (bankw & AMDGPU_TILING_EG_BANKW_MASK) <<
++ AMDGPU_TILING_EG_BANKW_SHIFT;
++ tiling_flags |= (bankh & AMDGPU_TILING_EG_BANKH_MASK) <<
++ AMDGPU_TILING_EG_BANKH_SHIFT;
++ if (tile_split) {
++ tiling_flags |= (eg_tile_split_rev(tile_split) &
++ AMDGPU_TILING_EG_TILE_SPLIT_MASK) <<
++ AMDGPU_TILING_EG_TILE_SPLIT_SHIFT;
++ }
++ tiling_flags |= (stencil_tile_split &
++ AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
++ AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
++ tiling_flags |= (mtilea & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
++ AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
++
++ if (!scanout)
++ tiling_flags |= AMDGPU_TILING_R600_NO_SCANOUT;
++
++ metadata.tiling_info = tiling_flags;
++
++ amdgpu_bo_set_metadata(bo->bo, &metadata);
++}
++
++static struct radeon_winsys_cs_handle *amdgpu_get_cs_handle(struct pb_buffer *_buf)
++{
++ /* return a direct pointer to amdgpu_winsys_bo. */
++ return (struct radeon_winsys_cs_handle*)get_amdgpu_winsys_bo(_buf);
++}
++
++static struct pb_buffer *
++amdgpu_bo_create(struct radeon_winsys *rws,
++ unsigned size,
++ unsigned alignment,
++ boolean use_reusable_pool,
++ enum radeon_bo_domain domain,
++ enum radeon_bo_flag flags)
++{
++ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
++ struct amdgpu_bo_desc desc;
++ struct pb_manager *provider;
++ struct pb_buffer *buffer;
++
++ memset(&desc, 0, sizeof(desc));
++ desc.base.alignment = alignment;
++
++ /* Only set one usage bit each for domains and flags, or the cache manager
++ * might consider different sets of domains / flags compatible
++ */
++ if (domain == RADEON_DOMAIN_VRAM_GTT)
++ desc.base.usage = 1 << 2;
++ else
++ desc.base.usage = domain >> 1;
++ assert(flags < sizeof(desc.base.usage) * 8 - 3);
++ desc.base.usage |= 1 << (flags + 3);
++
++ desc.initial_domain = domain;
++ desc.flags = flags;
++
++ /* Assign a buffer manager. */
++ if (use_reusable_pool)
++ provider = ws->cman;
++ else
++ provider = ws->kman;
++
++ buffer = provider->create_buffer(provider, size, &desc.base);
++ if (!buffer)
++ return NULL;
++
++ return (struct pb_buffer*)buffer;
++}
++
++static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
++ struct winsys_handle *whandle,
++ unsigned *stride)
++{
++ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
++ struct amdgpu_winsys_bo *bo;
++ enum amdgpu_bo_handle_type type;
++ struct amdgpu_bo_import_result result = {0};
++ struct amdgpu_bo_info info = {0};
++ enum radeon_bo_domain initial = 0;
++ int r;
++
++ switch (whandle->type) {
++ case DRM_API_HANDLE_TYPE_SHARED:
++ type = amdgpu_bo_handle_type_gem_flink_name;
++ break;
++ case DRM_API_HANDLE_TYPE_FD:
++ type = amdgpu_bo_handle_type_dma_buf_fd;
++ break;
++ default:
++ return NULL;
++ }
++
++ r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
++ if (r)
++ return NULL;
++
++ /* Get initial domains. */
++ r = amdgpu_bo_query_info(result.buf_handle, &info);
++ if (r) {
++ amdgpu_bo_free(result.buf_handle);
++ return NULL;
++ }
++
++ if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
++ initial |= RADEON_DOMAIN_VRAM;
++ if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
++ initial |= RADEON_DOMAIN_GTT;
++
++ /* Initialize the structure. */
++ bo = CALLOC_STRUCT(amdgpu_winsys_bo);
++ if (!bo) {
++ amdgpu_bo_free(result.buf_handle);
++ return NULL;
++ }
++
++ pipe_reference_init(&bo->base.reference, 1);
++ bo->base.alignment = info.phys_alignment;
++ bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
++ bo->bo = result.buf_handle;
++ bo->base.size = result.alloc_size;
++ bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
++ bo->rws = ws;
++ bo->va = result.virtual_mc_base_address;
++ bo->initial_domain = initial;
++
++ if (amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->handle)) {
++ amdgpu_bo_free(bo->bo);
++ FREE(bo);
++ return NULL;
++ }
++
++ if (stride)
++ *stride = whandle->stride;
++
++ if (bo->initial_domain & RADEON_DOMAIN_VRAM)
++ ws->allocated_vram += align(bo->base.size, 4096);
++ else if (bo->initial_domain & RADEON_DOMAIN_GTT)
++ ws->allocated_gtt += align(bo->base.size, 4096);
++
++ return &bo->base;
++}
++
++static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
++ unsigned stride,
++ struct winsys_handle *whandle)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(buffer);
++ enum amdgpu_bo_handle_type type;
++ int r;
++
++ switch (whandle->type) {
++ case DRM_API_HANDLE_TYPE_SHARED:
++ type = amdgpu_bo_handle_type_gem_flink_name;
++ break;
++ case DRM_API_HANDLE_TYPE_FD:
++ type = amdgpu_bo_handle_type_dma_buf_fd;
++ break;
++ case DRM_API_HANDLE_TYPE_KMS:
++ type = amdgpu_bo_handle_type_kms;
++ break;
++ default:
++ return FALSE;
++ }
++
++ r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
++ if (r)
++ return FALSE;
++
++ whandle->stride = stride;
++ return TRUE;
++}
++
++static uint64_t amdgpu_bo_get_va(struct radeon_winsys_cs_handle *buf)
++{
++ return ((struct amdgpu_winsys_bo*)buf)->va;
++}
++
++void amdgpu_bomgr_init_functions(struct amdgpu_winsys *ws)
++{
++ ws->base.buffer_get_cs_handle = amdgpu_get_cs_handle;
++ ws->base.buffer_set_tiling = amdgpu_bo_set_tiling;
++ ws->base.buffer_get_tiling = amdgpu_bo_get_tiling;
++ ws->base.buffer_map = amdgpu_bo_map;
++ ws->base.buffer_unmap = amdgpu_bo_unmap;
++ ws->base.buffer_wait = amdgpu_bo_wait;
++ ws->base.buffer_is_busy = amdgpu_bo_is_busy;
++ ws->base.buffer_create = amdgpu_bo_create;
++ ws->base.buffer_from_handle = amdgpu_bo_from_handle;
++ ws->base.buffer_get_handle = amdgpu_bo_get_handle;
++ ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
++ ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
++}
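
A minimal caller-side sketch (hypothetical code, not part of the patch) of the buffer path that amdgpu_bomgr_init_functions() wires into the generic radeon_winsys vtable; `ws` is assumed to be the struct radeon_winsys pointer returned by the winsys constructor:

   /* Allocate a CPU-accessible 64 KiB VRAM buffer from the reusable pool. */
   struct pb_buffer *buf =
      ws->buffer_create(ws, 64 * 1024, 4096, TRUE,
                        RADEON_DOMAIN_VRAM, RADEON_FLAG_CPU_ACCESS);
   struct radeon_winsys_cs_handle *handle = ws->buffer_get_cs_handle(buf);

   /* Synchronized write mapping; with no CS passed, amdgpu_bo_map() only
    * waits for pending ioctls and the buffer's fence. */
   uint32_t *ptr = ws->buffer_map(handle, NULL, PIPE_TRANSFER_WRITE);
   if (ptr) {
      ptr[0] = 0xdeadbeef;
      ws->buffer_unmap(handle);
   }
   pb_reference(&buf, NULL); /* the last reference drop ends in amdgpu_bo_destroy() */
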
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h b/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h
+new file mode 100644
+index 0000000..ccf98b5
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h
+@@ -0,0 +1,75 @@
++/*
++ * Copyright © 2008 Jérôme Glisse
++ * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++/*
++ * Authors:
++ * Jérôme Glisse <glisse@freedesktop.org>
++ * Marek Olšák <maraeo@gmail.com>
++ */
++#ifndef AMDGPU_DRM_BO_H
++#define AMDGPU_DRM_BO_H
++
++#include "amdgpu_winsys.h"
++#include "pipebuffer/pb_bufmgr.h"
++
++struct amdgpu_bo_desc {
++ struct pb_desc base;
++
++ enum radeon_bo_domain initial_domain;
++ unsigned flags;
++};
++
++struct amdgpu_winsys_bo {
++ struct pb_buffer base;
++
++ struct amdgpu_winsys *rws;
++
++ amdgpu_bo_handle bo;
++ uint32_t handle;
++ uint64_t va;
++ enum radeon_bo_domain initial_domain;
++
++ /* how many command streams is this bo referenced in? */
++ int num_cs_references;
++
++ /* how many command streams, which are being emitted in a separate
++ * thread, is this bo referenced in? */
++ int num_active_ioctls;
++
++ struct pipe_fence_handle *fence; /* for buffer_wait & buffer_is_busy */
++};
++
++struct pb_manager *amdgpu_bomgr_create(struct amdgpu_winsys *rws);
++void amdgpu_bomgr_init_functions(struct amdgpu_winsys *ws);
++
++static INLINE
++void amdgpu_winsys_bo_reference(struct amdgpu_winsys_bo **dst,
++ struct amdgpu_winsys_bo *src)
++{
++ pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
++}
++
++#endif
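
amdgpu_winsys_bo_reference() above is a thin wrapper over pb_reference(); a hypothetical sketch of the ownership pattern it supports (`bo` is assumed to be an existing buffer):

   struct amdgpu_winsys_bo *ref = NULL;
   amdgpu_winsys_bo_reference(&ref, bo);   /* take a reference on bo */
   /* ... use ref ... */
   amdgpu_winsys_bo_reference(&ref, NULL); /* drop it; the final drop calls
                                            * amdgpu_bo_destroy() */
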
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c b/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c
+new file mode 100644
+index 0000000..aee7ff3
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c
+@@ -0,0 +1,578 @@
++/*
++ * Copyright © 2008 Jérôme Glisse
++ * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++/*
++ * Authors:
++ * Marek Olšák <maraeo@gmail.com>
++ */
++
++#include "amdgpu_cs.h"
++#include "os/os_time.h"
++#include <stdio.h>
++#include <amdgpu_drm.h>
++
++
++/* FENCES */
++
++static struct pipe_fence_handle *
++amdgpu_fence_create(unsigned ip, uint32_t instance)
++{
++ struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
++
++ fence->reference.count = 1;
++ fence->ip_type = ip;
++ fence->ring = instance;
++ fence->submission_in_progress = true;
++ return (struct pipe_fence_handle *)fence;
++}
++
++static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
++ uint64_t fence_id)
++{
++ struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
++
++ rfence->fence = fence_id;
++ rfence->submission_in_progress = false;
++}
++
++static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
++{
++ struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
++
++ rfence->signalled = true;
++}
++
++static bool amdgpu_fence_wait(struct radeon_winsys *rws,
++ struct pipe_fence_handle *fence,
++ uint64_t timeout)
++{
++ struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
++ struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
++ struct amdgpu_cs_query_fence query = {0};
++ uint32_t expired;
++ int r;
++
++ /* XXX Access to rfence->signalled is racy here. */
++ if (rfence->signalled)
++ return true;
++
++ /* The fence may not have a number assigned if its IB is being
++ * submitted in the other thread right now. Wait until the submission
++ * is done. */
++ if (rfence->submission_in_progress) {
++ if (!timeout) {
++ return FALSE;
++ } else if (timeout == PIPE_TIMEOUT_INFINITE) {
++ while (rfence->submission_in_progress)
++ sched_yield();
++ } else {
++ int64_t start_time = os_time_get_nano();
++ int64_t elapsed_time = 0;
++
++ while (rfence->submission_in_progress) {
++ elapsed_time = os_time_get_nano() - start_time;
++ if (elapsed_time >= timeout) {
++ return FALSE;
++ }
++ sched_yield();
++ }
++ timeout -= elapsed_time;
++ }
++ }
++
++ /* Now use the libdrm query. */
++ query.timeout_ns = timeout;
++ query.fence = rfence->fence;
++ query.context = ws->ctx;
++ query.ip_type = rfence->ip_type;
++ query.ip_instance = 0;
++ query.ring = rfence->ring;
++
++ r = amdgpu_cs_query_fence_status(ws->dev, &query, &expired);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
++ return FALSE;
++ }
++
++ rfence->signalled = expired != 0;
++ return rfence->signalled;
++}
++
++/* COMMAND SUBMISSION */
++
++static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
++{
++ struct amdgpu_cs_context *cur_cs = cs->csc;
++ struct amdgpu_winsys *ws = cs->ws;
++ struct amdgpu_cs_ib_alloc_result ib;
++ int r;
++
++ r = amdgpu_cs_alloc_ib(ws->dev, ws->ctx, amdgpu_cs_ib_size_64K, &ib);
++ if (r)
++ return false;
++
++ cs->base.buf = ib.cpu;
++ cs->base.cdw = 0;
++
++ cur_cs->ib.ib_handle = ib.handle;
++ return true;
++}
++
++static boolean amdgpu_init_cs_context(struct amdgpu_cs_context *csc)
++{
++ int i;
++
++ csc->request.number_of_ibs = 1;
++ csc->request.ibs = &csc->ib;
++
++ csc->max_num_buffers = 512;
++ csc->buffers = (struct amdgpu_cs_buffer*)
++ CALLOC(1, csc->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
++ if (!csc->buffers) {
++ return FALSE;
++ }
++
++ csc->handles = CALLOC(1, csc->max_num_buffers * sizeof(amdgpu_bo_handle));
++ if (!csc->handles) {
++ FREE(csc->buffers);
++ return FALSE;
++ }
++
++ csc->flags = CALLOC(1, csc->max_num_buffers);
++ if (!csc->flags) {
++ FREE(csc->handles);
++ FREE(csc->buffers);
++ return FALSE;
++ }
++
++ for (i = 0; i < Elements(csc->buffer_indices_hashlist); i++) {
++ csc->buffer_indices_hashlist[i] = -1;
++ }
++ return TRUE;
++}
++
++static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *csc)
++{
++ unsigned i;
++
++ for (i = 0; i < csc->num_buffers; i++) {
++ p_atomic_dec(&csc->buffers[i].bo->num_cs_references);
++ amdgpu_winsys_bo_reference(&csc->buffers[i].bo, NULL);
++ csc->handles[i] = NULL;
++ csc->flags[i] = 0;
++ }
++
++ csc->num_buffers = 0;
++ csc->used_gart = 0;
++ csc->used_vram = 0;
++ amdgpu_fence_reference(&csc->fence, NULL);
++
++ for (i = 0; i < Elements(csc->buffer_indices_hashlist); i++) {
++ csc->buffer_indices_hashlist[i] = -1;
++ }
++}
++
++static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *csc)
++{
++ amdgpu_cs_context_cleanup(csc);
++ FREE(csc->flags);
++ FREE(csc->buffers);
++ FREE(csc->handles);
++}
++
++
++static struct radeon_winsys_cs *
++amdgpu_cs_create(struct radeon_winsys *rws,
++ enum ring_type ring_type,
++ void (*flush)(void *ctx, unsigned flags,
++ struct pipe_fence_handle **fence),
++ void *flush_ctx,
++ struct radeon_winsys_cs_handle *trace_buf)
++{
++ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
++ struct amdgpu_cs *cs;
++
++ cs = CALLOC_STRUCT(amdgpu_cs);
++ if (!cs) {
++ return NULL;
++ }
++
++ pipe_semaphore_init(&cs->flush_completed, 1);
++
++ cs->ws = ws;
++ cs->flush_cs = flush;
++ cs->flush_data = flush_ctx;
++
++ if (!amdgpu_init_cs_context(&cs->csc1)) {
++ FREE(cs);
++ return NULL;
++ }
++ if (!amdgpu_init_cs_context(&cs->csc2)) {
++ amdgpu_destroy_cs_context(&cs->csc1);
++ FREE(cs);
++ return NULL;
++ }
++
++ /* Set the first command buffer as current. */
++ cs->csc = &cs->csc1;
++ cs->cst = &cs->csc2;
++ cs->base.ring_type = ring_type;
++
++ if (!amdgpu_get_new_ib(cs)) {
++ amdgpu_destroy_cs_context(&cs->csc2);
++ amdgpu_destroy_cs_context(&cs->csc1);
++ FREE(cs);
++ return NULL;
++ }
++
++ p_atomic_inc(&ws->num_cs);
++ return &cs->base;
++}
++
++#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
++
++int amdgpu_get_reloc(struct amdgpu_cs_context *csc, struct amdgpu_winsys_bo *bo)
++{
++ unsigned hash = bo->handle & (Elements(csc->buffer_indices_hashlist)-1);
++ int i = csc->buffer_indices_hashlist[hash];
++
++ /* Fast path: the cached index either misses (-1) or already points at this buffer. */
++ if (i == -1 || csc->buffers[i].bo == bo)
++ return i;
++
++ /* Hash collision, look for the BO in the list of relocs linearly. */
++ for (i = csc->num_buffers - 1; i >= 0; i--) {
++ if (csc->buffers[i].bo == bo) {
++ /* Put this reloc in the hash list.
++ * This will prevent additional hash collisions if there are
++ * several consecutive get_reloc calls for the same buffer.
++ *
++ * Example: Assuming buffers A,B,C collide in the hash list,
++ * the following sequence of relocs:
++ * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
++ * will collide here: ^ and here: ^,
++ * meaning that we should get very few collisions in the end. */
++ csc->buffer_indices_hashlist[hash] = i;
++ return i;
++ }
++ }
++ return -1;
++}
++
++static unsigned amdgpu_add_reloc(struct amdgpu_cs *cs,
++ struct amdgpu_winsys_bo *bo,
++ enum radeon_bo_usage usage,
++ enum radeon_bo_domain domains,
++ unsigned priority,
++ enum radeon_bo_domain *added_domains)
++{
++ struct amdgpu_cs_context *csc = cs->csc;
++ struct amdgpu_cs_buffer *reloc;
++ unsigned hash = bo->handle & (Elements(csc->buffer_indices_hashlist)-1);
++ int i = -1;
++
++ priority = MIN2(priority, 15);
++ *added_domains = 0;
++
++ i = amdgpu_get_reloc(csc, bo);
++
++ if (i >= 0) {
++ reloc = &csc->buffers[i];
++ reloc->usage |= usage;
++ *added_domains = domains & ~reloc->domains;
++ reloc->domains |= domains;
++ csc->flags[i] = MAX2(csc->flags[i], priority);
++ return i;
++ }
++
++ /* New relocation, check if the backing array is large enough. */
++ if (csc->num_buffers >= csc->max_num_buffers) {
++ uint32_t size;
++ csc->max_num_buffers += 10;
++
++ size = csc->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
++ csc->buffers = realloc(csc->buffers, size);
++
++ size = csc->max_num_buffers * sizeof(amdgpu_bo_handle);
++ csc->handles = realloc(csc->handles, size);
++
++ csc->flags = realloc(csc->flags, csc->max_num_buffers);
++ }
++
++ /* Initialize the new relocation. */
++ csc->buffers[csc->num_buffers].bo = NULL;
++ amdgpu_winsys_bo_reference(&csc->buffers[csc->num_buffers].bo, bo);
++ csc->handles[csc->num_buffers] = bo->bo;
++ csc->flags[csc->num_buffers] = priority;
++ p_atomic_inc(&bo->num_cs_references);
++ reloc = &csc->buffers[csc->num_buffers];
++ reloc->bo = bo;
++ reloc->usage = usage;
++ reloc->domains = domains;
++
++ csc->buffer_indices_hashlist[hash] = csc->num_buffers;
++
++ *added_domains = domains;
++ return csc->num_buffers++;
++}
++
++static unsigned amdgpu_cs_add_reloc(struct radeon_winsys_cs *rcs,
++ struct radeon_winsys_cs_handle *buf,
++ enum radeon_bo_usage usage,
++ enum radeon_bo_domain domains,
++ enum radeon_bo_priority priority)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
++ enum radeon_bo_domain added_domains;
++ unsigned index = amdgpu_add_reloc(cs, bo, usage, domains, priority, &added_domains);
++
++ if (added_domains & RADEON_DOMAIN_GTT)
++ cs->csc->used_gart += bo->base.size;
++ if (added_domains & RADEON_DOMAIN_VRAM)
++ cs->csc->used_vram += bo->base.size;
++
++ return index;
++}
++
++static int amdgpu_cs_get_reloc(struct radeon_winsys_cs *rcs,
++ struct radeon_winsys_cs_handle *buf)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++
++ return amdgpu_get_reloc(cs->csc, (struct amdgpu_winsys_bo*)buf);
++}
++
++static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
++{
++ return TRUE;
++}
++
++static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ boolean status =
++ (cs->csc->used_gart + gtt) < cs->ws->info.gart_size * 0.7 &&
++ (cs->csc->used_vram + vram) < cs->ws->info.vram_size * 0.7;
++
++ return status;
++}
++
++void amdgpu_cs_emit_ioctl_oneshot(struct amdgpu_cs *cs, struct amdgpu_cs_context *csc)
++{
++ struct amdgpu_winsys *ws = cs->ws;
++ int i, r;
++ uint64_t fence;
++
++ r = amdgpu_cs_submit(ws->dev, ws->ctx, 0, &csc->request, 1, &fence);
++ if (r) {
++ fprintf(stderr, "amdgpu: The CS has been rejected, "
++ "see dmesg for more information.\n");
++
++ amdgpu_fence_signalled(csc->fence);
++ } else {
++ /* Success. */
++ amdgpu_fence_submitted(csc->fence, fence);
++
++ for (i = 0; i < csc->num_buffers; i++) {
++ amdgpu_fence_reference(&csc->buffers[i].bo->fence, csc->fence);
++ }
++ }
++
++ /* Cleanup. */
++ for (i = 0; i < csc->num_buffers; i++) {
++ p_atomic_dec(&csc->buffers[i].bo->num_active_ioctls);
++ }
++ amdgpu_cs_context_cleanup(csc);
++}
++
++/*
++ * Make sure previous submissions of this CS have completed.
++ */
++void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++
++ /* Wait for any pending ioctl to complete. */
++ if (cs->ws->thread) {
++ pipe_semaphore_wait(&cs->flush_completed);
++ pipe_semaphore_signal(&cs->flush_completed);
++ }
++}
++
++DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
++
++static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
++ unsigned flags,
++ struct pipe_fence_handle **fence,
++ uint32_t cs_trace_id)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ struct amdgpu_cs_context *tmp;
++
++ switch (cs->base.ring_type) {
++ case RING_DMA:
++ /* pad DMA ring to 8 DWs */
++ if (cs->ws->info.chip_class <= SI) {
++ while (rcs->cdw & 7)
++ OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
++ } else {
++ while (rcs->cdw & 7)
++ OUT_CS(&cs->base, 0x00000000); /* NOP packet */
++ }
++ break;
++ case RING_GFX:
++ /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
++ * r6xx requires at least 4 dw alignment to avoid a hw bug.
++ */
++ if (cs->ws->info.chip_class <= SI) {
++ while (rcs->cdw & 7)
++ OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
++ } else {
++ while (rcs->cdw & 7)
++ OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
++ }
++ break;
++ case RING_UVD:
++ while (rcs->cdw & 15)
++ OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
++ break;
++ default:
++ break;
++ }
++
++ if (rcs->cdw > RADEON_MAX_CMDBUF_DWORDS) {
++ fprintf(stderr, "amdgpu: command stream overflowed\n");
++ }
++
++ amdgpu_cs_sync_flush(rcs);
++
++ /* Swap command streams. */
++ tmp = cs->csc;
++ cs->csc = cs->cst;
++ cs->cst = tmp;
++
++ /* If the CS is neither empty nor overflowed, submit it (asynchronously if a flush thread exists). */
++ if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS && !debug_get_option_noop()) {
++ unsigned i, num_buffers = cs->cst->num_buffers;
++
++ cs->cst->ib.size = cs->base.cdw;
++ cs->cst->request.number_of_resources = cs->cst->num_buffers;
++ cs->cst->request.resources = cs->cst->handles;
++ cs->cst->request.resource_flags = cs->cst->flags;
++
++ for (i = 0; i < num_buffers; i++) {
++ /* Update the number of active asynchronous CS ioctls for the buffer. */
++ p_atomic_inc(&cs->cst->buffers[i].bo->num_active_ioctls);
++ }
++
++ switch (cs->base.ring_type) {
++ case RING_DMA:
++ cs->cst->request.ip_type = AMDGPU_HW_IP_DMA;
++ break;
++
++ case RING_UVD:
++ cs->cst->request.ip_type = AMDGPU_HW_IP_UVD;
++ break;
++
++ case RING_VCE:
++ cs->cst->request.ip_type = AMDGPU_HW_IP_VCE;
++ break;
++
++ default:
++ case RING_GFX:
++ if (flags & RADEON_FLUSH_COMPUTE) {
++ cs->cst->request.ip_type = AMDGPU_HW_IP_COMPUTE;
++ } else {
++ cs->cst->request.ip_type = AMDGPU_HW_IP_GFX;
++ }
++ break;
++ }
++
++ amdgpu_fence_reference(&cs->cst->fence, NULL);
++ cs->cst->fence = amdgpu_fence_create(cs->cst->request.ip_type,
++ cs->cst->request.ring);
++
++ if (fence)
++ amdgpu_fence_reference(fence, cs->cst->fence);
++
++ if (cs->ws->thread) {
++ pipe_semaphore_wait(&cs->flush_completed);
++ amdgpu_ws_queue_cs(cs->ws, cs);
++ if (!(flags & RADEON_FLUSH_ASYNC))
++ amdgpu_cs_sync_flush(rcs);
++ } else {
++ amdgpu_cs_emit_ioctl_oneshot(cs, cs->cst);
++ }
++ } else {
++ amdgpu_cs_context_cleanup(cs->cst);
++ }
++
++ amdgpu_get_new_ib(cs);
++
++ cs->ws->num_cs_flushes++;
++}
++
++static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++
++ amdgpu_cs_sync_flush(rcs);
++ pipe_semaphore_destroy(&cs->flush_completed);
++ amdgpu_cs_context_cleanup(&cs->csc1);
++ amdgpu_cs_context_cleanup(&cs->csc2);
++ p_atomic_dec(&cs->ws->num_cs);
++ amdgpu_cs_free_ib(cs->ws->dev, cs->ws->ctx,
++ cs->csc->ib.ib_handle);
++ amdgpu_destroy_cs_context(&cs->csc1);
++ amdgpu_destroy_cs_context(&cs->csc2);
++ FREE(cs);
++}
++
++static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
++ struct radeon_winsys_cs_handle *_buf,
++ enum radeon_bo_usage usage)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
++
++ return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
++}
++
++void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
++{
++ ws->base.cs_create = amdgpu_cs_create;
++ ws->base.cs_destroy = amdgpu_cs_destroy;
++ ws->base.cs_add_reloc = amdgpu_cs_add_reloc;
++ ws->base.cs_get_reloc = amdgpu_cs_get_reloc;
++ ws->base.cs_validate = amdgpu_cs_validate;
++ ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
++ ws->base.cs_flush = amdgpu_cs_flush;
++ ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
++ ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
++ ws->base.fence_wait = amdgpu_fence_wait;
++ ws->base.fence_reference = amdgpu_fence_reference;
++}
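
A hypothetical end-to-end submission sketch (not part of the patch) tying the functions above together; `ws`, `handle`, `my_flush_cb` and `my_flush_ctx` are assumed to exist, and RADEON_PRIO_MIN stands in for whatever radeon_bo_priority value the caller wants:

   struct radeon_winsys_cs *cs =
      ws->cs_create(ws, RING_GFX, my_flush_cb, my_flush_ctx, NULL);

   /* Declare the BO to the kernel, then write packets into the IB. */
   ws->cs_add_reloc(cs, handle, RADEON_USAGE_WRITE,
                    RADEON_DOMAIN_VRAM, RADEON_PRIO_MIN);
   cs->buf[cs->cdw++] = 0xffff1000; /* type3 NOP, as emitted by amdgpu_cs_flush() */

   /* Submit asynchronously, then block on the returned fence. */
   struct pipe_fence_handle *fence = NULL;
   ws->cs_flush(cs, RADEON_FLUSH_ASYNC, &fence, 0);
   ws->fence_wait(ws, fence, PIPE_TIMEOUT_INFINITE);
   ws->fence_reference(&fence, NULL);
   ws->cs_destroy(cs);
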
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h b/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h
+new file mode 100644
+index 0000000..36a9aad
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h
+@@ -0,0 +1,149 @@
++/*
++ * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++
++#ifndef AMDGPU_DRM_CS_H
++#define AMDGPU_DRM_CS_H
++
++#include "amdgpu_bo.h"
++#include "util/u_memory.h"
++
++struct amdgpu_cs_buffer {
++ struct amdgpu_winsys_bo *bo;
++ enum radeon_bo_usage usage;
++ enum radeon_bo_domain domains;
++};
++
++struct amdgpu_cs_context {
++ struct amdgpu_cs_request request;
++ struct amdgpu_cs_ib_info ib;
++
++ /* Relocs. */
++ unsigned max_num_buffers;
++ unsigned num_buffers;
++ amdgpu_bo_handle *handles;
++ uint8_t *flags;
++ struct amdgpu_cs_buffer *buffers;
++
++ int buffer_indices_hashlist[512];
++
++ unsigned used_vram;
++ unsigned used_gart;
++
++ struct pipe_fence_handle *fence;
++};
++
++struct amdgpu_cs {
++ struct radeon_winsys_cs base;
++
++ /* We flip between these two CS. While one is being consumed
++ * by the kernel in another thread, the other one is being filled
++ * by the pipe driver. */
++ struct amdgpu_cs_context csc1;
++ struct amdgpu_cs_context csc2;
++ /* The currently-used CS. */
++ struct amdgpu_cs_context *csc;
++ /* The CS being currently-owned by the other thread. */
++ struct amdgpu_cs_context *cst;
++
++ /* The winsys. */
++ struct amdgpu_winsys *ws;
++
++ /* Flush CS. */
++ void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
++ void *flush_data;
++
++ pipe_semaphore flush_completed;
++};
++
++struct amdgpu_fence {
++ struct pipe_reference reference;
++
++ uint64_t fence; /* fence ID */
++ unsigned ip_type; /* which hw ip block the fence belongs to */
++ uint32_t ring; /* ring index of the hw ip block */
++
++ /* If the fence is unknown due to an IB still being submitted
++ * in the other thread. */
++ bool submission_in_progress;
++ bool signalled;
++};
++
++static INLINE void amdgpu_fence_reference(struct pipe_fence_handle **dst,
++ struct pipe_fence_handle *src)
++{
++ struct amdgpu_fence **rdst = (struct amdgpu_fence **)dst;
++ struct amdgpu_fence *rsrc = (struct amdgpu_fence *)src;
++
++ if (pipe_reference(&(*rdst)->reference, &rsrc->reference))
++ FREE(*rdst);
++ *rdst = rsrc;
++}
++
++int amdgpu_get_reloc(struct amdgpu_cs_context *csc, struct amdgpu_winsys_bo *bo);
++
++static INLINE struct amdgpu_cs *
++amdgpu_cs(struct radeon_winsys_cs *base)
++{
++ return (struct amdgpu_cs*)base;
++}
++
++static INLINE boolean
++amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
++ struct amdgpu_winsys_bo *bo)
++{
++ int num_refs = bo->num_cs_references;
++ return num_refs == bo->rws->num_cs ||
++ (num_refs && amdgpu_get_reloc(cs->csc, bo) != -1);
++}
++
++static INLINE boolean
++amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
++ struct amdgpu_winsys_bo *bo,
++ enum radeon_bo_usage usage)
++{
++ int index;
++
++ if (!bo->num_cs_references)
++ return FALSE;
++
++ index = amdgpu_get_reloc(cs->csc, bo);
++ if (index == -1)
++ return FALSE;
++
++ return (cs->csc->buffers[index].usage & usage) != 0;
++}
++
++static INLINE boolean
++amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
++{
++ return bo->num_cs_references != 0;
++}
++
++void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs);
++void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
++void amdgpu_cs_emit_ioctl_oneshot(struct amdgpu_cs *cs, struct amdgpu_cs_context *csc);
++
++#endif
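
The inlines above give callers cheap referenced-by-CS tests; a hypothetical sketch combining them with the fence API, mirroring amdgpu_bo_is_busy() in amdgpu_bo.c:

   /* TRUE if any CS still references the BO, or its last-submission fence
    * has not signalled yet (a zero timeout turns fence_wait into a poll). */
   boolean busy = amdgpu_bo_is_referenced_by_any_cs(bo) ||
                  (bo->fence && !ws->base.fence_wait(&ws->base, bo->fence, 0));
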
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_public.h b/src/gallium/winsys/radeon/amdgpu/amdgpu_public.h
+new file mode 100644
+index 0000000..4a7aa8e
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_public.h
+@@ -0,0 +1,14 @@
++#ifndef AMDGPU_DRM_PUBLIC_H
++#define AMDGPU_DRM_PUBLIC_H
++
++#include "pipe/p_defines.h"
++
++struct radeon_winsys;
++struct pipe_screen;
++
++typedef struct pipe_screen *(*radeon_screen_create_t)(struct radeon_winsys *);
++
++struct radeon_winsys *
++amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create);
++
++#endif
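
amdgpu_winsys_create() is the only symbol a target needs; a hypothetical loader-side sketch, assuming the same convention as the existing radeon_drm_winsys_create() path (the winsys invokes screen_create internally and publishes the result as rws->screen):

   struct radeon_winsys *rws =
      amdgpu_winsys_create(fd, radeonsi_screen_create);
   /* NULL means the fd does not belong to a device this winsys supports. */
   struct pipe_screen *screen = rws ? rws->screen : NULL;
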
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c b/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c
+new file mode 100644
+index 0000000..0f3367a
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c
+@@ -0,0 +1,491 @@
++/*
++ * Copyright © 2009 Corbin Simpson
++ * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++/*
++ * Authors:
++ * Corbin Simpson <MostAwesomeDude@gmail.com>
++ * Joakim Sindholt <opensource@zhasha.com>
++ * Marek Olšák <maraeo@gmail.com>
++ */
++
++#include "amdgpu_cs.h"
++#include "amdgpu_public.h"
++
++#include "util/u_hash_table.h"
++#include <amdgpu_drm.h>
++#include <xf86drm.h>
++#include <stdio.h>
++#include <sys/stat.h>
++#include <unistd.h> /* for sysconf() */
++
++#define CIK_TILE_MODE_COLOR_2D 14
++
++#define CIK__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
++#define CIK__PIPE_CONFIG__ADDR_SURF_P2 0
++#define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
++#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
++#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
++#define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
++#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16 16
++#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
++
++static struct util_hash_table *fd_tab = NULL;
++pipe_static_mutex(fd_tab_mutex);
++
++static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
++{
++ unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];
++
++ switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
++ case CIK__PIPE_CONFIG__ADDR_SURF_P2:
++ default:
++ return 2;
++ case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
++ return 4;
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
++ return 8;
++ case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
++ return 16;
++ }
++}
++
++/* Convert the Sea Islands register values GB_ADDR_CFG and MC_ADDR_CFG
++ * into the GB_TILING_CONFIG register, which is only present on R600-R700. */
++static unsigned r600_get_gb_tiling_config(struct amdgpu_gpu_info *info)
++{
++ unsigned num_pipes = info->gb_addr_cfg & 0x7;
++ unsigned num_banks = info->mc_arb_ramcfg & 0x3;
++ unsigned pipe_interleave_bytes = (info->gb_addr_cfg >> 4) & 0x7;
++ unsigned row_size = (info->gb_addr_cfg >> 28) & 0x3;
++
++ return num_pipes | (num_banks << 4) |
++ (pipe_interleave_bytes << 8) |
++ (row_size << 12);
++}
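
The packed word follows the R600 GB_TILING_CONFIG layout: pipes in bits 0-2, banks in bits 4-5, pipe interleave in bits 8-10 and row size in bits 12-13. A self-contained round-trip check of that layout (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
       /* Example field values; each one fits its bit-field. */
       unsigned num_pipes = 2, num_banks = 1, interleave = 4, row_size = 2;
       unsigned cfg = num_pipes | (num_banks << 4) |
                      (interleave << 8) | (row_size << 12);

       /* Decoding with the matching shifts and masks recovers every field. */
       assert((cfg & 0x7) == num_pipes);
       assert(((cfg >> 4) & 0x3) == num_banks);
       assert(((cfg >> 8) & 0x7) == interleave);
       assert(((cfg >> 12) & 0x3) == row_size);
       printf("GB_TILING_CONFIG-style word: 0x%x\n", cfg);
       return 0;
    }
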
++
++/* Helper function to do the ioctls needed for setup and init. */
++static boolean do_winsys_init(struct amdgpu_winsys *ws)
++{
++ struct amdgpu_heap_info vram, gtt;
++ struct drm_amdgpu_info_hw_ip dma, uvd, vce;
++ uint32_t vce_version, vce_feature;
++ int r;
++
++ ws->num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
++
++ r = amdgpu_device_initialize(ws->fd, &ws->info.drm_major,
++ &ws->info.drm_minor, &ws->dev);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
++ return FALSE;
++ }
++
++ /* Query hardware and driver information. */
++ r = amdgpu_query_gpu_info(ws->dev, &ws->amdinfo);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_DMA, 0, &dma);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_UVD, 0, &uvd);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_VCE, 0, &vce);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_VCE, 0, 0,
++ &vce_version, &vce_feature);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_cs_ctx_create(ws->dev, &ws->ctx);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed.\n");
++ goto fail;
++ }
++
++ /* Set chip identification. */
++ ws->info.pci_id = ws->amdinfo.asic_id; /* TODO: is this correct? */
++
++ switch (ws->info.pci_id) {
++#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; break;
++#include "pci_ids/radeonsi_pci_ids.h"
++#undef CHIPSET
++
++ default:
++ fprintf(stderr, "amdgpu: Invalid PCI ID.\n");
++ goto fail;
++ }
++
++ if (ws->info.family >= CHIP_TONGA)
++ ws->info.chip_class = VI;
++ else if (ws->info.family >= CHIP_BONAIRE)
++ ws->info.chip_class = CIK;
++ else {
++ fprintf(stderr, "amdgpu: Unknown family.\n");
++ goto fail;
++ }
++
++ /* LLVM 3.6 is required for VI. */
++ if (ws->info.chip_class >= VI && HAVE_LLVM < 0x0306) {
++ fprintf(stderr, "amdgpu: LLVM 3.6 is required, got LLVM %i.%i.\n",
++ HAVE_LLVM >> 8, HAVE_LLVM & 255);
++ goto fail;
++ }
++
++ /* Set hardware information. */
++ ws->info.gart_size = gtt.heap_size;
++ ws->info.vram_size = vram.heap_size;
++   /* Convert the shader clock from kHz to MHz. */
++ ws->info.max_sclk = ws->amdinfo.max_engine_clk / 1000;
++ ws->info.max_compute_units = 1; /* TODO */
++ ws->info.max_se = ws->amdinfo.num_shader_engines;
++ ws->info.max_sh_per_se = ws->amdinfo.num_shader_arrays_per_engine;
++ ws->info.has_uvd = uvd.available_rings != 0;
++ ws->info.vce_fw_version =
++ vce.available_rings ? vce_version : 0;
++ ws->info.r600_num_backends = ws->amdinfo.rb_pipes;
++ ws->info.r600_clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
++ ws->info.r600_tiling_config = r600_get_gb_tiling_config(&ws->amdinfo);
++ ws->info.r600_num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
++ ws->info.r600_max_pipes = ws->amdinfo.max_quad_shader_pipes; /* TODO: is this correct? */
++ ws->info.r600_virtual_address = TRUE;
++ ws->info.r600_has_dma = dma.available_rings != 0;
++
++ memcpy(ws->info.si_tile_mode_array, ws->amdinfo.gb_tile_mode,
++ sizeof(ws->amdinfo.gb_tile_mode));
++ ws->info.si_tile_mode_array_valid = TRUE;
++ ws->info.si_backend_enabled_mask = ws->amdinfo.enabled_rb_pipes_mask;
++
++ memcpy(ws->info.cik_macrotile_mode_array, ws->amdinfo.gb_macro_tile_mode,
++ sizeof(ws->amdinfo.gb_macro_tile_mode));
++ ws->info.cik_macrotile_mode_array_valid = TRUE;
++
++ return TRUE;
++
++fail:
++   if (ws->ctx) {
++      amdgpu_cs_ctx_free(ws->dev, ws->ctx);
++      ws->ctx = NULL;
++   }
++   amdgpu_device_deinitialize(ws->dev);
++   ws->dev = NULL;
++   return FALSE;
++}
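
do_winsys_init() is built entirely on the public libdrm_amdgpu API checked for in configure.ac. A standalone probe using the same entry points looks roughly like this (a sketch; it assumes the libdrm_amdgpu development headers and a render node at /dev/dri/renderD128):

    #include <amdgpu.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
       int fd = open("/dev/dri/renderD128", O_RDWR);
       uint32_t drm_major, drm_minor;
       amdgpu_device_handle dev;
       struct amdgpu_gpu_info info;

       if (fd < 0 ||
           amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev)) {
          fprintf(stderr, "not an amdgpu device\n");
          return 1;
       }
       if (amdgpu_query_gpu_info(dev, &info) == 0)
          printf("DRM %u.%u, asic id 0x%x, %u shader engines\n",
                 drm_major, drm_minor, info.asic_id,
                 (unsigned)info.num_shader_engines);
       amdgpu_device_deinitialize(dev);
       close(fd);
       return 0;
    }
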
++
++static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
++{
++ struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
++
++ if (ws->thread) {
++ ws->kill_thread = 1;
++ pipe_semaphore_signal(&ws->cs_queued);
++ pipe_thread_wait(ws->thread);
++ }
++ pipe_semaphore_destroy(&ws->cs_queued);
++ pipe_mutex_destroy(ws->cs_stack_lock);
++
++ ws->cman->destroy(ws->cman);
++ ws->kman->destroy(ws->kman);
++
++ amdgpu_cs_ctx_free(ws->dev, ws->ctx);
++ amdgpu_device_deinitialize(ws->dev);
++ FREE(rws);
++}
++
++static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
++ struct radeon_info *info)
++{
++ *info = ((struct amdgpu_winsys *)rws)->info;
++}
++
++static boolean amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
++ enum radeon_feature_id fid,
++ boolean enable)
++{
++ return FALSE;
++}
++
++static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
++ enum radeon_value_id value)
++{
++ struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
++ struct amdgpu_heap_info heap;
++ uint64_t retval = 0;
++
++ switch (value) {
++ case RADEON_REQUESTED_VRAM_MEMORY:
++ return ws->allocated_vram;
++ case RADEON_REQUESTED_GTT_MEMORY:
++ return ws->allocated_gtt;
++ case RADEON_BUFFER_WAIT_TIME_NS:
++ return ws->buffer_wait_time;
++ case RADEON_TIMESTAMP:
++ amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
++ return retval;
++ case RADEON_NUM_CS_FLUSHES:
++ return ws->num_cs_flushes;
++ case RADEON_NUM_BYTES_MOVED:
++ amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
++ return retval;
++ case RADEON_VRAM_USAGE:
++ amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
++ return heap.heap_usage;
++ case RADEON_GTT_USAGE:
++ amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
++ return heap.heap_usage;
++ }
++ return 0;
++}
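
Callers reach these counters through the query_value hook that amdgpu_winsys_create() installs below. For example, HUD-style code could read the memory-usage values like this (hypothetical caller, not part of the patch):

    #include <inttypes.h>
    #include <stdio.h>
    #include "radeon_winsys.h"

    static void print_memory_usage(struct radeon_winsys *rws)
    {
       uint64_t vram = rws->query_value(rws, RADEON_VRAM_USAGE);
       uint64_t gtt = rws->query_value(rws, RADEON_GTT_USAGE);

       printf("VRAM in use: %" PRIu64 " bytes, "
              "GTT in use: %" PRIu64 " bytes\n", vram, gtt);
    }
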
++
++static unsigned hash_fd(void *key)
++{
++ int fd = pointer_to_intptr(key);
++ struct stat stat;
++ fstat(fd, &stat);
++
++ return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
++}
++
++static int compare_fd(void *key1, void *key2)
++{
++ int fd1 = pointer_to_intptr(key1);
++ int fd2 = pointer_to_intptr(key2);
++ struct stat stat1, stat2;
++ fstat(fd1, &stat1);
++ fstat(fd2, &stat2);
++
++ return stat1.st_dev != stat2.st_dev ||
++ stat1.st_ino != stat2.st_ino ||
++ stat1.st_rdev != stat2.st_rdev;
++}
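
Hashing and comparing on the (st_dev, st_ino, st_rdev) triple rather than on the fd number means two descriptors opened independently for the same device node resolve to the same key, so both openers share one winsys. A self-contained demonstration (illustration only; assumes /dev/dri/renderD128 exists):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
       int fd1 = open("/dev/dri/renderD128", O_RDWR);
       int fd2 = open("/dev/dri/renderD128", O_RDWR); /* distinct fd number */
       struct stat s1, s2;

       if (fd1 < 0 || fd2 < 0)
          return 1;
       fstat(fd1, &s1);
       fstat(fd2, &s2);
       /* Prints 1: both descriptors name the same device file. */
       printf("same device file: %d\n",
              s1.st_dev == s2.st_dev && s1.st_ino == s2.st_ino &&
              s1.st_rdev == s2.st_rdev);
       close(fd1);
       close(fd2);
       return 0;
    }
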
++
++void amdgpu_ws_queue_cs(struct amdgpu_winsys *ws, struct amdgpu_cs *cs)
++{
++retry:
++ pipe_mutex_lock(ws->cs_stack_lock);
++ if (ws->num_enqueued_cs >= RING_LAST) {
++ /* no room left for a flush */
++ pipe_mutex_unlock(ws->cs_stack_lock);
++ goto retry;
++ }
++ ws->cs_stack[ws->num_enqueued_cs++] = cs;
++ pipe_mutex_unlock(ws->cs_stack_lock);
++ pipe_semaphore_signal(&ws->cs_queued);
++}
++
++static PIPE_THREAD_ROUTINE(amdgpu_cs_emit_ioctl, param)
++{
++ struct amdgpu_winsys *ws = (struct amdgpu_winsys *)param;
++ struct amdgpu_cs *cs;
++ unsigned i;
++
++ while (1) {
++ pipe_semaphore_wait(&ws->cs_queued);
++ if (ws->kill_thread)
++ break;
++
++ pipe_mutex_lock(ws->cs_stack_lock);
++ cs = ws->cs_stack[0];
++ for (i = 1; i < ws->num_enqueued_cs; i++)
++ ws->cs_stack[i - 1] = ws->cs_stack[i];
++ ws->cs_stack[--ws->num_enqueued_cs] = NULL;
++ pipe_mutex_unlock(ws->cs_stack_lock);
++
++ if (cs) {
++ amdgpu_cs_emit_ioctl_oneshot(cs, cs->cst);
++ pipe_semaphore_signal(&cs->flush_completed);
++ }
++ }
++ pipe_mutex_lock(ws->cs_stack_lock);
++ for (i = 0; i < ws->num_enqueued_cs; i++) {
++ pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
++ ws->cs_stack[i] = NULL;
++ }
++ ws->num_enqueued_cs = 0;
++ pipe_mutex_unlock(ws->cs_stack_lock);
++ return 0;
++}
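
The thread drains cs_stack in FIFO order: amdgpu_ws_queue_cs() pushes under the lock and posts the semaphore, and the thread pops one entry per wake-up and signals flush_completed. The same handoff can be sketched with plain POSIX primitives in place of the gallium pipe_* wrappers (shutdown is signalled here by posting the semaphore without enqueuing anything; compile with -pthread):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    #define MAX_QUEUED 8

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static sem_t queued;
    static int stack[MAX_QUEUED], num_queued;

    static void *consumer(void *arg)
    {
       (void)arg;
       while (1) {
          sem_wait(&queued);
          pthread_mutex_lock(&lock);
          if (num_queued == 0) {          /* woken only to shut down */
             pthread_mutex_unlock(&lock);
             break;
          }
          int item = stack[0];            /* pop in FIFO order */
          for (int i = 1; i < num_queued; i++)
             stack[i - 1] = stack[i];
          num_queued--;
          pthread_mutex_unlock(&lock);
          printf("submitted %d\n", item); /* stands in for the CS ioctl */
       }
       return NULL;
    }

    int main(void)
    {
       pthread_t thread;

       sem_init(&queued, 0, 0);
       pthread_create(&thread, NULL, consumer, NULL);

       pthread_mutex_lock(&lock);         /* the amdgpu_ws_queue_cs() side */
       stack[num_queued++] = 42;
       pthread_mutex_unlock(&lock);
       sem_post(&queued);

       sem_post(&queued);                 /* empty wake-up: shut down */
       pthread_join(thread, NULL);
       return 0;
    }
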
++
++DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
++
++static bool amdgpu_winsys_unref(struct radeon_winsys *ws)
++{
++ struct amdgpu_winsys *rws = (struct amdgpu_winsys*)ws;
++ bool destroy;
++
++ /* When the reference counter drops to zero, remove the fd from the table.
++ * This must happen while the mutex is locked, so that
++ * amdgpu_winsys_create in another thread doesn't get the winsys
++ * from the table when the counter drops to 0. */
++ pipe_mutex_lock(fd_tab_mutex);
++
++ destroy = pipe_reference(&rws->reference, NULL);
++ if (destroy && fd_tab)
++ util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));
++
++ pipe_mutex_unlock(fd_tab_mutex);
++ return destroy;
++}
++
++struct radeon_winsys *
++amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
++{
++ struct amdgpu_winsys *ws;
++ drmVersionPtr version = drmGetVersion(fd);
++
++   /* The amdgpu DRM driver reports major version 3; reject anything else. */
++   if (!version || version->version_major != 3) {
++      if (version)
++         drmFreeVersion(version);
++      return NULL;
++   }
++ drmFreeVersion(version);
++
++ /* Look up the winsys from the fd table. */
++ pipe_mutex_lock(fd_tab_mutex);
++ if (!fd_tab) {
++ fd_tab = util_hash_table_create(hash_fd, compare_fd);
++ }
++
++ ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
++ if (ws) {
++ pipe_reference(NULL, &ws->reference);
++ pipe_mutex_unlock(fd_tab_mutex);
++ return &ws->base;
++ }
++
++ ws = CALLOC_STRUCT(amdgpu_winsys);
++ if (!ws) {
++ pipe_mutex_unlock(fd_tab_mutex);
++ return NULL;
++ }
++
++ ws->fd = fd;
++
++ if (!do_winsys_init(ws))
++ goto fail;
++
++ /* Create managers. */
++ ws->kman = amdgpu_bomgr_create(ws);
++ if (!ws->kman)
++ goto fail;
++ ws->cman = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
++ (ws->info.vram_size + ws->info.gart_size) / 8);
++ if (!ws->cman)
++ goto fail;
++
++ /* init reference */
++ pipe_reference_init(&ws->reference, 1);
++
++ /* Set functions. */
++ ws->base.unref = amdgpu_winsys_unref;
++ ws->base.destroy = amdgpu_winsys_destroy;
++ ws->base.query_info = amdgpu_winsys_query_info;
++ ws->base.cs_request_feature = amdgpu_cs_request_feature;
++ ws->base.query_value = amdgpu_query_value;
++
++ amdgpu_bomgr_init_functions(ws);
++ amdgpu_cs_init_functions(ws);
++
++ pipe_mutex_init(ws->cs_stack_lock);
++
++ ws->num_enqueued_cs = 0;
++ pipe_semaphore_init(&ws->cs_queued, 0);
++ if (ws->num_cpus > 1 && debug_get_option_thread())
++ ws->thread = pipe_thread_create(amdgpu_cs_emit_ioctl, ws);
++
++   /* Create the screen at the end. The winsys must be completely
++    * initialized by this point.
++    *
++    * Alternatively, we could create the screen based on "ws->gen"
++    * and link all drivers into one binary blob. */
++ ws->base.screen = screen_create(&ws->base);
++ if (!ws->base.screen) {
++ amdgpu_winsys_destroy(&ws->base);
++ pipe_mutex_unlock(fd_tab_mutex);
++ return NULL;
++ }
++
++ util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);
++
++   /* The mutex must not be unlocked until the winsys is fully initialized,
++    * so that another thread creating a winsys from the same fd gets a fully
++    * initialized winsys rather than a half-initialized one. */
++ pipe_mutex_unlock(fd_tab_mutex);
++
++ return &ws->base;
++
++fail:
++ pipe_mutex_unlock(fd_tab_mutex);
++ if (ws->cman)
++ ws->cman->destroy(ws->cman);
++ if (ws->kman)
++ ws->kman->destroy(ws->kman);
++   /* do_winsys_init() may have created the context and device before a
++    * later step failed; release them as well. */
++   if (ws->ctx)
++      amdgpu_cs_ctx_free(ws->dev, ws->ctx);
++   if (ws->dev)
++      amdgpu_device_deinitialize(ws->dev);
++   FREE(ws);
++ return NULL;
++}
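
Taken together, amdgpu_winsys_create() gives loaders a single entry point that yields a ready screen. A hypothetical caller (radeonsi_screen_create is assumed to be the radeonsi screen constructor matching radeon_screen_create_t; this sketch is not part of the patch):

    #include "amdgpu_public.h"

    /* Assumed to exist elsewhere with the radeon_screen_create_t signature. */
    struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws);

    static struct pipe_screen *create_screen_for_fd(int fd)
    {
       struct radeon_winsys *rws =
          amdgpu_winsys_create(fd, radeonsi_screen_create);

       /* On success, the screen was already created by the callback. */
       return rws ? rws->screen : NULL;
    }
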
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h b/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h
+new file mode 100644
+index 0000000..fc27f1c
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h
+@@ -0,0 +1,80 @@
++/*
++ * Copyright © 2009 Corbin Simpson
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++/*
++ * Authors:
++ * Corbin Simpson <MostAwesomeDude@gmail.com>
++ */
++#ifndef AMDGPU_DRM_WINSYS_H
++#define AMDGPU_DRM_WINSYS_H
++
++#include "../radeon_winsys.h"
++#include "os/os_thread.h"
++#include <amdgpu.h>
++
++struct amdgpu_cs;
++
++struct amdgpu_winsys {
++ struct radeon_winsys base;
++ struct pipe_reference reference;
++
++ int fd; /* DRM file descriptor */
++ amdgpu_device_handle dev;
++ /* This only affects the order in which IBs are executed. */
++ amdgpu_context_handle ctx;
++
++ int num_cs; /* The number of command streams created. */
++ uint64_t allocated_vram;
++ uint64_t allocated_gtt;
++ uint64_t buffer_wait_time; /* time spent in buffer_wait in ns */
++ uint64_t num_cs_flushes;
++
++ struct radeon_info info;
++
++ struct pb_manager *kman;
++ struct pb_manager *cman;
++
++ uint32_t num_cpus; /* Number of CPUs. */
++
++ /* rings submission thread */
++ pipe_mutex cs_stack_lock;
++ pipe_semaphore cs_queued;
++ pipe_thread thread;
++ int kill_thread;
++ int num_enqueued_cs;
++ struct amdgpu_cs *cs_stack[RING_LAST];
++
++ struct amdgpu_gpu_info amdinfo;
++};
++
++static INLINE struct amdgpu_winsys *
++amdgpu_winsys(struct radeon_winsys *base)
++{
++ return (struct amdgpu_winsys*)base;
++}
++
++void amdgpu_ws_queue_cs(struct amdgpu_winsys *ws, struct amdgpu_cs *cs);
++
++#endif
+diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+index 12767bf..a312f03 100644
+--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
++++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+@@ -34,6 +34,7 @@
+ #include "radeon_drm_bo.h"
+ #include "radeon_drm_cs.h"
+ #include "radeon_drm_public.h"
++#include "../amdgpu/amdgpu_public.h"
+
+ #include "pipebuffer/pb_bufmgr.h"
+ #include "util/u_memory.h"
+@@ -643,6 +644,13 @@ PUBLIC struct radeon_winsys *
+ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
+ {
+ struct radeon_drm_winsys *ws;
++ struct radeon_winsys *amdgpu;
++
++ /* First, try amdgpu. */
++ amdgpu = amdgpu_winsys_create(fd, screen_create);
++ if (amdgpu) {
++ return amdgpu;
++ }
+
+ pipe_mutex_lock(fd_tab_mutex);
+ if (!fd_tab) {
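
Because radeon_drm_winsys_create() probes amdgpu first, supported GPUs transparently move to the new kernel driver when it is present and fall back to the radeon path otherwise. The gate amdgpu_winsys_create() applies before claiming an fd can be reproduced in isolation (a sketch; also checking the driver name would be stricter than the major-version test alone):

    #include <xf86drm.h>

    /* Returns nonzero if the fd speaks the amdgpu DRM interface, using the
     * same "major version == 3" test as amdgpu_winsys_create(). */
    static int fd_is_amdgpu(int fd)
    {
       drmVersionPtr version = drmGetVersion(fd);
       int is_amdgpu;

       if (!version)
          return 0;
       is_amdgpu = version->version_major == 3;
       drmFreeVersion(version);
       return is_amdgpu;
    }
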
+diff --git a/src/gallium/winsys/radeon/radeon_winsys.h b/src/gallium/winsys/radeon/radeon_winsys.h
+index 7fb7ac9..a3cb273 100644
+--- a/src/gallium/winsys/radeon/radeon_winsys.h
++++ b/src/gallium/winsys/radeon/radeon_winsys.h
+@@ -136,6 +136,9 @@ enum radeon_family {
+ CHIP_KABINI,
+ CHIP_HAWAII,
+ CHIP_MULLINS,
++ CHIP_TONGA,
++ CHIP_ICELAND,
++ CHIP_CARRIZO,
+ CHIP_LAST,
+ };
+
+@@ -150,6 +153,7 @@ enum chip_class {
+ CAYMAN,
+ SI,
+ CIK,
++ VI,
+ };
+
+ enum ring_type {
+--
+1.9.1
+