author    Drew Moseley <drew_moseley@mentor.com>  2015-07-10 13:59:25 -0400
committer Drew Moseley <drew_moseley@mentor.com>  2015-07-10 13:59:25 -0400
commit    37fb5db0306b7b2802a6a11ce309eb00a951d859 (patch)
tree      3b86c491f45126f4d889116612054a73b8f19d35
parent    e42ead566d5a0bc2c66f1a5f6e67a9eafafd1d32 (diff)
amdfalconx86: Initial public port to the amdfalconx86 family
Signed-off-by: Drew Moseley <drew_moseley@mentor.com>
 meta-amdfalconx86/.gitignore | 0
 meta-amdfalconx86/COPYING.MIT | 17
 meta-amdfalconx86/binary/.gitignore | 0
 meta-amdfalconx86/conf/layer.conf | 12
 meta-amdfalconx86/conf/machine/amdfalconx86.conf | 61
 meta-amdfalconx86/conf/machine/include/tune-amdfalconx86.inc | 18
 meta-amdfalconx86/recipes-applications/gpio-test/files/gpio-test.c | 529
 meta-amdfalconx86/recipes-applications/gpio-test/files/gpio-test.h | 17
 meta-amdfalconx86/recipes-applications/gpio-test/gpio-test_1.0.bb | 29
 meta-amdfalconx86/recipes-applications/spi-test/files/spirom-test.c | 798
 meta-amdfalconx86/recipes-applications/spi-test/files/spirom.h | 53
 meta-amdfalconx86/recipes-applications/spi-test/spi-test_1.0.bb | 27
 meta-amdfalconx86/recipes-bsp/formfactor/formfactor/amdfalconx86/machconfig | 3
 meta-amdfalconx86/recipes-bsp/formfactor/formfactor_0.0.bbappend | 2
 meta-amdfalconx86/recipes-core/llvm/files/0001-force-link-pass.o.patch | 42
 meta-amdfalconx86/recipes-core/llvm/llvm3.7.0.inc | 49
 meta-amdfalconx86/recipes-core/llvm/llvm3.7.0_3.7.0.bb | 29
 meta-amdfalconx86/recipes-graphics/drm/libdrm/0001-drm-add-libdrm_amdgpu.patch | 5665
 meta-amdfalconx86/recipes-graphics/drm/libdrm/0002-drm-add-tests-amdgpu.patch | 2487
 meta-amdfalconx86/recipes-graphics/drm/libdrm/0003-tests-also-install-tests-app.patch | 34
 meta-amdfalconx86/recipes-graphics/drm/libdrm_git.bbappend | 19
 meta-amdfalconx86/recipes-graphics/libepoxy/libepoxy_1.2.bb | 23
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0001-winsys-radeon-make-radeon_bo_vtbl-static.patch | 35
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0002-gallium-radeon-print-winsys-info-with-R600_DEBUG-inf.patch | 71
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0003-radeonsi-remove-useless-includes.patch | 27
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0004-radeonsi-remove-deprecated-and-useless-registers.patch | 36
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0005-radeonsi-set-an-optimal-value-for-DB_Z_INFO_ZRANGE_P.patch | 43
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0006-winsys-radeon-move-radeon_winsys.h-up-one-directory.patch | 1483
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0007-winsys-radeon-add-a-private-interface-for-radeon_sur.patch | 659
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch | 2396
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0009-winsys-amdgpu-add-addrlib-texture-addressing-and-ali.patch | 22649
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0010-radeonsi-fix-DRM-version-checks-for-amdgpu-DRM-3.0.0.patch | 137
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0011-radeonsi-add-VI-register-definitions.patch | 1753
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0012-radeonsi-add-VI-hardware-support.patch | 410
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0013-radeonsi-add-a-temporary-workaround-for-a-shader-bug.patch | 163
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0014-gallium-radeon-use-VM-for-UVD.patch | 64
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0015-gallium-radeon-use-VM-for-VCE.patch | 151
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0016-gallium-radeon-add-h264-performance-HW-decoder-suppo.patch | 267
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0017-radeon-vce-make-firmware-check-compatible-with-new-f.patch | 29
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0018-radeon-vce-adapt-new-firmware-interface-changes.patch | 89
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0019-radeon-video-add-4K-support-for-decode-encode-parame.patch | 48
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0020-radeon-uvd-recalculate-dbp-buffer-size.patch | 145
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0021-radeon-uvd-make-30M-as-minimum-for-MPEG4-dpb-buffer-.patch | 32
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0022-radeon-vce-implement-VCE-two-pipe-support.patch | 96
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0023-radeonsi-add-new-VI-PCI-IDs.patch | 41
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0024-gallium-util-get-h264-level-based-on-number-of-max-r.patch | 76
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0025-st-vdpau-add-h264-decoder-level-support.patch | 34
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0026-st-omx-dec-separate-create_video_codec-to-different-.patch | 123
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0027-vl-add-level-idc-in-sps.patch | 30
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0028-st-omx-dec-add-h264-decoder-level-support.patch | 72
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0029-st-va-add-h264-decoder-level-support.patch | 34
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0030-radeonsi-properly-set-the-raster_config-for-KV.patch | 53
 meta-amdfalconx86/recipes-graphics/mesa/mesa/0031-radeonsi-properly-handler-raster_config-setup-on-CZ.patch | 32
 meta-amdfalconx86/recipes-graphics/mesa/mesa_git.bbappend | 54
 meta-amdfalconx86/recipes-graphics/xorg-driver/xf86-video-amdgpu.inc | 33
 meta-amdfalconx86/recipes-graphics/xorg-driver/xf86-video-amdgpu_0.0.01.bb | 4
 meta-amdfalconx86/recipes-graphics/xorg-xserver/glamor-egl_0.6.0.bbappend | 12
 meta-amdfalconx86/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend | 3
 meta-amdfalconx86/recipes-kernel/amd-gpio/amd-gpio_1.0.bb | 28
 meta-amdfalconx86/recipes-kernel/amd-gpio/files/Makefile | 14
 meta-amdfalconx86/recipes-kernel/amd-gpio/files/gpio-amd.c | 701
 meta-amdfalconx86/recipes-kernel/amd-gpio/files/gpio-amd.h | 86
 meta-amdfalconx86/recipes-kernel/amd-gpio/kernel-module-amd-gpio_1.0.bb | 28
 meta-amdfalconx86/recipes-kernel/amd-spi/amd-spi_1.0.bb | 33
 meta-amdfalconx86/recipes-kernel/amd-spi/files/Makefile | 14
 meta-amdfalconx86/recipes-kernel/amd-spi/files/spi_amd.c | 476
 meta-amdfalconx86/recipes-kernel/amd-spi/files/spi_amd.h | 28
 meta-amdfalconx86/recipes-kernel/amd-spi/files/spirom.c | 519
 meta-amdfalconx86/recipes-kernel/amd-spi/files/spirom.h | 53
 meta-amdfalconx86/recipes-kernel/amd-spi/kernel-module-amd-spi_1.0.bb | 33
 meta-amdfalconx86/recipes-kernel/amd-wdt/files/Makefile | 14
 meta-amdfalconx86/recipes-kernel/amd-wdt/files/amd_wdt.c | 418
 meta-amdfalconx86/recipes-kernel/amd-wdt/files/amd_wdt.h | 46
 meta-amdfalconx86/recipes-kernel/amd-wdt/kernel-module-amd-wdt_1.0.bb | 28
 meta-amdfalconx86/recipes-kernel/gpio-load/amd-gpio-load_1.0.bb | 17
 meta-amdfalconx86/recipes-kernel/gpio-load/files/modprobe.d/gpio-amd.conf | 0
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware.bb | 35
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/LICENSE.radeon | 51
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_ce.bin | Bin 0 -> 8832 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_me.bin | Bin 0 -> 17024 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_mec.bin | Bin 0 -> 262784 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_mec2.bin | Bin 0 -> 262784 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_pfp.bin | Bin 0 -> 17024 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_rlc.bin | Bin 0 -> 18836 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_sdma.bin | Bin 0 -> 10624 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_sdma1.bin | Bin 0 -> 10624 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_uvd.bin | Bin 0 -> 256960 bytes
 meta-amdfalconx86/recipes-kernel/linux-firmware/amdgpu-firmware/carrizo_vce.bin | Bin 0 -> 167456 bytes
 meta-amdfalconx86/recipes-kernel/linux/files/0001-yocto-amd-drm-amdgpu-add-VI-pci-idsamdgpu.patch | 11197
 meta-amdfalconx86/recipes-kernel/linux/files/0001-yocto-amd-staging-add-support-to-enable-and-disable-IMC-to-fetch-BIOS-code.patch | 415
 meta-amdfalconx86/recipes-kernel/linux/files/0002-yocto-amd-drm-amdgpu-add-CIK-pci-ids.patch | 13792
 meta-amdfalconx86/recipes-kernel/linux/files/0002-yocto-amd-i2c-dev-add-calls-to-enable-and-disable-IMC-from-fetching-BIOS-code.patch | 38
 meta-amdfalconx86/recipes-kernel/linux/files/0003-yocto-amd-MCE-decoding-for-F15h-M60h.patch | 121
 meta-amdfalconx86/recipes-kernel/linux/files/0003-yocto-amd-drm-amdgpu-Add-initial-VI-support.patch | 37757
 meta-amdfalconx86/recipes-kernel/linux/files/0004-yocto-amd-drm-amdgpu-Add-support-for-CIK-parts.patch | 18838
 meta-amdfalconx86/recipes-kernel/linux/files/0004-yocto-amd-temperature-monitoring-support-for-F15h-M60h-processor.patch | 94
 meta-amdfalconx86/recipes-kernel/linux/files/0005-yocto-amd-drm-amdgpu-Do-not-directly-dereference-pointers-to-BIOS-area.patch | 25802
 meta-amdfalconx86/recipes-kernel/linux/files/0005-yocto-amd-i2c-driver_support_ACPI2Platform-1.0.patch | 317
 meta-amdfalconx86/recipes-kernel/linux/files/0006-yocto-amd-drm-amdgpu-fix-const-warnings-in-amdgpu_connectors-c.patch | 31367
 meta-amdfalconx86/recipes-kernel/linux/files/0006-yocto-amd-tty-driver_enable_UART_support-1.0.patch | 46
 meta-amdfalconx86/recipes-kernel/linux/files/0007-yocto-amd-backport-kernel-dependencies-for-amdgpu-driver.patch | 35302
 meta-amdfalconx86/recipes-kernel/linux/files/0007-yocto-amd-drm-amdgpu-add-core-driver.patch | 2552
 meta-amdfalconx86/recipes-kernel/linux/files/0008-yocto-amd-drm-amdgpu-add-amdgpu-h.patch | 27307
 meta-amdfalconx86/recipes-kernel/linux/files/0009-yocto-amd-drm-amdgpu-add-amdgpu_family-h.patch | 30477
 meta-amdfalconx86/recipes-kernel/linux/files/0010-yocto-amd-drm-amdgpu-add-ppsmc-hA.patch | 6799
 meta-amdfalconx86/recipes-kernel/linux/files/0011-yocto-amd-drm-amdgpu-add-clearstate_defs-h.patch | 15906
 meta-amdfalconx86/recipes-kernel/linux/files/0012-yocto-amd-drm-amdgpu-add-atombios-headers.patch | 18620
 meta-amdfalconx86/recipes-kernel/linux/files/0013-yocto-amd-drm-amdgpu-add-amdgpu-uapi-header.patch | 9860
 meta-amdfalconx86/recipes-kernel/linux/files/0014-yocto-amd-drm-amdgpu-add-VCE-3-0-register-headers.patch | 3144
 meta-amdfalconx86/recipes-kernel/linux/files/0015-yocto-amd-drm-amdgpu-add-VCE-2-0-register-headers.patch | 4387
 meta-amdfalconx86/recipes-kernel/linux/files/0016-yocto-amd-drm-amdgpu-add-UVD-6-0-register-headers.patch | 5877
 meta-amdfalconx86/recipes-kernel/linux/files/0017-yocto-amd-drm-amdgpu-add-UVD-5-0-register-headers.patch | 5647
 meta-amdfalconx86/recipes-kernel/linux/files/0018-yocto-amd-drm-amdgpu-add-UVD-4-2-register-headers.patch | 4608
 meta-amdfalconx86/recipes-kernel/linux/files/0019-yocto-amd-drm-amdgpu-add-SMU-8-0-register-headers.patch | 6796
 meta-amdfalconx86/recipes-kernel/linux/files/0020-yocto-amd-drm-amdgpu-add-SMU-7-1-2-register-headers.patch | 8215
 meta-amdfalconx86/recipes-kernel/linux/files/0021-yocto-amd-drm-amdgpu-add-SMU-7-1-1-register-headers.patch | 7224
 meta-amdfalconx86/recipes-kernel/linux/files/0022-yocto-amd-drm-amdgpu-add-SMU-7-1-0-register-headers.patch | 8385
 meta-amdfalconx86/recipes-kernel/linux/files/0023-yocto-amd-drm-amdgpu-add-SMU-7-0-1-register-headers.patch | 4739
 meta-amdfalconx86/recipes-kernel/linux/files/0024-yocto-amd-drm-amdgpu-add-SMU-7-0-0-register-headers.patch | 921
 meta-amdfalconx86/recipes-kernel/linux/files/0025-yocto-amd-drm-amdgpu-add-OSS-3-0-1-register-headers.patch | 2403
 meta-amdfalconx86/recipes-kernel/linux/files/0026-yocto-amd-drm-amdgpu-add-OSS-3-0-register-headers.patch | 2262
 meta-amdfalconx86/recipes-kernel/linux/files/0027-yocto-amd-drm-amdgpu-add-OSS-2-4-register-headers.patch | 198
 meta-amdfalconx86/recipes-kernel/linux/files/0028-yocto-amd-drm-amdgpu-add-OSS-2-0-register-headers.patch | 219
 meta-amdfalconx86/recipes-kernel/linux/files/0029-yocto-amd-drm-amdgpu-add-GMC-8-2-register-headers.patch | 623
 meta-amdfalconx86/recipes-kernel/linux/files/0030-yocto-amd-drm-amdgpu-add-GMC-8-1-register-headers.patch | 10025
 meta-amdfalconx86/recipes-kernel/linux/files/0031-yocto-amd-drm-amdgpu-add-GMC-7-1-register-headers.patch | 69
 meta-amdfalconx86/recipes-kernel/linux/files/0032-yocto-amd-drm-amdgpu-add-GMC-7-0-register-headers.patch | 215
 meta-amdfalconx86/recipes-kernel/linux/files/0033-yocto-amd-drm-amdgpu-add-GCA-8-0-register-headers.patch | 85
 meta-amdfalconx86/recipes-kernel/linux/files/0034-yocto-amd-drm-amdgpu-add-GCA-7-2-register-headers.patch | 2442
 meta-amdfalconx86/recipes-kernel/linux/files/0035-yocto-amd-drm-amdgpu-add-GCA-7-0-register-headers.patch | 34087
 meta-amdfalconx86/recipes-kernel/linux/files/0036-yocto-amd-drm-amdgpu-add-DCE-11-0-register-headers.patch | 78
 meta-amdfalconx86/recipes-kernel/linux/files/0037-yocto-amd-drm-amdgpu-add-DCE-10-0-register-headers.patch | 51
 meta-amdfalconx86/recipes-kernel/linux/files/0038-yocto-amd-drm-amdgpu-add-DCE-8-0-register-headers.patch | 30215
 meta-amdfalconx86/recipes-kernel/linux/files/0039-yocto-amd-drm-amdgpu-add-BIF-5-1-register-headers.patch | 33455
 meta-amdfalconx86/recipes-kernel/linux/files/0040-yocto-amd-drm-amdgpu-add-BIF-5-0-register-headers.patch | 109
 meta-amdfalconx86/recipes-kernel/linux/files/0041-yocto-amd-drm-amdgpu-add-BIF-4-1-register-headers.patch | 42
 meta-amdfalconx86/recipes-kernel/linux/files/0042-yocto-amd-drm-amdgpu-fix-amdgpu.dpm-0-kernel-bug-482.patch | 58
 meta-amdfalconx86/recipes-kernel/linux/files/0043-yocto-amd-drm-amdgpu-set-the-gfx-config-properly-for-all-CZ-va.patch | 70
 meta-amdfalconx86/recipes-kernel/linux/files/0044-yocto-amd-sdhci-add-support-for-CZ-SD-host-controller.patch | 136
 meta-amdfalconx86/recipes-kernel/linux/files/amdfalconx86-preempt-rt.scc | 14
 meta-amdfalconx86/recipes-kernel/linux/files/amdfalconx86-standard.scc | 14
 meta-amdfalconx86/recipes-kernel/linux/files/amdfalconx86-tiny.scc | 8
 meta-amdfalconx86/recipes-kernel/linux/files/amdfalconx86-user-config.cfg | 231
 meta-amdfalconx86/recipes-kernel/linux/files/amdfalconx86-user-features.scc | 0
 meta-amdfalconx86/recipes-kernel/linux/files/amdfalconx86-user-patches.scc | 51
 meta-amdfalconx86/recipes-kernel/linux/files/amdfalconx86.cfg | 70
 meta-amdfalconx86/recipes-kernel/linux/files/amdfalconx86.scc | 9
 meta-amdfalconx86/recipes-kernel/linux/files/bluetooth.cfg | 13
 meta-amdfalconx86/recipes-kernel/linux/files/disable-kgdb.cfg | 1
 meta-amdfalconx86/recipes-kernel/linux/files/usb-serial.cfg | 1
 meta-amdfalconx86/recipes-kernel/linux/linux-yocto_3.14.bbappend | 22
 meta-amdfalconx86/recipes-kernel/lttng/lttng-modules_2.5.0.bbappend | 5
 meta-amdfalconx86/recipes-kernel/r8168/files/0001-r8168-adjust-Makefiles-for-Yocto-environment.patch | 145
 meta-amdfalconx86/recipes-kernel/r8168/files/0002-r8168-8.040.00.tar.bz2 | Bin 0 -> 87770 bytes
 meta-amdfalconx86/recipes-kernel/r8168/r8168_8.040.00.bb | 13
 meta-amdfalconx86/recipes-multimedia/gstreamer/gstreamer1.0-omx/0001-adjust-gstomx.conf-for-amdgpu.patch | 50
 meta-amdfalconx86/recipes-multimedia/gstreamer/gstreamer1.0-omx_git.bbappend | 5
157 files changed, 507964 insertions, 0 deletions
diff --git a/meta-amdfalconx86/.gitignore b/meta-amdfalconx86/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/meta-amdfalconx86/.gitignore
diff --git a/meta-amdfalconx86/COPYING.MIT b/meta-amdfalconx86/COPYING.MIT
new file mode 100644
index 00000000..89de3547
--- /dev/null
+++ b/meta-amdfalconx86/COPYING.MIT
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/meta-amdfalconx86/binary/.gitignore b/meta-amdfalconx86/binary/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/meta-amdfalconx86/binary/.gitignore
diff --git a/meta-amdfalconx86/conf/layer.conf b/meta-amdfalconx86/conf/layer.conf
new file mode 100644
index 00000000..01b7067d
--- /dev/null
+++ b/meta-amdfalconx86/conf/layer.conf
@@ -0,0 +1,12 @@
+# We have a conf and classes directory, add to BBPATH
+BBPATH .= ":${LAYERDIR}"
+
+# We have recipes-* directories, add to BBFILES
+BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
+ ${LAYERDIR}/recipes-*/*/*.bbappend"
+
+BBFILE_COLLECTIONS += "amdfalconx86"
+BBFILE_PATTERN_amdfalconx86 = "^${LAYERDIR}/"
+BBFILE_PRIORITY_amdfalconx86 = "6"
+
+LAYERDEPENDS_amdfalconx86 = "amd openembedded-layer"
diff --git a/meta-amdfalconx86/conf/machine/amdfalconx86.conf b/meta-amdfalconx86/conf/machine/amdfalconx86.conf
new file mode 100644
index 00000000..58f52c91
--- /dev/null
+++ b/meta-amdfalconx86/conf/machine/amdfalconx86.conf
@@ -0,0 +1,61 @@
+#@TYPE: Machine
+#@NAME: amdfalconx86
+
+#@DESCRIPTION: Machine configuration for amdfalconx86 systems
+
+
+PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
+PREFERRED_PROVIDER_jpeg ?= "jpeg"
+PREFERRED_PROVIDER_jpeg-native ?= "jpeg-native"
+PREFERRED_PROVIDER_llvm ?= "llvm3.7.0"
+PREFERRED_VERSION_linux-yocto ?= "3.14%"
+PREFERRED_VERSION_mesa = "10.6.0+git%"
+PREFERRED_VERSION_gstreamer1.0-omx = "git+git%"
+PREFERRED_VERSION_libav = "9.16"
+PREFERRED_VERSION_libdrm = "2.4.60+git%"
+PREFERRED_VERSION_glamor-egl = "0.6.0+git%"
+PREFERRED_VERSION_xf86-video-amdgpu = "0.0.01+git%"
+
+require conf/machine/include/tune-amdfalconx86.inc
+
+MACHINE_FEATURES += "wifi efi pcbios"
+
+XSERVER_X86_AMDGPU = "xf86-video-amdgpu \
+ ${@base_contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-radeonsi', '', d)} \
+ "
+XSERVER ?= "${XSERVER_X86_BASE} \
+ ${XSERVER_X86_EXT} \
+ ${XSERVER_X86_FBDEV} \
+ ${XSERVER_X86_MODESETTING} \
+ ${XSERVER_X86_AMDGPU} \
+ "
+
+MACHINE_EXTRA_RRECOMMENDS += "kernel-modules amdgpu-firmware alsa-utils"
+MACHINE_EXTRA_RRECOMMENDS += "vdpauinfo gstreamer1.0-omx"
+MACHINE_EXTRA_RRECOMMENDS += "grub parted util-linux-blkid"
+MACHINE_EXTRA_RRECOMMENDS += "spi-test gpio-test rtc-test watchdog-test smbus-test"
+MACHINE_EXTRA_RRECOMMENDS += "kernel-module-amd-gpio kernel-module-amd-spi kernel-module-amd-wdt"
+MACHINE_EXTRA_RRECOMMENDS += "amd-gpio-load amd-wdt-load"
+
+MACHINE_EXTRA_RDEPENDS += "r8168"
+
+EXTRA_OECONF_append_pn-matchbox-panel-2 = " --with-battery=acpi"
+
+# Set up gettys on all serial ports
+SERIAL_CONSOLES = "115200;ttyS4 115200;ttyS5 115200;ttyUSB0"
+
+# Enable the kernel console on ttyS4/COM0
+APPEND += "console=ttyS4,115200n8"
+
+# Make sure the kernel messages go to the VGA console
+APPEND += "console=tty0"
+
+RELEASE_IMAGE ?= "core-image-sato"
+
+APPEND += "amdgpu.dpm=1"
+
+# Make sure the VESA VGA driver comes up at boot; we provide a generic mode here
+# The AMD GPU driver will take over once it is up
+APPEND += "vga=802"
+
+MACHINEOVERRIDES =. "amd:"
diff --git a/meta-amdfalconx86/conf/machine/include/tune-amdfalconx86.inc b/meta-amdfalconx86/conf/machine/include/tune-amdfalconx86.inc
new file mode 100644
index 00000000..b78f0f7c
--- /dev/null
+++ b/meta-amdfalconx86/conf/machine/include/tune-amdfalconx86.inc
@@ -0,0 +1,18 @@
+DEFAULTTUNE ?= "dbfp4"
+
+require conf/machine/include/x86/arch-x86.inc
+require conf/machine/include/x86-base.inc
+
+
+# AMD DB-FP4 64bit (MerlinFalcon)
+TUNEVALID[dbfp4] = "Enable AMD DB-FP4 (64 bit) specific processor optimizations"
+TUNECONFLICTS[dbfp4] = "m32 mx32"
+TUNE_ARCH .= "${@bb.utils.contains("TUNE_FEATURES", "dbfp4", "${X86ARCH64}", "" ,d)}"
+TUNE_CCARGS .= "${@bb.utils.contains("TUNE_FEATURES", "dbfp4", " -march=bdver4", "", d)}"
+
+# Extra tune selections
+AVAILTUNES += "dbfp4"
+TUNE_FEATURES_tune-dbfp4 = "dbfp4"
+BASE_LIB_tune-dbfp4 = "lib64"
+TUNE_PKGARCH_tune-dbfp4 = "dbfp4"
+PACKAGE_EXTRA_ARCHS_tune-dbfp4 = "${TUNE_PKGARCH_tune-dbfp4}"
diff --git a/meta-amdfalconx86/recipes-applications/gpio-test/files/gpio-test.c b/meta-amdfalconx86/recipes-applications/gpio-test/files/gpio-test.c
new file mode 100644
index 00000000..6cd073bd
--- /dev/null
+++ b/meta-amdfalconx86/recipes-applications/gpio-test/files/gpio-test.c
@@ -0,0 +1,529 @@
+/*****************************************************************************
+*
+* Copyright (c) 2014, Advanced Micro Devices, Inc.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+* * Neither the name of Advanced Micro Devices, Inc. nor the names of
+* its contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*
+***************************************************************************/
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <errno.h>
+#include <string.h>
+
+#include <readline/readline.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/ioctl.h>
+
+#include "gpio-test.h"
+
+#define GPIO_APP_VERSION "0.2"
+#define AMD_GPIO_NUM_PINS 184
+static int gpio_in_use[AMD_GPIO_NUM_PINS];
+
+char *show_prompt(void)
+{
+ return "$ ";
+}
+
+void sighandler(int sig)
+{
+ printf("\n%s", show_prompt());
+}
+
+void show_license(void)
+{
+ printf("/*****************************************************************************\n"
+ "*\n"
+ "* Copyright (c) 2014, Advanced Micro Devices, Inc.\n"
+ "* All rights reserved.\n"
+ "*\n"
+ "* Redistribution and use in source and binary forms, with or without\n"
+ "* modification, are permitted provided that the following conditions are met:\n"
+ "* * Redistributions of source code must retain the above copyright\n"
+ "* notice, this list of conditions and the following disclaimer.\n"
+ "* * Redistributions in binary form must reproduce the above copyright\n"
+ "* notice, this list of conditions and the following disclaimer in the\n"
+ "* documentation and/or other materials provided with the distribution.\n"
+ "* * Neither the name of Advanced Micro Devices, Inc. nor the names of\n"
+ "* its contributors may be used to endorse or promote products derived\n"
+ "* from this software without specific prior written permission.\n"
+ "*\n"
+ "* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n"
+ "* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"
+ "* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n"
+ "* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY\n"
+ "* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n"
+ "* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n"
+ "* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n"
+ "* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
+ "* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n"
+ "* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
+ "*\n"
+ "*\n"
+ "***************************************************************************/\n");
+}
+
+void print_usage()
+{
+ printf("\nCommands Supported ->\n");
+ printf(" getgpiomode <gpio> : Gets the mode of GPIO pin\n");
+ printf(" setgpiomode <gpio> <in/out/high/low> : Sets the mode of GPIO pin to input or output(high/low)\n");
+ printf(" getgpiovalue <gpio> : Gets the value of GPIO pin\n");
+ printf("  setgpiovalue <gpio> <high/low> : Sets the value of GPIO pin to high or low\n");
+ printf(" getnumgpio : Gets the number of GPIO pins supported\n");
+ printf(" getgpiobase : Gets the number of first GPIO pin\n");
+ printf(" getgpioname : Gets the name of GPIO driver currently in use\n");
+ printf(" dmesg : Displays the kernel log messages related to GPIO\n");
+ printf(" license : Displays the terms of LICENSE for this application\n");
+ printf(" help : Displays help text\n");
+ printf(" exit : Exits the application\n\n");
+}
+
+void parse_cmd(const char *cmdline)
+{
+ int fd;
+
+ if (strncmp(cmdline, "help", 4) == 0)
+ print_usage();
+ else if (strncmp(cmdline, "getnumgpio", 10) == 0) {
+ int fd;
+ char ngpio[3 + 1];
+
+ memset(ngpio, '\0', (3 + 1));
+ fd = open("/sys/class/gpio/gpiochip0/ngpio", O_RDONLY);
+ if (fd < 0) {
+ printf("\nPlease make sure AMD GPIO driver is loaded\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Value read from the file is ASCII text */
+ if(read(fd, ngpio, 3) < 0)
+ perror("Cannot read number of GPIO pins");
+
+ printf("\nThe maximum number of GPIO pins supported is %d\n", atoi(ngpio));
+ close(fd);
+ } else if (strncmp(cmdline, "getgpiobase", 11) == 0) {
+ int fd;
+ char gpiobase[3 + 1];
+
+ memset(gpiobase, '\0', (3 + 1));
+ fd = open("/sys/class/gpio/gpiochip0/base", O_RDONLY);
+ if (fd < 0) {
+ printf("\nPlease make sure AMD GPIO driver is loaded\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if(read(fd, gpiobase, 3) < 0)
+ perror("Cannot read GPIO base");
+
+ printf("\nGPIO pin numbering starts from %d\n", atoi(gpiobase));
+ close(fd);
+ } else if (strncmp(cmdline, "getgpioname", 11) == 0) {
+ int fd;
+ char gpioname[10 + 1]; /* Max 10 characters + NULL character */
+
+ /* Zero initialize gpioname array */
+ memset(gpioname, '\0', sizeof(gpioname));
+
+ fd = open("/sys/class/gpio/gpiochip0/label", O_RDONLY);
+ if (fd < 0) {
+ printf("\nPlease make sure AMD GPIO driver is loaded\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if(read(fd, gpioname, 10) < 0)
+ perror("Cannot read GPIO driver name");
+
+ printf("\nGPIO driver loaded is %s\n", gpioname);
+ close(fd);
+ } else if (strncmp(cmdline, "getgpiovalue", 12) == 0) {
+ int fd;
+ int gpio_num;
+ char gpio[4 + 1];
+ char pathname[80];
+ int ret = 0;
+
+ /* Parse the GPIO number from the command line */
+ if (sscanf(cmdline, "getgpiovalue %d", &gpio_num) < 1) {
+ printf("Invalid inputs, please try again\n\n");
+ return;
+ }
+
+ fd = open("/sys/class/gpio/export", O_WRONLY);
+ if (fd < 0) {
+ if (errno == EACCES)
+ printf("\nYou do not have correct permission, please run as root\n");
+ else
+ perror("Error opening /sys/class/gpio/export");
+
+ exit(EXIT_FAILURE);
+ }
+
+ memset(gpio, '\0', sizeof(gpio));
+ if (snprintf(gpio, sizeof(gpio), "%d", gpio_num) < 1) {
+ printf("Invalid inputs, please try again\n");
+ close(fd);
+ return;
+ }
+
+ ret = write(fd, gpio, strlen(gpio));
+ /*
+ * There can be two situations ->
+ * 1) The GPIO is being exported for the first time.
+ * 2) The GPIO is being exported again.
+ * In the first case, the write to file descriptor should
+ * succeed, and we should still fall into the if clause.
+ *
+ * In the second case, write will fail and errno will be
+ * set to EBUSY, since the GPIO pin is already exported.
+ * Anything else is an error.
+ */
+ if((ret >= 0) || ((ret < 0) && (errno == EBUSY))) {
+ /* Close the last file descriptor */
+ close(fd);
+
+ memset(pathname, '\0', sizeof(pathname));
+ sprintf(pathname, "/sys/class/gpio/gpio%d/value", gpio_num);
+
+ fd = open(pathname, O_RDONLY);
+ if (fd < 0)
+ perror("GPIO read error");
+ else {
+ char value[1 + 1];
+
+ memset(value, '\0', 2);
+ ret = read(fd, value, 1);
+ if (ret < 0)
+ perror("Cannot read GPIO pin");
+
+ printf("\nGPIO pin %d is at \"%s\"\n", gpio_num,
+ (strncmp(value, "1", 1) == 0) ? "high" : "low");
+
+ close(fd);
+
+ /*
+ * Mark the GPIO as already exported, so that we can use
+ * unexport them during exit.
+ */
+ gpio_in_use[gpio_num] = 1;
+ }
+ } else {
+ if (errno == EINVAL)
+ printf("\nInvalid GPIO number\n");
+ else
+ perror("Error exporting GPIO number");
+
+ close(fd);
+ }
+ } else if (strncmp(cmdline, "getgpiomode", 11) == 0) {
+ int fd;
+ int gpio_num;
+ char gpio[4 + 1];
+ char pathname[80];
+ int ret = 0;
+
+ /* Parse the GPIO number from the command line */
+ if (sscanf(cmdline, "getgpiomode %d", &gpio_num) < 1) {
+ printf("Invalid inputs, please try again\n\n");
+ return;
+ }
+
+ fd = open("/sys/class/gpio/export", O_WRONLY);
+ if (fd < 0) {
+ if (errno == EACCES)
+ printf("\nYou do not have correct permission, please run as root\n");
+ else
+ perror("Error opening /sys/class/gpio/export");
+
+ exit(EXIT_FAILURE);
+ }
+
+ memset(gpio, '\0', sizeof(gpio));
+ if (snprintf(gpio, sizeof(gpio), "%d", gpio_num) < 1) {
+ printf("Invalid inputs, please try again\n");
+ close(fd);
+ return;
+ }
+
+ ret = write(fd, gpio, strlen(gpio));
+ /*
+ * There can be two situations ->
+ * 1) The GPIO is being exported for the first time.
+ * 2) The GPIO is being exported again.
+ * In the first case, the write to file descriptor should
+ * succeed, and we should still fall into the if clause.
+ *
+ * In the second case, write will fail and errno will be
+ * set to EBUSY, since the GPIO pin is already exported.
+ * Anything else is an error.
+ */
+ if((ret >= 0) || ((ret < 0) && (errno == EBUSY))) {
+ /* Close the last file descriptor */
+ close(fd);
+
+ memset(pathname, '\0', sizeof(pathname));
+ sprintf(pathname, "/sys/class/gpio/gpio%d/direction", gpio_num);
+
+ fd = open(pathname, O_RDONLY);
+ if (fd < 0)
+ perror("GPIO read error");
+ else {
+ char mode[3 + 1];
+ int c, i = 0;
+
+ memset(mode, '\0', (3 + 1));
+ ret = read(fd, mode, 3);
+ if (ret < 0)
+ perror("Cannot read GPIO pin");
+
+ printf("\nGPIO pin %d is in \"%s\" mode\n", gpio_num,
+ (strncmp(mode, "in", 2) == 0) ? "input" : "output");
+
+ close(fd);
+
+ /*
+ * Mark the GPIO as already exported, so that we can use
+ * unexport them during exit.
+ */
+ gpio_in_use[gpio_num] = 1;
+ }
+ } else {
+ if (errno == EINVAL)
+ printf("\nInvalid GPIO number\n");
+ else
+ perror("Error exporting GPIO number");
+
+ close(fd);
+ }
+ } else if (strncmp(cmdline, "setgpiomode", 11) == 0) {
+ int fd;
+ int gpio_num;
+ char mode[4 + 1];
+ char gpio[3 + 1];
+ int ret;
+
+ memset(mode, 0, sizeof(mode));
+ if (sscanf(cmdline, "setgpiomode %d %s", &gpio_num, mode) < 2) {
+ printf("Invalid inputs, please try again\n\n");
+ return;
+ }
+
+ memset(gpio, '\0', sizeof(gpio));
+ if (snprintf(gpio, sizeof(gpio), "%d", gpio_num) < 1) {
+ printf("Invalid inputs, please try again\n");
+ return;
+ }
+
+ fd = open("/sys/class/gpio/export", O_WRONLY);
+ if (fd < 0) {
+ if (errno == EACCES)
+ printf("\nYou do not have correct permission, please run as root\n");
+ else
+ perror("Error opening /sys/class/gpio/export");
+
+ exit(EXIT_FAILURE);
+ }
+
+ ret = write(fd, gpio, strlen(gpio));
+ if((ret >= 0) || ((ret < 0) && (errno == EBUSY))) {
+ char pathname[80];
+
+ /* Close the last file descriptor */
+ close(fd);
+
+ memset(pathname, '\0', sizeof(pathname));
+ sprintf(pathname, "/sys/class/gpio/gpio%d/direction", gpio_num);
+
+ fd = open(pathname, O_WRONLY);
+ if (fd < 0)
+ perror("GPIO open error");
+ else {
+ /* Sanity check */
+ if ((strncmp(mode, "in", 2) == 0) ||
+ (strncmp(mode, "out", 3) == 0) ||
+ (strncmp(mode, "high", 4) == 0) ||
+ (strncmp(mode, "low", 3) == 0)) {
+ /* Write mode into /sys/.../direction file */
+ ret = write(fd, mode, strlen(mode));
+ if (ret < 0)
+ perror("Error writing GPIO mode");
+ } else
+ printf("\nInvalid GPIO mode, please try again\n");
+
+ close(fd);
+
+ /*
+ * Mark the GPIO as exported, so that we can use
+ * unexport them during exit.
+ */
+ gpio_in_use[gpio_num] = 1;
+ }
+ } else {
+ if (errno == EINVAL)
+ printf("\nInvalid GPIO number\n");
+ else
+ perror("Error exporting GPIO number");
+
+ close(fd);
+ }
+ } else if (strncmp(cmdline, "setgpiovalue", 12) == 0) {
+ int fd;
+ int gpio_num;
+ char gpio[3 + 1];
+ char value[4 + 1];
+ int ret;
+
+ memset(value, 0, sizeof(value));
+ if (sscanf(cmdline, "setgpiovalue %d %s", &gpio_num, value) < 2) {
+ printf("Invalid inputs, please try again\n\n");
+ return;
+ }
+
+ memset(gpio, '\0', sizeof(gpio));
+ if (snprintf(gpio, sizeof(gpio), "%d", gpio_num) < 1) {
+ printf("Invalid inputs, please try again\n");
+ return;
+ }
+
+ fd = open("/sys/class/gpio/export", O_WRONLY);
+ if (fd < 0) {
+ if (errno == EACCES)
+ printf("\nYou do not have correct permission, please run as root\n");
+ else
+ perror("Error opening /sys/class/gpio/export");
+
+ exit(EXIT_FAILURE);
+ }
+
+ ret = write(fd, gpio, strlen(gpio));
+ if((ret >= 0) || ((ret < 0) && (errno == EBUSY))) {
+ char pathname[80];
+
+ /* Close the last file descriptor */
+ close(fd);
+
+ memset(pathname, '\0', sizeof(pathname));
+ sprintf(pathname, "/sys/class/gpio/gpio%d/value", gpio_num);
+
+ fd = open(pathname, O_WRONLY);
+ if (fd < 0)
+ perror("GPIO open error");
+ else {
+ if (strncmp(value, "high", 4) == 0)
+ value[0] = '1';
+ else if (strncmp(value, "low", 3) == 0)
+ value[0] = '0';
+ else {
+ printf("\nInvalid input, please try again...\n");
+ return;
+ }
+
+ /* Write value into /sys/.../value file */
+ ret = write(fd, value, 1);
+ if (ret < 0)
+ perror("Error writing GPIO value");
+
+ close(fd);
+
+ /*
+ * Mark the GPIO as exported, so that we can use
+ * unexport them during exit.
+ */
+ gpio_in_use[gpio_num] = 1;
+ }
+ } else {
+ if (errno == EINVAL)
+ printf("\nInvalid GPIO number\n");
+ else
+ perror("Error exporting GPIO number");
+
+ close(fd);
+ }
+ } else if (strncmp(cmdline, "dmesg", 5) == 0) {
+ if (system("dmesg | grep GPIO") < 0)
+ perror("Error executing \'dmesg | grep GPIO\'");
+ } else if (strncmp(cmdline, "license", 7) == 0) {
+ show_license();
+ } else if (strncmp(cmdline, "exit", 4) == 0) {
+ int i;
+ int ret;
+ char gpio[3 + 1];
+
+ printf("\nExiting...\n");
+
+ /* We need to unexport all the GPIO pins exported earlier */
+ for (i = 0; i < AMD_GPIO_NUM_PINS; i++) {
+ if (gpio_in_use[i]) {
+ int fd;
+
+ fd = open("/sys/class/gpio/unexport", O_WRONLY);
+ if (fd < 0) {
+ printf("\nPlease make sure AMD GPIO driver is loaded\n");
+ exit(EXIT_FAILURE);
+ }
+ memset(gpio, '\0', (3 + 1));
+ snprintf(gpio, 4, "%d", i);
+
+ ret = write(fd, gpio, strlen(gpio));
+ if (ret < 0)
+ perror("Error writing to /sys/class/gpio/unexport");
+ }
+ }
+
+ exit(EXIT_SUCCESS);
+ } else {
+ printf("\nUnknown command\n");
+ print_usage();
+ }
+}
+
+int main(void)
+{
+ char *cmdline= NULL;
+
+ printf("GPIO sample application version: %s\n", GPIO_APP_VERSION);
+ printf("Copyright (c) 2014, Advanced Micro Devices, Inc.\n"
+ "This sample application comes with ABSOLUTELY NO WARRANTY;\n"
+ "This is free software, and you are welcome to redistribute it\n"
+ "under certain conditions; type `license' for details.\n\n");
+
+ /* Handler for Ctrl+C */
+ signal(SIGINT, sighandler);
+
+ while (1) {
+ cmdline = readline(show_prompt());
+ parse_cmd(cmdline);
+ /* Free the memory malloc'ed by readline */
+ free(cmdline);
+ cmdline = NULL;
+ }
+
+ /* Should never reach here */
+ return 0;
+}
diff --git a/meta-amdfalconx86/recipes-applications/gpio-test/files/gpio-test.h b/meta-amdfalconx86/recipes-applications/gpio-test/files/gpio-test.h
new file mode 100644
index 00000000..af9c3b68
--- /dev/null
+++ b/meta-amdfalconx86/recipes-applications/gpio-test/files/gpio-test.h
@@ -0,0 +1,17 @@
+#ifndef _GPIO_TEST_H_
+#define _GPIO_TEST_H_
+
+
+
+/* IOCTL numbers */
+
+typedef struct {
+ int offset;
+ int value;
+} debug_data;
+
+#define GPIO_TEST_IOC_MAGIC 'k'
+#define GPIO_IOC_SWCTRLIN _IOW(GPIO_TEST_IOC_MAGIC, 1, debug_data)
+#define GPIO_IOC_SWCTRLEN _IOW(GPIO_TEST_IOC_MAGIC, 2, debug_data)
+
+#endif /* _GPIO_TEST_H_ */
diff --git a/meta-amdfalconx86/recipes-applications/gpio-test/gpio-test_1.0.bb b/meta-amdfalconx86/recipes-applications/gpio-test/gpio-test_1.0.bb
new file mode 100644
index 00000000..f3f3e2e3
--- /dev/null
+++ b/meta-amdfalconx86/recipes-applications/gpio-test/gpio-test_1.0.bb
@@ -0,0 +1,29 @@
+DESCRIPTION = "Sample application for AMD GPIO driver"
+SECTION = "applications"
+LICENSE = "BSD"
+DEPENDS = "readline"
+LIC_FILES_CHKSUM = "\
+ file://gpio-test.c;md5=e6e905de01cc60d7d588d095010cc904 \
+ file://gpio-test.h;md5=c7aaa743b172cf584032f9bfc5e85044 \
+ "
+
+PR = "r1"
+PV = "1.0"
+
+SRC_URI = "\
+ file://gpio-test.c \
+ file://gpio-test.h \
+ "
+
+TARGET_CC_ARCH += "${LDFLAGS}"
+
+S = "${WORKDIR}"
+
+do_compile() {
+ ${CC} gpio-test.c -o gpio-test -lreadline
+}
+
+do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 gpio-test ${D}${bindir}
+}
diff --git a/meta-amdfalconx86/recipes-applications/spi-test/files/spirom-test.c b/meta-amdfalconx86/recipes-applications/spi-test/files/spirom-test.c
new file mode 100644
index 00000000..22c90036
--- /dev/null
+++ b/meta-amdfalconx86/recipes-applications/spi-test/files/spirom-test.c
@@ -0,0 +1,798 @@
+/*****************************************************************************
+*
+* Copyright (c) 2014, Advanced Micro Devices, Inc.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+* * Neither the name of Advanced Micro Devices, Inc. nor the names of
+* its contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*
+***************************************************************************/
+#include <stdint.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <dirent.h>
+#include <signal.h>
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+
+#include <readline/readline.h>
+
+#include "spirom.h"
+
+#define SPI_APP_VERSION "1.0"
+
+static int device_opened = 0;
+static char filename[20];
+static int fd = -1;
+
+char *show_prompt(void)
+{
+ return "$ ";
+}
+
+void sighandler(int sig)
+{
+ /* Do nothing. That is the idea. */
+}
+
+void show_license(void)
+{
+ printf("/*****************************************************************************\n"
+ "*\n"
+ "* Copyright (c) 2014, Advanced Micro Devices, Inc.\n"
+ "* All rights reserved.\n"
+ "*\n"
+ "* Redistribution and use in source and binary forms, with or without\n"
+ "* modification, are permitted provided that the following conditions are met:\n"
+ "* * Redistributions of source code must retain the above copyright\n"
+ "* notice, this list of conditions and the following disclaimer.\n"
+ "* * Redistributions in binary form must reproduce the above copyright\n"
+ "* notice, this list of conditions and the following disclaimer in the\n"
+ "* documentation and/or other materials provided with the distribution.\n"
+ "* * Neither the name of Advanced Micro Devices, Inc. nor the names of\n"
+ "* its contributors may be used to endorse or promote products derived\n"
+ "* from this software without specific prior written permission.\n"
+ "*\n"
+ "* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n"
+ "* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"
+ "* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n"
+ "* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY\n"
+ "* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n"
+ "* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n"
+ "* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n"
+ "* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
+ "* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n"
+ "* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
+ "*\n"
+ "*\n"
+ "***************************************************************************/\n");
+}
+
+void print_usage(void)
+{
+ printf("\nCommands Supported ->\n");
+ printf(" enumerate : List all SPI device nodes available\n");
+ printf(" setdevice <dev_id> : Set the SPI device number to access\n");
+ printf(" wren : Enable Write operation on SPI device\n");
+ printf(" wrdi : Disable Write operation on SPI device\n");
+ printf(" chiperase : Erase entire ROM chip\n");
+ printf(" rdsr : Read status register of ROM device\n");
+ printf(" rdid : Read device identification string\n");
+ printf(" sectorerase <addr> <num_sectors> : Erase a fixed number of sectors starting at the address\n"
+ " specified\n");
+ printf(" blockerase <addr> <num_blocks> : Erase a fixed number of blocks starting at the address\n"
+ " specified\n");
+ printf(" read <addr> <num_bytes> <filename> : Read a fixed number of bytes starting at address\n"
+ " specified, and output the contents into file\n");
+ printf(" write <addr> <num_bytes> <filename> : Read a fixed number of bytes from file and output\n"
+ " the contents to the device starting at the address\n"
+ " specified\n");
+ printf(" license : Displays the terms of LICENSE for this application\n");
+ printf(" help : Displays help text\n");
+ printf(" exit : Exits the application\n\n");
+}
+
+void parse_cmd(const char *cmdline)
+{
+ struct spi_ioc_transfer tr;
+ unsigned int bytes_chunks;
+ unsigned int remaining_bytes;
+ int addr;
+ int ret;
+
+ if (strncmp(cmdline, "enumerate", 9) == 0) {
+ DIR *dir;
+ struct dirent *dir_entry;
+ int device_found = 0;
+
+ /* Get the directory handle */
+ if ((dir = opendir("/dev")) == NULL) {
+ printf("\n\nFailed to open directory /dev. You probably "
+ "do not have the right privileges.\n\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Iterate over all the directory entries */
+ while ((dir_entry = readdir(dir)) != NULL) {
+ /*
+ * If the file is a character device, and its signature
+ * matches spirom, then we print the corresponding file.
+ */
+ if ((dir_entry->d_type == DT_CHR) &&
+ (strncmp(dir_entry->d_name, "spirom", 6) == 0)) {
+ printf("/dev/%s\n", dir_entry->d_name);
+ device_found = 1;
+ }
+ }
+
+ printf("\n");
+
+ /*
+ * In case we did not find even a single entry, we print a
+ * message and exit.
+ */
+ if (!device_found) {
+ printf("\n\nNo spirom device nodes found, load spirom "
+ "kernel module and try again\n\n");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strncmp(cmdline, "setdevice", 9) == 0) {
+ char input[2 + 1];
+ int file_desc;
+
+ cmdline += 10;
+ memset(input, 0, 3);
+ if (sscanf(cmdline, "%s", input) < 1) {
+ printf("\nInvalid inputs, please try again\n\n");
+ return;
+ }
+
+ memset(filename, 0, 20);
+ snprintf(filename, 19, "/dev/spirom%s", input);
+ file_desc = open(filename, O_RDWR);
+ if (file_desc < 0) {
+ printf("\nError opening file %s\n\n", filename);
+ return;
+ }
+
+ /* Once we have validated inputs, we store them into the global
+ * variables used at other places in the program.
+ */
+ fd = file_desc;
+ device_opened = 1;
+ printf("\nSPI device set to /dev/spirom%s\n\n", input);
+ } else if (strncmp(cmdline, "wren", 4) == 0) {
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ /* command without data */
+ tr.buf[0] = ROM_WREN;
+ tr.direction = 0;
+ tr.len = 0;
+ tr.addr_present = 0;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1)
+ printf("\nError executing WREN command\n\n");
+ else
+ printf("\n...WREN completed successfully\n\n");
+ } else if (strncmp(cmdline, "wrdi", 4) == 0) {
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ /* command without data */
+ tr.buf[0] = ROM_WRDI;
+ tr.direction = 0;
+ tr.len = 0;
+ tr.addr_present = 0;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1)
+ printf("\nError executing WRDI command\n\n");
+ else
+ printf("\n...WRDI completed successfully\n\n");
+ } else if (strncmp(cmdline, "chiperase", 9) == 0) {
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ } else if ((tr.buf[1] & 0x02) == 0x00) {
+ printf("\nCannot execute CHIPERASE command, write is disabled\n\n");
+ return;
+ }
+
+ /* Command without data */
+ tr.buf[0] = ROM_CHIP_ERASE;
+ tr.direction = 0;
+ tr.len = 0;
+ tr.addr_present = 0;
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing CHIPERASE command\n\n");
+ return;
+ }
+
+ printf("\n\nCHIPERASE operation in progress, please do not "
+ "interrupt it.\n\n");
+
+ /* Make sure WIP has been reset */
+ while (1) {
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ }
+
+ if ((tr.buf[1] & 0x01) == 0x00)
+ break;
+ }
+
+ printf("\n\n...CHIPERASE completed successfully\n\n");
+ /* Restore signal handler to default */
+ } else if (strncmp(cmdline, "rdsr", 4) == 0) {
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ /* Command with response */
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ }
+
+ /*
+ * The 1-byte response will be stored in tr.buf,
+ * so print it out
+ */
+ printf("\nRDSR command returned: 0x%.2x\n\n", tr.buf[1]);
+ } else if (strncmp(cmdline, "rdid", 4) == 0) {
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ /* Command with response */
+ tr.buf[0] = ROM_RDID;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 3;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDID command\n\n");
+ return;
+ }
+
+ /*
+ * The 3-byte response will be stored in tr.buf,
+ * so print it out
+ */
+ printf("\nRDID command returned: 0x%.2x%.2x%.2x\n", tr.buf[1],
+ tr.buf[2], tr.buf[3]);
+ } else if (strncmp(cmdline, "sectorerase", 11) == 0) {
+ int nsectors;
+ int i;
+
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ cmdline += 12;
+ if (sscanf(cmdline, "0x%x 0x%x", &addr, &nsectors) < 2) {
+ printf("\nInvalid inputs, please try again\n\n");
+ return;
+ }
+
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ } else if ((tr.buf[1] & 0x02) == 0x00) {
+ printf("\nCannot execute SECTORERASE command, write is disabled\n\n");
+ return;
+ }
+
+ printf("\n\nSECTORERASE operation in progress, please do not "
+ "interrupt it.\n\n");
+
+ for (i = 0; i < nsectors; i++) {
+ /* Write Enable before Sector Erase */
+ tr.buf[0] = ROM_WREN;
+ tr.direction = 0;
+ tr.len = 0;
+ tr.addr_present = 0;
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing WREN command\n\n");
+ return;
+ }
+
+ /* Command with address but no data */
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_SECTOR_ERASE;
+ tr.buf[3] = addr & 0xff;
+ tr.buf[2] = (addr >> 8) & 0xff;
+ tr.buf[1] = (addr >> 16) & 0xff;
+ tr.addr_present = 1;
+ tr.direction = 0;
+ tr.len = 0;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing SECTORERASE command\n\n");
+ return;
+ }
+
+ /* point to the next 4k sector */
+ addr += 4 * 1024;
+
+ /*
+ * Before the next loop, we need to make sure that WIP
+ * bit in the output of RDSR has been reset.
+ */
+ while (1) {
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ }
+
+ if ((tr.buf[1] & 0x01) == 0x00)
+ break;
+ }
+ }
+
+ printf("\n\n...SECTORERASE completed successfully\n\n");
+ } else if (strncmp(cmdline, "blockerase", 10) == 0) {
+ int nblocks;
+ int i;
+
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ cmdline += 11;
+ if (sscanf(cmdline, "0x%x 0x%x", &addr, &nblocks) < 2) {
+ printf("\nInvalid inputs, please try again\n\n");
+ return;
+ }
+
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ } else if ((tr.buf[1] & 0x02) == 0x00) {
+ printf("\nError executing BLOCKERASE command, write is disabled\n\n");
+ return;
+ }
+
+ printf("\n\nBLOCKERASE operation in progress, please do not "
+ "interrupt it.\n\n");
+
+ for (i = 0; i < nblocks; i++) {
+ /* Write Enable before Block Erase */
+ tr.buf[0] = ROM_WREN;
+ tr.direction = 0;
+ tr.len = 0;
+ tr.addr_present = 0;
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing WREN command\n\n");
+ return;
+ }
+
+ /* Command with address but no data */
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_BLOCK_ERASE;
+ tr.buf[3] = addr & 0xff;
+ tr.buf[2] = (addr >> 8) & 0xff;
+ tr.buf[1] = (addr >> 16) & 0xff;
+ tr.addr_present = 1;
+ tr.direction = 0;
+ tr.len = 0;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing BLOCKERASE command\n\n");
+ return;
+ }
+
+ /* point to the next 64k block */
+ addr += 64 * 1024;
+
+ /*
+ * Before the next loop, we need to make sure that WIP
+ * bit in the output of RDSR has been reset.
+ */
+ while (1) {
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ }
+
+ if ((tr.buf[1] & 0x01) == 0x00)
+ break;
+ }
+ }
+
+ printf("\n\n...BLOCKERASE completed successfully\n\n");
+ } else if (strncmp(cmdline, "read", 4) == 0) {
+ int nbytes;
+ int outfile_fd;
+ int i;
+
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ cmdline += 5;
+ memset(filename, 0, 20);
+ if (sscanf(cmdline, "0x%x 0x%x %s", &addr, &nbytes, filename) < 3) {
+ printf("\nInvalid inputs, please try again\n\n");
+ return;
+ }
+
+ /*
+ * Open the output file for writing. Create a new file if not
+ * there, and empty the file before writing if file already
+ * exists.
+ */
+ outfile_fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (outfile_fd < 0) {
+ printf("\nError opening file %s for writing\n\n", filename);
+ return;
+ }
+
+ /*
+ * We will break down the bytes to be received into chunks
+ * of 64 bytes. The data might not be an even multiple of 64,
+ * so in that case we will have some remaining bytes (< 64),
+ * which we handle separately.
+ */
+ bytes_chunks = nbytes / 64;
+ remaining_bytes = nbytes % 64;
+
+ printf("\n\nREAD operation in progress.\n\n");
+
+ for (i = 0; i < bytes_chunks; i++) {
+ /* Command with address and data */
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_READ;
+ tr.direction = RECEIVE;
+ /*
+ * We will store the address into the buffer in little
+ * endian order.
+ */
+ tr.buf[3] = addr & 0xff;
+ tr.buf[2] = (addr >> 8) & 0xff;
+ tr.buf[1] = (addr >> 16) & 0xff;
+ tr.len = 64;
+ tr.addr_present = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing READ command\n\n");
+ return;
+ }
+
+ /* Write the data read to output file */
+ if (write(outfile_fd, &tr.buf[4], tr.len) < 0) {
+ printf("\nError writing to file %s\n\n", filename);
+ return;
+ }
+ addr += 64;
+ }
+
+ if (remaining_bytes) {
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_READ;
+ tr.direction = RECEIVE;
+ tr.buf[3] = addr & 0xff;
+ tr.buf[2] = (addr >> 8) & 0xff;
+ tr.buf[1] = (addr >> 16) & 0xff;
+ tr.len = remaining_bytes;
+ tr.addr_present = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing READ command\n\n");
+ return;
+ }
+
+ if (write(outfile_fd, &tr.buf[4], tr.len) < 0) {
+ printf("\nError writing to file %s\n\n", filename);
+ return;
+ }
+ }
+
+ printf("\n\n...READ completed successfully\n\n");
+ close(outfile_fd);
+ } else if (strncmp(cmdline, "write", 5) == 0) {
+ int nbytes;
+ int infile_fd;
+ int i;
+
+ if (!device_opened) {
+ printf("\nSPI device needs to be set before you can "
+ "perform this operation\n\n");
+ return;
+ }
+
+ cmdline += 6;
+ memset(filename, 0, 20);
+ if (sscanf(cmdline, "0x%x 0x%x %s", &addr, &nbytes, filename) < 3) {
+ printf("\nInvalid inputs, please try again\n\n");
+ return;
+ }
+
+ /* Open the input file for reading*/
+ infile_fd = open(filename, O_RDONLY);
+ if (infile_fd < 0) {
+ printf("\nError opening file %s for reading\n\n", filename);
+ return;
+ }
+
+ /*
+ * We break down the bytes to be transmitted into chunks of
+ * 64 bytes. As with read, the data might not be an even
+ * multiple of 64 bytes, so the remaining bytes are handled
+ * at the end. First confirm that writes are enabled at all.
+ */
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ } else if ((tr.buf[1] & 0x02) == 0x00) {
+ printf("\nCannot execute WRITE command, write is disabled\n\n");
+ return;
+ }
+
+ bytes_chunks = nbytes / 64;
+ remaining_bytes = nbytes % 64;
+
+ printf("\n\nWRITE operation in progress, please do not "
+ " stop in between.\n\n");
+
+ for (i = 0; i < bytes_chunks; i++) {
+ tr.buf[0] = ROM_WREN;
+ tr.direction = 0;
+ tr.len = 0;
+ tr.addr_present = 0;
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing WREN command\n\n");
+ return;
+ }
+
+ /* Command with data and address */
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_WRITE;
+ tr.direction = TRANSMIT;
+ /*
+ * Store the address in the buffer most-significant byte
+ * first: buf[1] holds the high byte, buf[3] the low byte.
+ */
+ tr.buf[3] = addr & 0xff;
+ tr.buf[2] = (addr >> 8) & 0xff;
+ tr.buf[1] = (addr >> 16) & 0xff;
+ tr.len = 64;
+ tr.addr_present = 1;
+
+ /* Read 64 bytes from input file to buffer */
+ if (read(infile_fd, &tr.buf[4], tr.len) < 0) {
+ printf("\nError reading from file %s\n\n", filename);
+ return;
+ }
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing WRITE command\n\n");
+ return;
+ }
+
+ addr += 64;
+
+ /*
+ * Before the next loop, we need to make sure that the WIP
+ * bit in the RDSR output has been cleared.
+ */
+ while (1) {
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ }
+
+ if ((tr.buf[1] & 0x01) == 0x00)
+ break;
+ }
+ }
+
+ if (remaining_bytes) {
+ tr.buf[0] = ROM_WREN;
+ tr.direction = 0;
+ tr.len = 0;
+ tr.addr_present = 0;
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing WREN command\n\n");
+ return;
+ }
+
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_WRITE;
+ tr.direction = TRANSMIT;
+ tr.buf[3] = addr & 0xff;
+ tr.buf[2] = (addr >> 8) & 0xff;
+ tr.buf[1] = (addr >> 16) & 0xff;
+ tr.len = remaining_bytes;
+ tr.addr_present = 1;
+
+ if (read(infile_fd, &tr.buf[4], tr.len) < 0) {
+ printf("\nError reading from file %s\n\n", filename);
+ return;
+ }
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing WRITE command\n\n");
+ return;
+ }
+
+ while (1) {
+ memset(&tr, 0, sizeof(struct spi_ioc_transfer));
+ tr.buf[0] = ROM_RDSR;
+ tr.direction = RECEIVE;
+ tr.addr_present = 0;
+ tr.len = 1;
+
+ ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ if (ret < 1) {
+ printf("\nError executing RDSR command\n\n");
+ return;
+ }
+
+ if ((tr.buf[1] & 0x01) == 0x00)
+ break;
+ }
+ }
+
+ printf("\n\n...WRITE completed successfully\n\n");
+ close(infile_fd);
+ } else if (strncmp(cmdline, "license", 7) == 0) {
+ show_license();
+ } else if (strncmp(cmdline, "exit", 4) == 0) {
+ printf("\nExiting...\n");
+ close(fd);
+ exit(EXIT_SUCCESS);
+ } else if (strncmp(cmdline, "help", 4) == 0) {
+ print_usage();
+ } else {
+ printf("\nUnknown command\n");
+ print_usage();
+ }
+}
+
+int main(void)
+{
+ char *cmdline = NULL;
+
+ printf("SPI sample application version: %s\n", SPI_APP_VERSION);
+ printf("Copyright (c) 2014, Advanced Micro Devices, Inc.\n"
+ "This sample application comes with ABSOLUTELY NO WARRANTY;\n"
+ "This is free software, and you are welcome to redistribute it\n"
+ "under certain conditions; type `license` for details.\n\n");
+
+ /* Set the signal handler */
+ signal(SIGINT, sighandler);
+
+ while (1) {
+ cmdline = readline(show_prompt());
+ parse_cmd(cmdline);
+ /* Free the memory malloc'ed by readline */
+ free(cmdline);
+ }
+
+ /* Restore the default signal handler */
+ signal(SIGINT, SIG_DFL);
+
+ /* Should never reach here */
+ return 0;
+}
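The RDSR/WIP polling loop above is repeated after every erase and after every 64-byte page write. A minimal sketch of a helper performing the same polling against the spirom.h interface (the name wait_until_write_done() is hypothetical and not part of the original source):

    /* Hypothetical helper: poll RDSR until the WIP bit (bit 0) is clear. */
    static int wait_until_write_done(int fd)
    {
        struct spi_ioc_transfer tr;
        int ret;

        do {
            memset(&tr, 0, sizeof(struct spi_ioc_transfer));
            tr.buf[0] = ROM_RDSR;       /* read status register */
            tr.direction = RECEIVE;
            tr.addr_present = 0;
            tr.len = 1;

            ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
            if (ret < 1)
                return -1;              /* RDSR itself failed */
        } while (tr.buf[1] & 0x01);     /* WIP still set, keep polling */

        return 0;
    }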
diff --git a/meta-amdfalconx86/recipes-applications/spi-test/files/spirom.h b/meta-amdfalconx86/recipes-applications/spi-test/files/spirom.h
new file mode 100644
index 00000000..f599925f
--- /dev/null
+++ b/meta-amdfalconx86/recipes-applications/spi-test/files/spirom.h
@@ -0,0 +1,53 @@
+#ifndef SPIROM_H
+#define SPIROM_H
+
+#include <linux/types.h>
+
+/*---------------------------------------------------------------------------*/
+
+/* IOCTL commands */
+
+#define SPI_IOC_MAGIC 'k'
+
+#define TRANSMIT 1
+#define RECEIVE 2
+
+/*
+ * struct spi_ioc_transfer - interface structure between application and ioctl
+ *
+ * @buf: Buffer holding the 1-byte command, the 3-byte address, and the data
+ * to transmit or receive. The array below allows up to 64 bytes of
+ * data; the internal FIFO of the controller can hold a maximum of
+ * 8 bytes, including the address.
+ *
+ * @direction: Direction of data transfer, either TRANSMIT or RECEIVE.
+ *
+ * @len: Length of data excluding command and address.
+ *
+ * @addr_present: Flag to indicate whether 'buf' above contains an address.
+ */
+struct spi_ioc_transfer {
+ __u8 buf[64 + 1 + 3];
+ __u8 direction;
+ __u8 len;
+ __u8 addr_present;
+};
+
+/* not all platforms use <asm-generic/ioctl.h> or _IOC_TYPECHECK() ... */
+#define SPI_MSGSIZE(N) \
+ ((((N)*(sizeof (struct spi_ioc_transfer))) < (1 << _IOC_SIZEBITS)) \
+ ? ((N)*(sizeof (struct spi_ioc_transfer))) : 0)
+#define SPI_IOC_MESSAGE(N) _IOW(SPI_IOC_MAGIC, 0, char[SPI_MSGSIZE(N)])
+
+/* SPI ROM command codes */
+#define ROM_WREN 0x06
+#define ROM_WRDI 0x04
+#define ROM_RDSR 0x05
+#define ROM_RDID 0x9F
+#define ROM_CHIP_ERASE 0x60
+#define ROM_SECTOR_ERASE 0x20
+#define ROM_BLOCK_ERASE 0xD8
+#define ROM_READ 0x03
+#define ROM_WRITE 0x02
+
+#endif /* SPIROM_H */
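For reference, a minimal sketch of how an application drives this interface, mirroring what spirom-test.c does above: place the command byte in buf[0], set direction and len, and issue SPI_IOC_MESSAGE(1); received bytes follow the command byte (or the 3 address bytes when addr_present is set). The device path and the helper name below are assumptions for illustration only:

    /* Hypothetical example: read the 3-byte JEDEC ID with ROM_RDID. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "spirom.h"

    static int read_jedec_id(const char *dev_path /* e.g. an assumed "/dev/spirom0" */)
    {
        struct spi_ioc_transfer tr;
        int fd = open(dev_path, O_RDWR);

        if (fd < 0)
            return -1;

        memset(&tr, 0, sizeof(tr));
        tr.buf[0] = ROM_RDID;       /* command byte goes first */
        tr.direction = RECEIVE;     /* data flows from the ROM to the host */
        tr.addr_present = 0;        /* RDID takes no address */
        tr.len = 3;                 /* manufacturer ID + two device ID bytes */

        if (ioctl(fd, SPI_IOC_MESSAGE(1), &tr) < 1) {
            close(fd);
            return -1;
        }

        /* Received bytes follow the command byte in the buffer. */
        printf("JEDEC ID: %02x %02x %02x\n", tr.buf[1], tr.buf[2], tr.buf[3]);
        close(fd);
        return 0;
    }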
diff --git a/meta-amdfalconx86/recipes-applications/spi-test/spi-test_1.0.bb b/meta-amdfalconx86/recipes-applications/spi-test/spi-test_1.0.bb
new file mode 100644
index 00000000..8824d55f
--- /dev/null
+++ b/meta-amdfalconx86/recipes-applications/spi-test/spi-test_1.0.bb
@@ -0,0 +1,27 @@
+DESCRIPTION = "Sample application for AMD SPI driver"
+SECTION = "applications"
+LICENSE = "BSD"
+DEPENDS = "readline"
+LIC_FILES_CHKSUM = "file://spirom-test.c;md5=73fa56dd9cc632c765154aa1c2f5b228 \
+ file://spirom.h;md5=1990f1f1e7a82115c354152bed83df52 \
+ "
+
+PR = "r1"
+PV = "1.0"
+
+SRC_URI = "file://spirom-test.c \
+ file://spirom.h \
+ "
+
+S = "${WORKDIR}"
+
+TARGET_CC_ARCH += "${LDFLAGS}"
+
+do_compile() {
+ ${CC} spirom-test.c -o spirom-test -lreadline
+}
+
+do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 spirom-test ${D}${bindir}
+}
diff --git a/meta-amdfalconx86/recipes-bsp/formfactor/formfactor/amdfalconx86/machconfig b/meta-amdfalconx86/recipes-bsp/formfactor/formfactor/amdfalconx86/machconfig
new file mode 100644
index 00000000..28ca080e
--- /dev/null
+++ b/meta-amdfalconx86/recipes-bsp/formfactor/formfactor/amdfalconx86/machconfig
@@ -0,0 +1,3 @@
+# Assume a USB mouse and keyboard are connected
+HAVE_TOUCHSCREEN=n
+HAVE_KEYBOARD=y
diff --git a/meta-amdfalconx86/recipes-bsp/formfactor/formfactor_0.0.bbappend b/meta-amdfalconx86/recipes-bsp/formfactor/formfactor_0.0.bbappend
new file mode 100644
index 00000000..6d4804d1
--- /dev/null
+++ b/meta-amdfalconx86/recipes-bsp/formfactor/formfactor_0.0.bbappend
@@ -0,0 +1,2 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
diff --git a/meta-amdfalconx86/recipes-core/llvm/files/0001-force-link-pass.o.patch b/meta-amdfalconx86/recipes-core/llvm/files/0001-force-link-pass.o.patch
new file mode 100644
index 00000000..e4b5f471
--- /dev/null
+++ b/meta-amdfalconx86/recipes-core/llvm/files/0001-force-link-pass.o.patch
@@ -0,0 +1,42 @@
+From 2f1e7f43ee516e56f9042c94abf6f90b8f61b7a9 Mon Sep 17 00:00:00 2001
+From: Arindam Nath <arindam.nath@amd.com>
+Date: Thu, 16 Apr 2015 16:11:24 +0530
+Subject: [PATCH 1/1] force link pass.o
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ tools/bugpoint/Makefile | 3 +++
+ tools/opt/Makefile | 3 +++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/tools/bugpoint/Makefile b/tools/bugpoint/Makefile
+index 174f8d2..dfde77c 100644
+--- a/tools/bugpoint/Makefile
++++ b/tools/bugpoint/Makefile
+@@ -12,6 +12,9 @@ TOOLNAME := bugpoint
+ LINK_COMPONENTS := asmparser instrumentation scalaropts ipo linker bitreader \
+ bitwriter irreader vectorize objcarcopts codegen
+
++# Crappy workaround to make sure it links correctly.
++LLVMLibsOptions := ../../lib/IR/Release*/Pass.o
++
+ # Support plugins.
+ NO_DEAD_STRIP := 1
+
+diff --git a/tools/opt/Makefile b/tools/opt/Makefile
+index 2422eb4..79ab3f7 100644
+--- a/tools/opt/Makefile
++++ b/tools/opt/Makefile
+@@ -11,6 +11,9 @@ LEVEL := ../..
+ TOOLNAME := opt
+ LINK_COMPONENTS := bitreader bitwriter asmparser irreader instrumentation scalaropts objcarcopts ipo vectorize all-targets codegen passes
+
++# Crappy workaround to make sure it links correctly.
++LLVMLibsOptions := ../../lib/IR/Release*/Pass.o
++
+ # Support plugins.
+ NO_DEAD_STRIP := 1
+
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-core/llvm/llvm3.7.0.inc b/meta-amdfalconx86/recipes-core/llvm/llvm3.7.0.inc
new file mode 100644
index 00000000..bb6c5ca3
--- /dev/null
+++ b/meta-amdfalconx86/recipes-core/llvm/llvm3.7.0.inc
@@ -0,0 +1,49 @@
+#
+# Since we need LLVM 3.7.0 but meta-oe currently only supports
+# 3.3, override the minimum bits needed to get it to work.
+# Eventually this will need to be reconciled with upstream
+#
+
+# Ideally this would be done as a patch but since llvm3.inc already
+# manipulates this file with sed, we have to do the same. We also need
+# to do this as an append to ensure proper sequencing.
+do_configure_append_x86-64() {
+ # Fix paths in llvm-config
+ sed -ri "s#lib/${LLVM_DIR}#${baselib}/${LLVM_DIR}#g" ${S}/tools/llvm-config/llvm-config.cpp
+}
+
+#
+# Override the do_install() provided by llvm3.inc to use the lib64
+# directory naming.
+#
+# This should probably be cleaned up at some point in the stock LLVM
+# sources but at the moment they do not appear to handle lib64 very well.
+#
+do_install() {
+ cd ${LLVM_BUILD_DIR}
+ oe_runmake DESTDIR=${LLVM_INSTALL_DIR} install
+
+ mv ${LLVM_INSTALL_DIR}${bindir}/${HOST_SYS}-llvm-config-host ${LLVM_INSTALL_DIR}/llvm-config-host
+
+ install -d ${D}${bindir}/${LLVM_DIR}
+ mv ${LLVM_INSTALL_DIR}${bindir}/* ${D}${bindir}/${LLVM_DIR}/
+
+ install -d ${D}${includedir}/${LLVM_DIR}
+ mv ${LLVM_INSTALL_DIR}${includedir}/* ${D}${includedir}/${LLVM_DIR}/
+
+ install -d ${D}${libdir}/${LLVM_DIR}
+
+ # The LLVM sources have "/lib" embedded and so we cannot completely rely on the ${libdir} variable
+ if [ -d ${LLVM_INSTALL_DIR}${libdir}/ ]; then
+ mv ${LLVM_INSTALL_DIR}${libdir}/* ${D}${libdir}/${LLVM_DIR}/
+ elif [ -d ${LLVM_INSTALL_DIR}${prefix}/lib ]; then
+ mv ${LLVM_INSTALL_DIR}${prefix}/lib/* ${D}${libdir}/${LLVM_DIR}/
+ elif [ -d ${LLVM_INSTALL_DIR}${prefix}/lib64 ]; then
+ mv ${LLVM_INSTALL_DIR}${prefix}/lib64/* ${D}${libdir}/${LLVM_DIR}/
+ fi
+
+ ln -s ${LLVM_DIR}/libLLVM-${PV}${SOLIBSDEV} ${D}${libdir}/libLLVM-${PV}${SOLIBSDEV}
+
+ install -d ${D}${docdir}/${LLVM_DIR}
+ mv ${LLVM_INSTALL_DIR}${prefix}/docs/llvm/* ${D}${docdir}/${LLVM_DIR}
+}
diff --git a/meta-amdfalconx86/recipes-core/llvm/llvm3.7.0_3.7.0.bb b/meta-amdfalconx86/recipes-core/llvm/llvm3.7.0_3.7.0.bb
new file mode 100644
index 00000000..00bd0b1a
--- /dev/null
+++ b/meta-amdfalconx86/recipes-core/llvm/llvm3.7.0_3.7.0.bb
@@ -0,0 +1,29 @@
+require recipes-core/llvm/llvm.inc
+require llvm3.7.0.inc
+
+LIC_FILES_CHKSUM = "file://LICENSE.TXT;md5=4c0bc17c954e99fd547528d938832bfa"
+
+DEPENDS += "zlib"
+RDEPENDS_${PN} += "ncurses-terminfo"
+PROVIDES += "llvm"
+
+EXTRA_OECONF += "--enable-zlib"
+PACKAGECONFIG_append_amd = "r600"
+
+SRC_URI = "\
+ git://llvm.org/git/llvm.git;branch=master;protocol=http \
+ file://0001-force-link-pass.o.patch \
+ "
+
+S = "${WORKDIR}/git"
+
+SRCREV = "ffc045ab802ea542aabf1f1f22f97cb8a0ad6cde"
+PV = "3.7.0"
+
+PACKAGECONFIG ??= ""
+PACKAGECONFIG[r600] = "--enable-experimental-targets=R600,,,"
+
+do_configure_prepend() {
+ # Drop "svn" suffix from version string
+ sed -i 's/${PV}svn/${PV}/g' ${S}/configure
+}
diff --git a/meta-amdfalconx86/recipes-graphics/drm/libdrm/0001-drm-add-libdrm_amdgpu.patch b/meta-amdfalconx86/recipes-graphics/drm/libdrm/0001-drm-add-libdrm_amdgpu.patch
new file mode 100644
index 00000000..1609db27
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/drm/libdrm/0001-drm-add-libdrm_amdgpu.patch
@@ -0,0 +1,5665 @@
+From 7a6c09a5a0b17e9e981424fe35ea3492369d4eab Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 20 Apr 2015 12:04:22 -0400
+Subject: [PATCH 1/3] drm: add libdrm_amdgpu
+
+This is the new ioctl wrapper used by the new admgpu driver.
+It's primarily used by xf86-video-amdgpu and mesa.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ Makefile.am | 5 +
+ amdgpu/Makefile.am | 55 ++
+ amdgpu/amdgpu.h | 1278 ++++++++++++++++++++++++++++++++++++++++++++
+ amdgpu/amdgpu_bo.c | 622 +++++++++++++++++++++
+ amdgpu/amdgpu_cs.c | 981 ++++++++++++++++++++++++++++++++++
+ amdgpu/amdgpu_device.c | 242 +++++++++
+ amdgpu/amdgpu_gpu_info.c | 275 ++++++++++
+ amdgpu/amdgpu_internal.h | 210 ++++++++
+ amdgpu/amdgpu_vamgr.c | 169 ++++++
+ amdgpu/libdrm_amdgpu.pc.in | 10 +
+ amdgpu/util_double_list.h | 146 +++++
+ amdgpu/util_hash.c | 382 +++++++++++++
+ amdgpu/util_hash.h | 99 ++++
+ amdgpu/util_hash_table.c | 257 +++++++++
+ amdgpu/util_hash_table.h | 65 +++
+ amdgpu/util_math.h | 32 ++
+ configure.ac | 20 +
+ include/drm/amdgpu_drm.h | 600 +++++++++++++++++++++
+ 18 files changed, 5448 insertions(+)
+ create mode 100644 amdgpu/Makefile.am
+ create mode 100644 amdgpu/amdgpu.h
+ create mode 100644 amdgpu/amdgpu_bo.c
+ create mode 100644 amdgpu/amdgpu_cs.c
+ create mode 100644 amdgpu/amdgpu_device.c
+ create mode 100644 amdgpu/amdgpu_gpu_info.c
+ create mode 100644 amdgpu/amdgpu_internal.h
+ create mode 100644 amdgpu/amdgpu_vamgr.c
+ create mode 100644 amdgpu/libdrm_amdgpu.pc.in
+ create mode 100644 amdgpu/util_double_list.h
+ create mode 100644 amdgpu/util_hash.c
+ create mode 100644 amdgpu/util_hash.h
+ create mode 100644 amdgpu/util_hash_table.c
+ create mode 100644 amdgpu/util_hash_table.h
+ create mode 100644 amdgpu/util_math.h
+ create mode 100644 include/drm/amdgpu_drm.h
+
+diff --git a/Makefile.am b/Makefile.am
+index 42d3d7f..5defeb2 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -57,6 +57,10 @@ if HAVE_RADEON
+ RADEON_SUBDIR = radeon
+ endif
+
++if HAVE_AMDGPU
++AMDGPU_SUBDIR = amdgpu
++endif
++
+ if HAVE_OMAP
+ OMAP_SUBDIR = omap
+ endif
+@@ -79,6 +83,7 @@ SUBDIRS = \
+ $(INTEL_SUBDIR) \
+ $(NOUVEAU_SUBDIR) \
+ $(RADEON_SUBDIR) \
++ $(AMDGPU_SUBDIR) \
+ $(OMAP_SUBDIR) \
+ $(EXYNOS_SUBDIR) \
+ $(FREEDRENO_SUBDIR) \
+diff --git a/amdgpu/Makefile.am b/amdgpu/Makefile.am
+new file mode 100644
+index 0000000..9baf194
+--- /dev/null
++++ b/amdgpu/Makefile.am
+@@ -0,0 +1,55 @@
++# Copyright © 2008 Jérôme Glisse
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice (including the next
++# paragraph) shall be included in all copies or substantial portions of the
++# Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++# IN THE SOFTWARE.
++#
++# Authors:
++# Jérôme Glisse <glisse@freedesktop.org>
++
++AM_CFLAGS = \
++ $(WARN_CFLAGS) -Wno-switch-enum \
++ -I$(top_srcdir) \
++ -I$(top_srcdir)/amdgpu \
++ $(PTHREADSTUBS_CFLAGS) \
++ -I$(top_srcdir)/include/drm
++
++libdrm_amdgpu_la_LTLIBRARIES = libdrm_amdgpu.la
++libdrm_amdgpu_ladir = $(libdir)
++libdrm_amdgpu_la_LDFLAGS = -version-number 1:0:1 -no-undefined
++libdrm_amdgpu_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
++
++libdrm_amdgpu_la_SOURCES = \
++ amdgpu_gpu_info.c \
++ amdgpu_device.c \
++ amdgpu_bo.c \
++ util_hash.c \
++ util_hash_table.c \
++ amdgpu_vamgr.c \
++ amdgpu_cs.c
++
++nodist_EXTRA_libdrm_amdgpu_la_SOURCES = dummy.cxx
++
++libdrm_amdgpuincludedir = ${includedir}/libdrm
++libdrm_amdgpuinclude_HEADERS = \
++ amdgpu.h
++
++pkgconfigdir = @pkgconfigdir@
++pkgconfig_DATA = libdrm_amdgpu.pc
++
++EXTRA_DIST = libdrm_amdgpu.pc.in
+diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
+new file mode 100644
+index 0000000..90dc33c
+--- /dev/null
++++ b/amdgpu/amdgpu.h
+@@ -0,0 +1,1278 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++
++/**
++ * \file amdgpu.h
++ *
++ * Declare public libdrm_amdgpu API
++ *
++ * This file defines the API exposed by the libdrm_amdgpu library.
++ * Users who want to use libdrm_amdgpu functionality must include
++ * this file.
++ *
++ */
++#ifndef _amdgpu_h_
++#define _amdgpu_h_
++
++#include <stdint.h>
++#include <stdbool.h>
++
++struct drm_amdgpu_info_hw_ip;
++
++/*--------------------------------------------------------------------------*/
++/* --------------------------- Defines ------------------------------------ */
++/*--------------------------------------------------------------------------*/
++
++/**
++ * Define max. number of Command Buffers (IB) which could be sent to a single
++ * hardware IP to accommodate CE/DE requirements
++ *
++ * \sa amdgpu_cs_ib_info
++*/
++#define AMDGPU_CS_MAX_IBS_PER_SUBMIT 4
++
++/**
++ *
++ */
++#define AMDGPU_TIMEOUT_INFINITE 0xffffffffffffffffull
++
++/**
++ * The special flag for GFX submission to identify that this is CE IB
++ * \sa amdgpu_cs_ib_info
++*/
++#define AMDGPU_CS_GFX_IB_CE 0x1
++
++/**
++ * The special flag to mark that this IB will be re-used
++ * by the client and should not be automatically returned
++ * to the free pool by libdrm_amdgpu when the submission is completed.
++ *
++ * \sa amdgpu_cs_ib_info
++*/
++#define AMDGPU_CS_REUSE_IB 0x2
++
++/**
++ * The special resource flag for IB submission.
++ * When VRAM is full, some resources may be moved to GTT to make room
++ * for other resources which want to be in VRAM. This flag affects the order
++ * in which resources are moved back to VRAM until there is no space there.
++ * The resources with the highest priority will be moved first.
++ * The value can be between 0 and 15, inclusive.
++ */
++#define AMDGPU_IB_RESOURCE_PRIORITY(x) ((x) & 0xf)
++
++
++/*--------------------------------------------------------------------------*/
++/* ----------------------------- Enums ------------------------------------ */
++/*--------------------------------------------------------------------------*/
++
++/**
++ * Enum describing possible handle types
++ *
++ * \sa amdgpu_bo_import, amdgpu_bo_export
++ *
++*/
++enum amdgpu_bo_handle_type {
++ /** GEM flink name (needs DRM authentication, used by DRI2) */
++ amdgpu_bo_handle_type_gem_flink_name = 0,
++
++ /** KMS handle which is used by all driver ioctls */
++ amdgpu_bo_handle_type_kms = 1,
++
++ /** DMA-buf fd handle */
++ amdgpu_bo_handle_type_dma_buf_fd = 2
++};
++
++/**
++ * Enum describing possible context reset states
++ *
++ * \sa amdgpu_cs_query_reset_state()
++ *
++*/
++enum amdgpu_cs_ctx_reset_state {
++ /** No reset was detected */
++ amdgpu_cs_reset_no_error = 0,
++
++ /** Reset/TDR was detected and this context caused it */
++ amdgpu_cs_reset_guilty = 1,
++
++ /** Reset/TDR was detected but was caused by another context */
++ amdgpu_cs_reset_innocent = 2,
++
++ /** Reset/TDR was detected but its cause is unknown */
++ amdgpu_cs_reset_unknown = 3
++};
++
++/**
++ * For performance reasons and to simplify the logic, libdrm_amdgpu will
++ * handle only IBs of certain pre-defined sizes.
++ *
++ * \sa amdgpu_cs_alloc_ib()
++ */
++enum amdgpu_cs_ib_size {
++ amdgpu_cs_ib_size_4K = 1,
++ amdgpu_cs_ib_size_16K = 2,
++ amdgpu_cs_ib_size_32K = 3,
++ amdgpu_cs_ib_size_64K = 4,
++ amdgpu_cs_ib_size_128K = 5
++};
++
++/** The number of different IB sizes */
++#define AMDGPU_CS_IB_SIZE_NUM 6
++
++
++/*--------------------------------------------------------------------------*/
++/* -------------------------- Datatypes ----------------------------------- */
++/*--------------------------------------------------------------------------*/
++
++/**
++ * Define opaque pointer to context associated with fd.
++ * This context will be returned as the result of
++ * "initialize" function and should be pass as the first
++ * parameter to any API call
++ */
++typedef struct amdgpu_device *amdgpu_device_handle;
++
++/**
++ * Define GPU Context type as pointer to opaque structure
++ * Example of GPU Context is the "rendering" context associated
++ * with OpenGL context (glCreateContext)
++ */
++typedef struct amdgpu_context *amdgpu_context_handle;
++
++/**
++ * Define handle for amdgpu resources: buffer, GDS, etc.
++ */
++typedef struct amdgpu_bo *amdgpu_bo_handle;
++
++/**
++ * Define handle to be used when dealing with command
++ * buffers (a.k.a. ibs)
++ *
++ */
++typedef struct amdgpu_ib *amdgpu_ib_handle;
++
++
++/*--------------------------------------------------------------------------*/
++/* -------------------------- Structures ---------------------------------- */
++/*--------------------------------------------------------------------------*/
++
++/**
++ * Structure describing memory allocation request
++ *
++ * \sa amdgpu_bo_alloc()
++ *
++*/
++struct amdgpu_bo_alloc_request {
++ /** Allocation request. It must be aligned correctly. */
++ uint64_t alloc_size;
++
++ /**
++ * It may be required to have some specific alignment requirements
++ * for physical back-up storage (e.g. for displayable surface).
++ * If 0 there is no special alignment requirement
++ */
++ uint64_t phys_alignment;
++
++ /**
++ * UMD should specify where to allocate memory and how it
++ * will be accessed by the CPU.
++ */
++ uint32_t preferred_heap;
++
++ /** Additional flags passed on allocation */
++ uint64_t flags;
++};
++
++/**
++ * Structure describing memory allocation request
++ *
++ * \sa amdgpu_bo_alloc()
++*/
++struct amdgpu_bo_alloc_result {
++ /** Assigned virtual MC Base Address */
++ uint64_t virtual_mc_base_address;
++
++ /** Handle of allocated memory to be used by the given process only. */
++ amdgpu_bo_handle buf_handle;
++};
++
++/**
++ * Special UMD specific information associated with buffer.
++ *
++ * It may be necessary to pass some buffer characteristics as part
++ * of buffer sharing. Such information is defined by the UMD and is
++ * opaque to libdrm_amdgpu as well as to the kernel driver.
++ *
++ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info,
++ * amdgpu_bo_import(), amdgpu_bo_export
++ *
++*/
++struct amdgpu_bo_metadata {
++ /** Special flag associated with surface */
++ uint64_t flags;
++
++ /**
++ * ASIC-specific tiling information (also used by DCE).
++ * The encoding is defined by the AMDGPU_TILING_* definitions.
++ */
++ uint64_t tiling_info;
++
++ /** Size of metadata associated with the buffer, in bytes. */
++ uint32_t size_metadata;
++
++ /** UMD specific metadata. Opaque for kernel */
++ uint32_t umd_metadata[64];
++};
++
++/**
++ * Structure describing allocated buffer. Client may need
++ * to query such information as part of 'sharing' buffers mechanism
++ *
++ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
++ * amdgpu_bo_import(), amdgpu_bo_export()
++*/
++struct amdgpu_bo_info {
++ /** Allocated memory size */
++ uint64_t alloc_size;
++
++ /**
++ * It may be required to have some specific alignment requirements
++ * for physical back-up storage.
++ */
++ uint64_t phys_alignment;
++
++ /**
++ * Assigned virtual MC Base Address.
++ * \note This information will be returned only if this buffer was
++ * allocated in the same process otherwise 0 will be returned.
++ */
++ uint64_t virtual_mc_base_address;
++
++ /** Heap where to allocate memory. */
++ uint32_t preferred_heap;
++
++ /** Additional allocation flags. */
++ uint64_t alloc_flags;
++
++ /** Metadata associated with buffer if any. */
++ struct amdgpu_bo_metadata metadata;
++};
++
++/**
++ * Structure with information about "imported" buffer
++ *
++ * \sa amdgpu_bo_import()
++ *
++ */
++struct amdgpu_bo_import_result {
++ /** Handle of memory/buffer to use */
++ amdgpu_bo_handle buf_handle;
++
++ /** Buffer size */
++ uint64_t alloc_size;
++
++ /** Assigned virtual MC Base Address */
++ uint64_t virtual_mc_base_address;
++};
++
++
++/**
++ *
++ * Structure to describe GDS partitioning information.
++ * \note OA and GWS resources are associated with the GDS partition
++ *
++ * \sa amdgpu_gpu_resource_query_gds_info
++ *
++*/
++struct amdgpu_gds_resource_info {
++ uint32_t gds_gfx_partition_size;
++ uint32_t compute_partition_size;
++ uint32_t gds_total_size;
++ uint32_t gws_per_gfx_partition;
++ uint32_t gws_per_compute_partition;
++ uint32_t oa_per_gfx_partition;
++ uint32_t oa_per_compute_partition;
++};
++
++
++
++/**
++ * Structure describing result of request to allocate GDS
++ *
++ * \sa amdgpu_gpu_resource_gds_alloc
++ *
++*/
++struct amdgpu_gds_alloc_info {
++ /** Handle assigned to gds allocation */
++ amdgpu_bo_handle resource_handle;
++
++ /** How much was really allocated */
++ uint32_t gds_memory_size;
++
++ /** Number of GWS resources allocated */
++ uint32_t gws;
++
++ /** Number of OA resources allocated */
++ uint32_t oa;
++};
++
++/**
++ * Structure to describe an allocated command buffer (a.k.a. IB)
++ *
++ * \sa amdgpu_cs_alloc_ib()
++ *
++*/
++struct amdgpu_cs_ib_alloc_result {
++ /** IB allocation handle */
++ amdgpu_ib_handle handle;
++
++ /** Assigned GPU VM MC Address of command buffer */
++ uint64_t mc_address;
++
++ /** Address to be used for CPU access */
++ void *cpu;
++};
++
++/**
++ * Structure describing IB
++ *
++ * \sa amdgpu_cs_request, amdgpu_cs_submit()
++ *
++*/
++struct amdgpu_cs_ib_info {
++ /** Special flags */
++ uint64_t flags;
++
++ /** Handle of command buffer */
++ amdgpu_ib_handle ib_handle;
++
++ /**
++ * Size of Command Buffer to be submitted.
++ * - The size is in units of dwords (4 bytes).
++ * - Must be less than or equal to the size of the allocated IB
++ * - Could be 0
++ */
++ uint32_t size;
++};
++
++/**
++ * Structure describing submission request
++ *
++ * \note We could have several IBs as packet. e.g. CE, CE, DE case for gfx
++ *
++ * \sa amdgpu_cs_submit()
++*/
++struct amdgpu_cs_request {
++ /** Specify flags with additional information */
++ uint64_t flags;
++
++ /** Specify HW IP block type to which to send the IB. */
++ unsigned ip_type;
++
++ /** IP instance index if there are several IPs of the same type. */
++ unsigned ip_instance;
++
++ /**
++ * Specify ring index of the IP. We could have several rings
++ * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
++ */
++ uint32_t ring;
++
++ /**
++ * Specify number of resource handles passed.
++ * Size of 'handles' array
++ *
++ */
++ uint32_t number_of_resources;
++
++ /** Array of resources used by submission. */
++ amdgpu_bo_handle *resources;
++
++ /** Array of resources flags. This is optional and can be NULL. */
++ uint8_t *resource_flags;
++
++ /** Number of IBs to submit in the field ibs. */
++ uint32_t number_of_ibs;
++
++ /**
++ * IBs to submit. Those IBs will be submitted together as a single entity.
++ */
++ struct amdgpu_cs_ib_info *ibs;
++};
++
++/**
++ * Structure describing request to check submission state using fence
++ *
++ * \sa amdgpu_cs_query_fence_status()
++ *
++*/
++struct amdgpu_cs_query_fence {
++
++ /** In which context IB was sent to execution */
++ amdgpu_context_handle context;
++
++ /** Timeout in nanoseconds. */
++ uint64_t timeout_ns;
++
++ /** To which HW IP type the fence belongs */
++ unsigned ip_type;
++
++ /** IP instance index if there are several IPs of the same type. */
++ unsigned ip_instance;
++
++ /** Ring index of the HW IP */
++ uint32_t ring;
++
++ /** Flags */
++ uint64_t flags;
++
++ /** Specify fence for which we need to check
++ * submission status.*/
++ uint64_t fence;
++};
++
++/**
++ * Structure which provides information about GPU VM MC address space
++ * alignment requirements
++ *
++ * \sa amdgpu_query_buffer_size_alignment
++ */
++struct amdgpu_buffer_size_alignments {
++ /** Size alignment requirement for allocation in
++ * local memory */
++ uint64_t size_local;
++
++ /**
++ * Size alignment requirement for allocation in remote memory
++ */
++ uint64_t size_remote;
++};
++
++
++/**
++ * Structure which provides information about a heap
++ *
++ * \sa amdgpu_query_heap_info()
++ *
++ */
++struct amdgpu_heap_info {
++ /** Theoretical max. available memory in the given heap */
++ uint64_t heap_size;
++
++ /**
++ * Number of bytes allocated in the heap. This includes all processes
++ * and private allocations in the kernel. It changes when new buffers
++ * are allocated, freed, and moved. It cannot be larger than
++ * heap_size.
++ */
++ uint64_t heap_usage;
++
++ /**
++ * Theoretical possible max. size of buffer which
++ * could be allocated in the given heap
++ */
++ uint64_t max_allocation;
++};
++
++
++
++/**
++ * Describe GPU h/w info needed for UMD correct initialization
++ *
++ * \sa amdgpu_query_gpu_info()
++*/
++struct amdgpu_gpu_info {
++ /** Asic id */
++ uint32_t asic_id;
++ /**< Chip revision */
++ uint32_t chip_rev;
++ /** Chip external revision */
++ uint32_t chip_external_rev;
++ /** Family ID */
++ uint32_t family_id;
++ /** Special flags */
++ uint64_t ids_flags;
++ /** max engine clock*/
++ uint64_t max_engine_clk;
++ /** number of shader engines */
++ uint32_t num_shader_engines;
++ /** number of shader arrays per engine */
++ uint32_t num_shader_arrays_per_engine;
++ /** Number of available good shader pipes */
++ uint32_t avail_quad_shader_pipes;
++ /** Max. number of shader pipes (including good and bad pipes) */
++ uint32_t max_quad_shader_pipes;
++ /** Number of parameter cache entries per shader quad pipe */
++ uint32_t cache_entries_per_quad_pipe;
++ /** Number of available graphics context */
++ uint32_t num_hw_gfx_contexts;
++ /** Number of render backend pipes */
++ uint32_t rb_pipes;
++ /** Active render backend pipe number */
++ uint32_t active_rb_pipes;
++ /** Enabled render backend pipe mask */
++ uint32_t enabled_rb_pipes_mask;
++ /** Frequency of GPU Counter */
++ uint32_t gpu_counter_freq;
++ /** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */
++ uint32_t backend_disable[4];
++ /** Value of MC_ARB_RAMCFG register*/
++ uint32_t mc_arb_ramcfg;
++ /** Value of GB_ADDR_CONFIG */
++ uint32_t gb_addr_cfg;
++ /** Values of the GB_TILE_MODE0..31 registers */
++ uint32_t gb_tile_mode[32];
++ /** Values of GB_MACROTILE_MODE0..15 registers */
++ uint32_t gb_macro_tile_mode[16];
++ /** Value of PA_SC_RASTER_CONFIG register per SE */
++ uint32_t pa_sc_raster_cfg[4];
++ /** Value of PA_SC_RASTER_CONFIG_1 register per SE */
++ uint32_t pa_sc_raster_cfg1[4];
++ /* CU info */
++ uint32_t cu_active_number;
++ uint32_t cu_ao_mask;
++ uint32_t cu_bitmap[4][4];
++};
++
++
++/*--------------------------------------------------------------------------*/
++/*------------------------- Functions --------------------------------------*/
++/*--------------------------------------------------------------------------*/
++
++/*
++ * Initialization / Cleanup
++ *
++*/
++
++
++/**
++ *
++ * \param fd - \c [in] File descriptor for AMD GPU device
++ * received previously as the result of
++ * e.g. drmOpen() call.
++ * For legacy fd type, the DRI2/DRI3 authentication
++ * should be done before calling this function.
++ * \param major_version - \c [out] Major version of library. It is assumed
++ * that adding new functionality will cause
++ * increase in major version
++ * \param minor_version - \c [out] Minor version of library
++ * \param device_handle - \c [out] Pointer to opaque context which should
++ * be passed as the first parameter on each
++ * API call
++ *
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ *
++ * \sa amdgpu_device_deinitialize()
++*/
++int amdgpu_device_initialize(int fd,
++ uint32_t *major_version,
++ uint32_t *minor_version,
++ amdgpu_device_handle *device_handle);
++
++
++
++/**
++ *
++ * When access to the library is no longer needed, this special
++ * function must be called to give it an opportunity to clean up any
++ * resources, if needed.
++ *
++ * \param device_handle - \c [in] Context associated with file
++ * descriptor for AMD GPU device
++ * received previously as the
++ * result e.g. of drmOpen() call.
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_device_initialize()
++ *
++*/
++int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
++
++
++/*
++ * Memory Management
++ *
++*/
++
++/**
++ * Allocate memory to be used by UMD for GPU related operations
++ *
++ * \param dev - \c [in] Device handle.
++ * See #amdgpu_device_initialize()
++ * \param alloc_buffer - \c [in] Pointer to the structure describing an
++ * allocation request
++ * \param info - \c [out] Pointer to structure which return
++ * information about allocated memory
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_bo_free()
++*/
++int amdgpu_bo_alloc(amdgpu_device_handle dev,
++ struct amdgpu_bo_alloc_request *alloc_buffer,
++ struct amdgpu_bo_alloc_result *info);
++
++/**
++ * Associate opaque data with buffer to be queried by another UMD
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param buf_handle - \c [in] Buffer handle
++ * \param info - \c [in] Metadata to associated with buffer
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++*/
++int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle,
++ struct amdgpu_bo_metadata *info);
++
++/**
++ * Query buffer information including metadata previously associated with
++ * buffer.
++ *
++ * \param dev - \c [in] Device handle.
++ * See #amdgpu_device_initialize()
++ * \param buf_handle - \c [in] Buffer handle
++ * \param info - \c [out] Structure describing buffer
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
++*/
++int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle,
++ struct amdgpu_bo_info *info);
++
++/**
++ * Allow others to get access to buffer
++ *
++ * \param dev - \c [in] Device handle.
++ * See #amdgpu_device_initialize()
++ * \param buf_handle - \c [in] Buffer handle
++ * \param type - \c [in] Type of handle requested
++ * \param shared_handle - \c [out] Special "shared" handle
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_bo_import()
++ *
++*/
++int amdgpu_bo_export(amdgpu_bo_handle buf_handle,
++ enum amdgpu_bo_handle_type type,
++ uint32_t *shared_handle);
++
++/**
++ * Request access to "shared" buffer
++ *
++ * \param dev - \c [in] Device handle.
++ * See #amdgpu_device_initialize()
++ * \param type - \c [in] Type of handle requested
++ * \param shared_handle - \c [in] Shared handle received as result "import"
++ * operation
++ * \param output - \c [out] Pointer to structure with information
++ * about imported buffer
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \note Buffer must be "imported" only using new "fd" (different from
++ * one used by "exporter").
++ *
++ * \sa amdgpu_bo_export()
++ *
++*/
++int amdgpu_bo_import(amdgpu_device_handle dev,
++ enum amdgpu_bo_handle_type type,
++ uint32_t shared_handle,
++ struct amdgpu_bo_import_result *output);
++
++/**
++ * Free previously allocated memory
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param buf_handle - \c [in] Buffer handle to free
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \note In the case of memory shared between different applications, all
++ * resources will be "physically" freed only when all such applications
++ * have terminated
++ * \note It is the UMD's responsibility to "free" a buffer only when there is
++ * no more GPU access
++ *
++ * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
++ *
++*/
++int amdgpu_bo_free(amdgpu_bo_handle buf_handle);
++
++/**
++ * Request CPU access to GPU-accessible memory
++ *
++ * \param buf_handle - \c [in] Buffer handle
++ * \param cpu - \c [out] CPU address to be used for access
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_bo_cpu_unmap()
++ *
++*/
++int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);
++
++/**
++ * Release CPU access to GPU memory
++ *
++ * \param buf_handle - \c [in] Buffer handle
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_bo_cpu_map()
++ *
++*/
++int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
++
++
++/**
++ * Wait until a buffer is not used by the device.
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_lib_initialize()
++ * \param buf_handle - \c [in] Buffer handle.
++ * \param timeout_ns - Timeout in nanoseconds.
++ * \param buffer_busy - 0 if buffer is idle, all GPU access was completed
++ * and no GPU access is scheduled.
++ * 1 if GPU access is in flight or scheduled
++ *
++ * \return 0 - on success
++ * <0 - AMD specific error code
++ */
++int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
++ uint64_t timeout_ns,
++ bool *buffer_busy);
++
++
++/*
++ * Special GPU Resources
++ *
++*/
++
++
++
++/**
++ * Query information about GDS
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param gds_info - \c [out] Pointer to structure to get GDS information
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++*/
++int amdgpu_gpu_resource_query_gds_info(amdgpu_device_handle dev,
++ struct amdgpu_gds_resource_info *
++ gds_info);
++
++
++/**
++ * Allocate GDS partitions
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param gds_size - \c [in] Size of gds allocation. Must be aligned
++ * accordingly.
++ * \param alloc_info - \c [out] Pointer to structure to receive information
++ * about allocation
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ *
++*/
++int amdgpu_gpu_resource_gds_alloc(amdgpu_device_handle dev,
++ uint32_t gds_size,
++ struct amdgpu_gds_alloc_info *alloc_info);
++
++
++
++
++/**
++ * Release GDS resource. When the GDS and associated resources are no longer
++ * needed, the UMD should free them
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param handle - \c [in] Handle assigned to GDS allocation
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++*/
++int amdgpu_gpu_resource_gds_free(amdgpu_bo_handle handle);
++
++
++
++/*
++ * GPU Execution context
++ *
++*/
++
++/**
++ * Create GPU execution Context
++ *
++ * For the purpose of the GPU Scheduler and GPU Robustness extensions it is
++ * necessary to identify rendering/compute contexts.
++ * It may also be necessary to associate some specific requirements with such
++ * contexts. The kernel driver will guarantee that submissions from the same
++ * context will always be executed in order (first come, first served).
++ *
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param context - \c [out] GPU Context handle
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_cs_ctx_free()
++ *
++*/
++int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
++ amdgpu_context_handle *context);
++
++/**
++ *
++ * Destroy GPU execution context when not needed any more
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param context - \c [in] GPU Context handle
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_cs_ctx_create()
++ *
++*/
++int amdgpu_cs_ctx_free(amdgpu_device_handle dev,
++ amdgpu_context_handle context);
++
++/**
++ * Query reset state for the specific GPU Context
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param context - \c [in] GPU Context handle
++ * \param state - \c [out] Reset state status
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_cs_ctx_create()
++ *
++*/
++int amdgpu_cs_query_reset_state(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ enum amdgpu_cs_ctx_reset_state *state);
++
++
++/*
++ * Command Buffers Management
++ *
++*/
++
++
++/**
++ * Allocate memory to be filled with PM4 packets and serve as the first
++ * entry point of execution (a.k.a. Indirect Buffer)
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param context - \c [in] GPU Context which will use IB
++ * \param ib_size - \c [in] Size of allocation
++ * \param output - \c [out] Pointer to structure to get information about
++ * allocated IB
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \sa amdgpu_cs_free_ib()
++ *
++*/
++int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ enum amdgpu_cs_ib_size ib_size,
++ struct amdgpu_cs_ib_alloc_result *output);
++
++/**
++ * If the UMD has allocated IBs which it no longer needs, those IBs must
++ * be explicitly freed
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param context - \c [in] GPU Context containing IB
++ * \param handle - \c [in] IB handle
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \note Libdrm_amdgpu will guarantee that it will correctly detect when it
++ * is safe to return IB to free pool
++ *
++ * \sa amdgpu_cs_alloc_ib()
++ *
++*/
++int amdgpu_cs_free_ib(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ amdgpu_ib_handle handle);
++
++/**
++ * Send request to submit command buffers to hardware.
++ *
++ * The kernel driver could use the GPU Scheduler to decide when to physically
++ * send this request to the hardware. Accordingly, this request could be put
++ * in a queue and sent for execution later. The only guarantee is that requests
++ * from the same GPU context to the same ip:ip_instance:ring will be executed in
++ * order.
++ *
++ *
++ * \param dev - \c [in] Device handle.
++ * See #amdgpu_device_initialize()
++ * \param context - \c [in] GPU Context
++ * \param flags - \c [in] Global submission flags
++ * \param ibs_request - \c [in] Pointer to submission requests.
++ * We could submit to several
++ * engines/rings simultaneously as an
++ * 'atomic' operation
++ * \param number_of_requests - \c [in] Number of submission requests
++ * \param fences - \c [out] Pointer to array of data to get
++ * fences to identify submission
++ * requests. Timestamps are valid
++ * in this GPU context and could be used
++ * to identify/detect completion of
++ * submission request
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \note It is assumed that by default the IB will be returned to the free pool
++ * automatically by libdrm_amdgpu when the submission has completed.
++ * The UMD may decide to re-use the same IB; in
++ * that case it should be explicitly freed.\n
++ * Accordingly, by default, after submission the UMD should not touch the passed
++ * IBs. If the UMD needs to re-use an IB then the special flag AMDGPU_CS_REUSE_IB
++ * must be passed.
++ *
++ * \note It is required to pass a correct resource list with the buffer handles
++ * which will be accessed by the command buffers in the submission.
++ * This will allow the kernel driver to correctly implement "paging".
++ * Failure to do so will have unpredictable results.
++ *
++ * \sa amdgpu_command_buffer_alloc(), amdgpu_command_buffer_free(),
++ * amdgpu_cs_query_fence_status()
++ *
++*/
++int amdgpu_cs_submit(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ uint64_t flags,
++ struct amdgpu_cs_request *ibs_request,
++ uint32_t number_of_requests,
++ uint64_t *fences);
++
++/**
++ * Query status of Command Buffer Submission
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param fence - \c [in] Structure describing fence to query
++ * \param expired - \c [out] If fence expired or not.\n
++ * 0 – if fence is not expired\n
++ * !0 - otherwise
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++ * \note If the UMD only wants to check the operation status and return
++ * immediately, a timeout value of 0 must be passed. In this case success
++ * will be returned if the submission has completed, or a timeout error
++ * code otherwise.
++ *
++ * \sa amdgpu_cs_submit()
++*/
++int amdgpu_cs_query_fence_status(amdgpu_device_handle dev,
++ struct amdgpu_cs_query_fence *fence,
++ uint32_t *expired);
++
++
++/*
++ * Query / Info API
++ *
++*/
++
++
++/**
++ * Query allocation size alignments
++ *
++ * The UMD should query the GPU VM MC size alignment requirements
++ * to be able to correctly choose the required allocation size and implement
++ * internal optimizations if needed.
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param info - \c [out] Pointer to structure to get size alignment
++ * requirements
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++*/
++int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
++ struct amdgpu_buffer_size_alignments
++ *info);
++
++
++
++/**
++ * Query firmware versions
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param fw_type - \c [in] AMDGPU_INFO_FW_*
++ * \param ip_instance - \c [in] Index of the IP block of the same type.
++ * \param index - \c [in] Index of the engine. (for SDMA and MEC)
++ * \param version - \c [out] Pointer to to the "version" return value
++ * \param feature - \c [out] Pointer to to the "feature" return value
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++*/
++int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
++ unsigned ip_instance, unsigned index,
++ uint32_t *version, uint32_t *feature);
++
++
++
++/**
++ * Query the number of HW IP instances of a certain type.
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
++ * \param count - \c [out] Pointer to structure to get information
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++*/
++int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
++ uint32_t *count);
++
++
++
++/**
++ * Query engine information
++ *
++ * This query allows the UMD to query information about different engines and
++ * their capabilities.
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
++ * \param ip_instance - \c [in] Index of the IP block of the same type.
++ * \param info - \c [out] Pointer to structure to get information
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++*/
++int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
++ unsigned ip_instance,
++ struct drm_amdgpu_info_hw_ip *info);
++
++
++
++
++/**
++ * Query heap information
++ *
++ * This query allows UMD to query potentially available memory resources and
++ * adjust their logic if necessary.
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param heap - \c [in] Heap type
++ * \param info - \c [in] Pointer to structure to get needed information
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++*/
++int amdgpu_query_heap_info(amdgpu_device_handle dev,
++ uint32_t heap,
++ uint32_t flags,
++ struct amdgpu_heap_info *info);
++
++
++
++/**
++ * Get the CRTC ID from the mode object ID
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param id - \c [in] Mode object ID
++ * \param result - \c [in] Pointer to the CRTC ID
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++*/
++int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
++ int32_t *result);
++
++
++
++/**
++ * Query GPU H/w Info
++ *
++ * Query hardware specific information
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param heap - \c [in] Heap type
++ * \param info - \c [in] Pointer to structure to get needed information
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX Error code
++ *
++*/
++int amdgpu_query_gpu_info(amdgpu_device_handle dev,
++ struct amdgpu_gpu_info *info);
++
++
++
++/**
++ * Query hardware or driver information.
++ *
++ * The return size is query-specific and depends on the "info_id" parameter.
++ * No more than "size" bytes is returned.
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param info_id - \c [in] AMDGPU_INFO_*
++ * \param size - \c [in] Size of the returned value.
++ * \param value - \c [out] Pointer to the return value.
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX error code
++ *
++*/
++int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
++ unsigned size, void *value);
++
++
++
++/**
++ * Read a set of consecutive memory-mapped registers.
++ * Not all registers are allowed to be read by userspace.
++ *
++ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
++ * \param dword_offset - \c [in] Register offset in dwords
++ * \param count - \c [in] The number of registers to read starting
++ * from the offset
++ * \param instance - \c [in] GRBM_GFX_INDEX selector. It may have other
++ * uses. Set it to 0xffffffff if unsure.
++ * \param flags - \c [in] Flags with additional information.
++ * \param values - \c [out] The pointer to return values.
++ *
++ * \return 0 on success\n
++ * >0 - AMD specific error code\n
++ * <0 - Negative POSIX error code
++ *
++*/
++int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
++ unsigned count, uint32_t instance, uint32_t flags,
++ uint32_t *values);
++
++
++
++/**
++ * Request GPU access to user allocated memory e.g. via "malloc"
++ *
++ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
++ * \param cpu - [in] CPU address of user allocated memory which we
++ * want to map to GPU address space (make GPU accessible)
++ * (This address must be correctly aligned).
++ * \param size - [in] Size of allocation (must be correctly aligned)
++ * \param amdgpu_bo_alloc_result - [out] Handle of allocation to be passed as resource
++ * on submission and be used in other operations (e.g. for VA submission).
++ * (Temporarily defined as amdgpu_bo_alloc_result in order to return the MC address.)
++ *
++ *
++ * \return 0 on success
++ * >0 - AMD specific error code
++ * <0 - Negative POSIX Error code
++ *
++ *
++ * \note
++ * This call doesn't guarantee that such memory will be persistently
++ * "locked" / made non-pageable. The purpose of this call is to give the
++ * GPU an opportunity to access this resource during submission.
++ *
++ * The maximum amount of memory which could be mapped in this call depends
++ * on whether overcommit is disabled or not. If overcommit is disabled, the max.
++ * amount of memory to be pinned will be limited by the "free" size left in the
++ * total amount of memory which could be locked simultaneously ("GART" size).
++ *
++ * Supported (theoretical) max. size of mapping is restricted only by
++ * "GART" size.
++ *
++ * It is the responsibility of the caller to correctly specify access rights
++ * on VA assignment.
++*/
++int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
++ void *cpu,
++ uint64_t size,
++ struct amdgpu_bo_alloc_result *info);
++
++
++#endif /* #ifdef _amdgpu_h_ */
++
++
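To make the flow documented above concrete, here is a minimal sketch of the initialize/allocate/map/free sequence using only the entry points declared in amdgpu.h. It is an illustration, not part of the patch; obtaining the fd via drmOpen("amdgpu", NULL) and the 4 KiB alignment are assumptions, and a real caller would consult amdgpu_query_buffer_size_alignment() first:

    /* Sketch: allocate a 1 MiB GTT buffer, touch it from the CPU, clean up. */
    #include <stdint.h>
    #include <string.h>
    #include "xf86drm.h"
    #include "amdgpu.h"
    #include "amdgpu_drm.h"

    static int example_alloc_and_map(void)
    {
        uint32_t major, minor;
        amdgpu_device_handle dev;
        struct amdgpu_bo_alloc_request req;
        struct amdgpu_bo_alloc_result res;
        void *cpu;
        int fd, r;

        fd = drmOpen("amdgpu", NULL);          /* assumed way of getting the device fd */
        if (fd < 0)
            return fd;

        r = amdgpu_device_initialize(fd, &major, &minor, &dev);
        if (r)
            return r;

        memset(&req, 0, sizeof(req));
        req.alloc_size = 1024 * 1024;          /* 1 MiB */
        req.phys_alignment = 4096;             /* assumed; query the real requirement */
        req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

        r = amdgpu_bo_alloc(dev, &req, &res);
        if (r == 0) {
            if (amdgpu_bo_cpu_map(res.buf_handle, &cpu) == 0) {
                memset(cpu, 0, req.alloc_size); /* CPU writes through the mapping */
                amdgpu_bo_cpu_unmap(res.buf_handle);
            }
            amdgpu_bo_free(res.buf_handle);
        }

        return amdgpu_device_deinitialize(dev);
    }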
+diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
+new file mode 100644
+index 0000000..ce7e9d1
+--- /dev/null
++++ b/amdgpu/amdgpu_bo.c
+@@ -0,0 +1,622 @@
++/*
++ * Copyright © 2014 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#define _FILE_OFFSET_BITS 64
++#include <stdlib.h>
++#include <stdio.h>
++#include <string.h>
++#include <errno.h>
++#include <fcntl.h>
++#include <unistd.h>
++#include <sys/ioctl.h>
++#include <sys/mman.h>
++#include <sys/time.h>
++
++#include "xf86drm.h"
++#include "amdgpu_drm.h"
++#include "amdgpu_internal.h"
++#include "util_hash_table.h"
++
++static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
++ uint32_t handle)
++{
++ struct drm_gem_close args = {};
++
++ args.handle = handle;
++ drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
++}
++
++void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
++{
++ /* Remove the buffer from the hash tables. */
++ pthread_mutex_lock(&bo->dev->bo_table_mutex);
++ util_hash_table_remove(bo->dev->bo_handles,
++ (void*)(uintptr_t)bo->handle);
++ if (bo->flink_name) {
++ util_hash_table_remove(bo->dev->bo_flink_names,
++ (void*)(uintptr_t)bo->flink_name);
++ }
++ pthread_mutex_unlock(&bo->dev->bo_table_mutex);
++
++ /* Release CPU access. */
++ if (bo->cpu_map_count > 0) {
++ bo->cpu_map_count = 1;
++ amdgpu_bo_cpu_unmap(bo);
++ }
++
++ amdgpu_close_kms_handle(bo->dev, bo->handle);
++ pthread_mutex_destroy(&bo->cpu_access_mutex);
++ amdgpu_vamgr_free_va(&bo->dev->vamgr, bo->virtual_mc_base_address, bo->alloc_size);
++ free(bo);
++}
++
++int amdgpu_bo_alloc(amdgpu_device_handle dev,
++ struct amdgpu_bo_alloc_request *alloc_buffer,
++ struct amdgpu_bo_alloc_result *info)
++{
++ struct amdgpu_bo *bo;
++ union drm_amdgpu_gem_create args;
++ unsigned heap = alloc_buffer->preferred_heap;
++ int r = 0;
++
++ /* It's an error if the heap is not specified */
++ if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
++ return -EINVAL;
++
++ bo = calloc(1, sizeof(struct amdgpu_bo));
++ if (!bo)
++ return -ENOMEM;
++
++ atomic_set(&bo->refcount, 1);
++ bo->dev = dev;
++ bo->alloc_size = alloc_buffer->alloc_size;
++
++ memset(&args, 0, sizeof(args));
++ args.in.bo_size = alloc_buffer->alloc_size;
++ args.in.alignment = alloc_buffer->phys_alignment;
++
++ /* Set the placement. */
++ args.in.domains = heap & AMDGPU_GEM_DOMAIN_MASK;
++ args.in.domain_flags = alloc_buffer->flags & AMDGPU_GEM_CREATE_CPU_GTT_MASK;
++
++ /* Allocate the buffer with the preferred heap. */
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
++ &args, sizeof(args));
++ if (r) {
++ free(bo);
++ return r;
++ }
++
++ bo->handle = args.out.handle;
++
++ pthread_mutex_init(&bo->cpu_access_mutex, NULL);
++
++ /* map the buffer to the GPU virtual address space */
++ {
++ union drm_amdgpu_gem_va va;
++
++ memset(&va, 0, sizeof(va));
++
++ bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, alloc_buffer->alloc_size, alloc_buffer->phys_alignment);
++
++ va.in.handle = bo->handle;
++ va.in.operation = AMDGPU_VA_OP_MAP;
++ va.in.flags = AMDGPU_VM_PAGE_READABLE |
++ AMDGPU_VM_PAGE_WRITEABLE |
++ AMDGPU_VM_PAGE_EXECUTABLE;
++ va.in.va_address = bo->virtual_mc_base_address;
++
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
++ if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
++ amdgpu_bo_free_internal(bo);
++ return r;
++ }
++ pthread_mutex_lock(&dev->bo_table_mutex);
++
++ util_hash_table_set(dev->bo_vas,
++ (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++ }
++
++ info->buf_handle = bo;
++ info->virtual_mc_base_address = bo->virtual_mc_base_address;
++ return 0;
++}
++
++int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
++ struct amdgpu_bo_metadata *info)
++{
++ struct drm_amdgpu_gem_metadata args = {};
++
++ args.handle = bo->handle;
++ args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
++ args.data.flags = info->flags;
++ args.data.tiling_info = info->tiling_info;
++
++ if (info->size_metadata > sizeof(args.data.data))
++ return -EINVAL;
++
++ if (info->size_metadata) {
++ args.data.data_size_bytes = info->size_metadata;
++ memcpy(args.data.data, info->umd_metadata, info->size_metadata);
++ }
++
++ return drmCommandWriteRead(bo->dev->fd,
++ DRM_AMDGPU_GEM_METADATA,
++ &args, sizeof(args));
++}
++
++int amdgpu_bo_query_info(amdgpu_bo_handle bo,
++ struct amdgpu_bo_info *info)
++{
++ struct drm_amdgpu_gem_metadata metadata = {};
++ struct drm_amdgpu_gem_create_in bo_info = {};
++ struct drm_amdgpu_gem_op gem_op = {};
++ int r;
++
++ /* Query metadata. */
++ metadata.handle = bo->handle;
++ metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;
++
++ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
++ &metadata, sizeof(metadata));
++ if (r)
++ return r;
++
++ if (metadata.data.data_size_bytes >
++ sizeof(info->metadata.umd_metadata))
++ return -EINVAL;
++
++ /* Query buffer info. */
++ gem_op.handle = bo->handle;
++ gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
++ gem_op.value = (intptr_t)&bo_info;
++
++ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
++ &gem_op, sizeof(gem_op));
++ if (r)
++ return r;
++
++ memset(info, 0, sizeof(*info));
++ info->alloc_size = bo_info.bo_size;
++ info->phys_alignment = bo_info.alignment;
++ info->virtual_mc_base_address = bo->virtual_mc_base_address;
++ info->preferred_heap = bo_info.domains;
++ info->alloc_flags = bo_info.domain_flags;
++ info->metadata.flags = metadata.data.flags;
++ info->metadata.tiling_info = metadata.data.tiling_info;
++
++ info->metadata.size_metadata = metadata.data.data_size_bytes;
++ if (metadata.data.data_size_bytes > 0)
++ memcpy(info->metadata.umd_metadata, metadata.data.data,
++ metadata.data.data_size_bytes);
++
++ return 0;
++}
++
++static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
++{
++ pthread_mutex_lock(&bo->dev->bo_table_mutex);
++ util_hash_table_set(bo->dev->bo_handles,
++ (void*)(uintptr_t)bo->handle, bo);
++ pthread_mutex_unlock(&bo->dev->bo_table_mutex);
++}
++
++static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
++{
++ struct drm_gem_flink flink;
++ int fd, dma_fd;
++ uint32_t handle;
++ int r;
++
++ fd = bo->dev->fd;
++ handle = bo->handle;
++ if (bo->flink_name)
++ return 0;
++
++
++ if (bo->dev->flink_fd != bo->dev->fd) {
++ r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
++ &dma_fd);
++ if (!r) {
++ r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
++ close(dma_fd);
++ }
++ if (r)
++ return r;
++ fd = bo->dev->flink_fd;
++ }
++ memset(&flink, 0, sizeof(flink));
++ flink.handle = handle;
++
++ r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
++ if (r)
++ return r;
++
++ bo->flink_name = flink.name;
++
++ if (bo->dev->flink_fd != bo->dev->fd) {
++ struct drm_gem_close args = {};
++ args.handle = handle;
++ drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
++ }
++
++ pthread_mutex_lock(&bo->dev->bo_table_mutex);
++ util_hash_table_set(bo->dev->bo_flink_names,
++ (void*)(uintptr_t)bo->flink_name,
++ bo);
++ pthread_mutex_unlock(&bo->dev->bo_table_mutex);
++
++ return 0;
++}
++
++int amdgpu_bo_export(amdgpu_bo_handle bo,
++ enum amdgpu_bo_handle_type type,
++ uint32_t *shared_handle)
++{
++ int r;
++
++ switch (type) {
++ case amdgpu_bo_handle_type_gem_flink_name:
++ r = amdgpu_bo_export_flink(bo);
++ if (r)
++ return r;
++
++ *shared_handle = bo->flink_name;
++ return 0;
++
++ case amdgpu_bo_handle_type_kms:
++ r = amdgpu_bo_export_flink(bo);
++ if (r)
++ return r;
++
++ amdgpu_add_handle_to_table(bo);
++ *shared_handle = bo->handle;
++ return 0;
++
++ case amdgpu_bo_handle_type_dma_buf_fd:
++ amdgpu_add_handle_to_table(bo);
++ return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
++ (int*)shared_handle);
++ }
++ return -EINVAL;
++}
++
++int amdgpu_bo_import(amdgpu_device_handle dev,
++ enum amdgpu_bo_handle_type type,
++ uint32_t shared_handle,
++ struct amdgpu_bo_import_result *output)
++{
++ struct drm_gem_open open_arg = {};
++ union drm_amdgpu_gem_va va;
++ struct amdgpu_bo *bo = NULL;
++ int r;
++ int dma_fd;
++ uint64_t dma_buf_size = 0;
++
++ /* Convert a DMA buf handle to a KMS handle now. */
++ if (type == amdgpu_bo_handle_type_dma_buf_fd) {
++ uint32_t handle;
++ off_t size;
++
++ /* Get a KMS handle. */
++ r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
++ if (r) {
++ return r;
++ }
++
++ /* Query the buffer size. */
++ size = lseek(shared_handle, 0, SEEK_END);
++ if (size == (off_t)-1) {
++ amdgpu_close_kms_handle(dev, handle);
++ return -errno;
++ }
++ lseek(shared_handle, 0, SEEK_SET);
++
++ dma_buf_size = size;
++ shared_handle = handle;
++ }
++
++ /* We must maintain a list of pairs <handle, bo>, so that we always
++ * return the same amdgpu_bo instance for the same handle. */
++ pthread_mutex_lock(&dev->bo_table_mutex);
++
++ /* If we have already created a buffer with this handle, find it. */
++ switch (type) {
++ case amdgpu_bo_handle_type_gem_flink_name:
++ bo = util_hash_table_get(dev->bo_flink_names,
++ (void*)(uintptr_t)shared_handle);
++ break;
++
++ case amdgpu_bo_handle_type_dma_buf_fd:
++ bo = util_hash_table_get(dev->bo_handles,
++ (void*)(uintptr_t)shared_handle);
++ break;
++
++ case amdgpu_bo_handle_type_kms:
++ /* Importing a KMS handle is not allowed. */
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++ return -EPERM;
++
++ default:
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++ return -EINVAL;
++ }
++
++ if (bo) {
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++
++ /* The buffer already exists, just bump the refcount. */
++ atomic_inc(&bo->refcount);
++
++ output->buf_handle = bo;
++ output->alloc_size = bo->alloc_size;
++ output->virtual_mc_base_address =
++ bo->virtual_mc_base_address;
++ return 0;
++ }
++
++ bo = calloc(1, sizeof(struct amdgpu_bo));
++ if (!bo) {
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++ if (type == amdgpu_bo_handle_type_dma_buf_fd) {
++ amdgpu_close_kms_handle(dev, shared_handle);
++ }
++ return -ENOMEM;
++ }
++
++ /* Open the handle. */
++ switch (type) {
++ case amdgpu_bo_handle_type_gem_flink_name:
++ open_arg.name = shared_handle;
++ r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
++ if (r) {
++ free(bo);
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++ return r;
++ }
++
++ bo->handle = open_arg.handle;
++ if (dev->flink_fd != dev->fd) {
++ r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
++ if (r) {
++ free(bo);
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++ return r;
++ }
++ r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );
++
++ close(dma_fd);
++
++ if (r) {
++ free(bo);
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++ return r;
++ }
++ }
++ bo->flink_name = shared_handle;
++ bo->alloc_size = open_arg.size;
++ util_hash_table_set(dev->bo_flink_names,
++ (void*)(uintptr_t)bo->flink_name, bo);
++ break;
++
++ case amdgpu_bo_handle_type_dma_buf_fd:
++ bo->handle = shared_handle;
++ bo->alloc_size = dma_buf_size;
++ break;
++
++ case amdgpu_bo_handle_type_kms:
++ assert(0); /* unreachable */
++ }
++
++ /* Initialize it. */
++ atomic_set(&bo->refcount, 1);
++ bo->dev = dev;
++ pthread_mutex_init(&bo->cpu_access_mutex, NULL);
++
++ bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, bo->alloc_size, 1 << 20);
++
++ memset(&va, 0, sizeof(va));
++ va.in.handle = bo->handle;
++ va.in.operation = AMDGPU_VA_OP_MAP;
++ va.in.va_address = bo->virtual_mc_base_address;
++ va.in.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
++ AMDGPU_VM_PAGE_EXECUTABLE;
++
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
++ if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++ amdgpu_vamgr_free_va(&dev->vamgr, bo->virtual_mc_base_address, bo->alloc_size);
++ amdgpu_bo_reference(&bo, NULL);
++ return r;
++ }
++
++ util_hash_table_set(dev->bo_vas,
++ (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
++ util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
++ pthread_mutex_unlock(&dev->bo_table_mutex);
++
++ output->buf_handle = bo;
++ output->alloc_size = bo->alloc_size;
++ output->virtual_mc_base_address = bo->virtual_mc_base_address;
++ return 0;
++}
++
++int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
++{
++ /* Just drop the reference. */
++ amdgpu_bo_reference(&buf_handle, NULL);
++ return 0;
++}
++
++int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
++{
++ union drm_amdgpu_gem_mmap args;
++ void *ptr;
++ int r;
++
++ pthread_mutex_lock(&bo->cpu_access_mutex);
++
++ if (bo->cpu_ptr) {
++ /* already mapped */
++ assert(bo->cpu_map_count > 0);
++ bo->cpu_map_count++;
++ *cpu = bo->cpu_ptr;
++ pthread_mutex_unlock(&bo->cpu_access_mutex);
++ return 0;
++ }
++
++ assert(bo->cpu_map_count == 0);
++
++ memset(&args, 0, sizeof(args));
++
++ /* Query the buffer address (args.addr_ptr).
++ * The kernel driver ignores the offset and size parameters. */
++ args.in.handle = bo->handle;
++
++ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
++ sizeof(args));
++ if (r) {
++ pthread_mutex_unlock(&bo->cpu_access_mutex);
++ return r;
++ }
++
++ /* Map the buffer. */
++ ptr = mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
++ bo->dev->fd, args.out.addr_ptr);
++ if (ptr == MAP_FAILED) {
++ pthread_mutex_unlock(&bo->cpu_access_mutex);
++ return -errno;
++ }
++
++ bo->cpu_ptr = ptr;
++ bo->cpu_map_count = 1;
++ pthread_mutex_unlock(&bo->cpu_access_mutex);
++
++ *cpu = ptr;
++ return 0;
++}
++
++int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
++{
++ int r;
++
++ pthread_mutex_lock(&bo->cpu_access_mutex);
++ assert(bo->cpu_map_count >= 0);
++
++ if (bo->cpu_map_count == 0) {
++ /* not mapped */
++ pthread_mutex_unlock(&bo->cpu_access_mutex);
++ return -EBADMSG;
++ }
++
++ bo->cpu_map_count--;
++ if (bo->cpu_map_count > 0) {
++ /* mapped multiple times */
++ pthread_mutex_unlock(&bo->cpu_access_mutex);
++ return 0;
++ }
++
++ r = munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
++ bo->cpu_ptr = NULL;
++ pthread_mutex_unlock(&bo->cpu_access_mutex);
++ return r;
++}
++
++int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
++ struct amdgpu_buffer_size_alignments *info)
++{
++ info->size_local = dev->dev_info.pte_fragment_size;
++ info->size_remote = dev->dev_info.gart_page_size;
++ return 0;
++}
++
++int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
++ uint64_t timeout_ns,
++ bool *busy)
++{
++ union drm_amdgpu_gem_wait_idle args;
++ int r;
++
++ memset(&args, 0, sizeof(args));
++ args.in.handle = bo->handle;
++ args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);
++
++ r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
++ &args, sizeof(args));
++
++ if (r == 0) {
++ *busy = args.out.status;
++ return 0;
++ } else {
++ fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
++ return r;
++ }
++}
++
++int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
++ void *cpu,
++ uint64_t size,
++ struct amdgpu_bo_alloc_result *info)
++{
++ int r;
++ struct amdgpu_bo *bo;
++ struct drm_amdgpu_gem_userptr args;
++ union drm_amdgpu_gem_va va;
++
++ memset(&args, 0, sizeof(args));
++ args.addr = (uint64_t)cpu;
++ args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
++ args.size = size;
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
++ &args, sizeof(args));
++ if (r)
++ return r;
++
++ bo = calloc(1, sizeof(struct amdgpu_bo));
++ if (!bo)
++ return -ENOMEM;
++
++ atomic_set(&bo->refcount, 1);
++ bo->dev = dev;
++ bo->alloc_size = size;
++ bo->handle = args.handle;
++ bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, size, 4 * 1024);
++
++ memset(&va, 0, sizeof(va));
++ va.in.handle = bo->handle;
++ va.in.operation = AMDGPU_VA_OP_MAP;
++ va.in.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
++ AMDGPU_VM_PAGE_EXECUTABLE;
++ va.in.va_address = bo->virtual_mc_base_address;
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
++ if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
++ amdgpu_bo_free_internal(bo);
++ return r;
++ }
++ util_hash_table_set(dev->bo_vas,
++ (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
++ info->buf_handle = bo;
++ info->virtual_mc_base_address = bo->virtual_mc_base_address;
++ return r;
++}
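To tie the helpers in amdgpu_bo.c together, the following hypothetical sketch (not part of the patch) walks the buffer-object lifecycle they implement: allocate a small GTT buffer, map it for CPU access, write it, then unmap and free it. It assumes `dev` is a valid, initialized device handle.

/* Hypothetical sketch, not part of this patch. */
#include <string.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

static int write_scratch_bo(amdgpu_device_handle dev)
{
	struct amdgpu_bo_alloc_request req;
	struct amdgpu_bo_alloc_result res;
	void *cpu;
	int r;

	memset(&req, 0, sizeof(req));
	req.alloc_size = 4096;
	req.phys_alignment = 4096;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;	/* CPU-visible heap */

	r = amdgpu_bo_alloc(dev, &req, &res);
	if (r)
		return r;

	r = amdgpu_bo_cpu_map(res.buf_handle, &cpu);
	if (r) {
		amdgpu_bo_free(res.buf_handle);
		return r;
	}

	memset(cpu, 0, req.alloc_size);		/* write through the CPU mapping */

	amdgpu_bo_cpu_unmap(res.buf_handle);
	return amdgpu_bo_free(res.buf_handle);	/* drops the last reference */
}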
+diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
+new file mode 100644
+index 0000000..614904d
+--- /dev/null
++++ b/amdgpu/amdgpu_cs.c
+@@ -0,0 +1,981 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++#include <stdlib.h>
++#include <stdio.h>
++#include <string.h>
++#include <errno.h>
++#include <pthread.h>
++#include <sched.h>
++#include <sys/ioctl.h>
++
++#include "xf86drm.h"
++#include "amdgpu_drm.h"
++#include "amdgpu_internal.h"
++
++/**
++ * Create an IB buffer.
++ *
++ * \param dev - \c [in] Device handle
++ * \param context - \c [in] GPU Context
++ * \param ib_size - \c [in] Size of allocation
++ * \param ib - \c [out] return the pointer to the created IB buffer
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++static int amdgpu_cs_create_ib(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ enum amdgpu_cs_ib_size ib_size,
++ amdgpu_ib_handle *ib)
++{
++ struct amdgpu_bo_alloc_request alloc_buffer;
++ struct amdgpu_bo_alloc_result info;
++ int r;
++ void *cpu;
++ struct amdgpu_ib *new_ib;
++
++ memset(&alloc_buffer, 0, sizeof(alloc_buffer));
++
++ switch (ib_size) {
++ case amdgpu_cs_ib_size_4K:
++ alloc_buffer.alloc_size = 4 * 1024;
++ break;
++ case amdgpu_cs_ib_size_16K:
++ alloc_buffer.alloc_size = 16 * 1024;
++ break;
++ case amdgpu_cs_ib_size_32K:
++ alloc_buffer.alloc_size = 32 * 1024;
++ break;
++ case amdgpu_cs_ib_size_64K:
++ alloc_buffer.alloc_size = 64 * 1024;
++ break;
++ case amdgpu_cs_ib_size_128K:
++ alloc_buffer.alloc_size = 128 * 1024;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ alloc_buffer.phys_alignment = 4 * 1024;
++
++ alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
++
++ r = amdgpu_bo_alloc(dev,
++ &alloc_buffer,
++ &info);
++ if (r)
++ return r;
++
++ r = amdgpu_bo_cpu_map(info.buf_handle, &cpu);
++ if (r) {
++ amdgpu_bo_free(info.buf_handle);
++ return r;
++ }
++
++ new_ib = malloc(sizeof(struct amdgpu_ib));
++ if (NULL == new_ib) {
++ amdgpu_bo_cpu_unmap(info.buf_handle);
++ amdgpu_bo_free(info.buf_handle);
++ return -ENOMEM;
++ }
++
++ new_ib->buf_handle = info.buf_handle;
++ new_ib->cpu = cpu;
++ new_ib->virtual_mc_base_address = info.virtual_mc_base_address;
++ new_ib->ib_size = ib_size;
++ *ib = new_ib;
++ return 0;
++}
++
++/**
++ * Destroy an IB buffer.
++ *
++ * \param dev - \c [in] Device handle
++ * \param ib - \c [in] the IB buffer
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++static int amdgpu_cs_destroy_ib(amdgpu_device_handle dev,
++ amdgpu_ib_handle ib)
++{
++ int r;
++ r = amdgpu_bo_cpu_unmap(ib->buf_handle);
++ if (r)
++ return r;
++
++ r = amdgpu_bo_free(ib->buf_handle);
++ if (r)
++ return r;
++
++ free(ib);
++ return 0;
++}
++
++/**
++ * Initialize IB pools to empty.
++ *
++ * \param context - \c [in] GPU Context
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++static int amdgpu_cs_init_ib_pool(amdgpu_context_handle context)
++{
++ int i;
++ int r;
++
++ r = pthread_mutex_init(&context->pool_mutex, NULL);
++ if (r)
++ return r;
++
++ for (i = 0; i < AMDGPU_CS_IB_SIZE_NUM; i++)
++ LIST_INITHEAD(&context->ib_pools[i]);
++
++ return 0;
++}
++
++/**
++ * Allocate an IB buffer from IB pools.
++ *
++ * \param dev - \c [in] Device handle
++ * \param context - \c [in] GPU Context
++ * \param ib_size - \c [in] Size of allocation
++ * \param ib - \c [out] return the pointer to the allocated IB buffer
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++static int amdgpu_cs_alloc_from_ib_pool(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ enum amdgpu_cs_ib_size ib_size,
++ amdgpu_ib_handle *ib)
++{
++ int r;
++ struct list_head *head;
++ head = &context->ib_pools[ib_size];
++
++ r = -ENOMEM;
++ pthread_mutex_lock(&context->pool_mutex);
++ if (!LIST_IS_EMPTY(head)) {
++ *ib = LIST_ENTRY(struct amdgpu_ib, head->next, list_node);
++ LIST_DEL(&(*ib)->list_node);
++ r = 0;
++ }
++ pthread_mutex_unlock(&context->pool_mutex);
++
++ return r;
++}
++
++/**
++ * Free an IB buffer to IB pools.
++ *
++ * \param context - \c [in] GPU Context
++ * \param ib - \c [in] the IB buffer
++ *
++ * \return N/A
++*/
++static void amdgpu_cs_free_to_ib_pool(amdgpu_context_handle context,
++ amdgpu_ib_handle ib)
++{
++ struct list_head *head;
++ head = &context->ib_pools[ib->ib_size];
++ pthread_mutex_lock(&context->pool_mutex);
++ LIST_ADD(&ib->list_node, head);
++ pthread_mutex_unlock(&context->pool_mutex);
++ return;
++}
++
++/**
++ * Destroy all IB buffers in pools
++ *
++ * \param dev - \c [in] Device handle
++ * \param context - \c [in] GPU Context
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++static int amdgpu_cs_destroy_ib_pool(amdgpu_device_handle dev,
++ amdgpu_context_handle context)
++{
++ int i;
++ int r;
++ struct list_head *head;
++ struct amdgpu_ib *next;
++ struct amdgpu_ib *storage;
++
++ r = 0;
++ pthread_mutex_lock(&context->pool_mutex);
++ for (i = 0; i < AMDGPU_CS_IB_SIZE_NUM; i++) {
++ head = &context->ib_pools[i];
++ LIST_FOR_EACH_ENTRY_SAFE(next, storage, head, list_node) {
++ r = amdgpu_cs_destroy_ib(dev, next);
++ if (r)
++ break;
++ }
++ }
++ pthread_mutex_unlock(&context->pool_mutex);
++ pthread_mutex_destroy(&context->pool_mutex);
++ return r;
++}
++
++/**
++ * Initialize pending IB lists
++ *
++ * \param context - \c [in] GPU Context
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++static int amdgpu_cs_init_pendings(amdgpu_context_handle context)
++{
++ unsigned ip, inst;
++ uint32_t ring;
++ int r;
++
++ r = pthread_mutex_init(&context->pendings_mutex, NULL);
++ if (r)
++ return r;
++
++ for (ip = 0; ip < AMDGPU_HW_IP_NUM; ip++)
++ for (inst = 0; inst < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; inst++)
++ for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++)
++ LIST_INITHEAD(&context->pendings[ip][inst][ring]);
++
++ LIST_INITHEAD(&context->freed);
++ return 0;
++}
++
++/**
++ * Free pending IBs
++ *
++ * \param dev - \c [in] Device handle
++ * \param context - \c [in] GPU Context
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++static int amdgpu_cs_destroy_pendings(amdgpu_device_handle dev,
++ amdgpu_context_handle context)
++{
++ int ip, inst;
++ uint32_t ring;
++ int r;
++ struct amdgpu_ib *next;
++ struct amdgpu_ib *s;
++ struct list_head *head;
++
++ r = 0;
++ pthread_mutex_lock(&context->pendings_mutex);
++ for (ip = 0; ip < AMDGPU_HW_IP_NUM; ip++)
++ for (inst = 0; inst < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; inst++)
++ for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++) {
++ head = &context->pendings[ip][inst][ring];
++ LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node) {
++ r = amdgpu_cs_destroy_ib(dev, next);
++ if (r)
++ break;
++ }
++ }
++
++ head = &context->freed;
++ LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node) {
++ r = amdgpu_cs_destroy_ib(dev, next);
++ if (r)
++ break;
++ }
++
++ pthread_mutex_unlock(&context->pendings_mutex);
++ pthread_mutex_destroy(&context->pendings_mutex);
++ return r;
++}
++
++/**
++ * Add IB to pending IB lists without holding sequence_mutex.
++ *
++ * \param context - \c [in] GPU Context
++ * \param ib - \c [in] IB to be added to the pending lists
++ * \param ip - \c [in] hw ip block
++ * \param ip_instance - \c [in] instance of the hw ip block
++ * \param ring - \c [in] Ring of hw ip
++ *
++ * \return N/A
++*/
++static void amdgpu_cs_add_pending(amdgpu_context_handle context,
++ amdgpu_ib_handle ib,
++ unsigned ip, unsigned ip_instance,
++ uint32_t ring)
++{
++ struct list_head *head;
++ pthread_mutex_lock(&context->pendings_mutex);
++ head = &context->pendings[ip][ip_instance][ring];
++ LIST_ADDTAIL(&ib->list_node, head);
++ pthread_mutex_unlock(&context->pendings_mutex);
++ return;
++}
++
++/**
++ * Garbage collector on a pending IB list without holding pendings_mutex.
++ * This function by itself is not multithread safe.
++ *
++ * \param context - \c [in] GPU Context
++ * \param ip - \c [in] hw ip block
++ * \param ip_instance - \c [in] instance of the hw ip block
++ * \param ring - \c [in] Ring of hw ip
++ * \param expired_fence - \c [in] fence expired
++ *
++ * \return N/A
++ * \note Hold pendings_mutex before calling this function.
++*/
++static void amdgpu_cs_pending_gc_not_safe(amdgpu_context_handle context,
++ unsigned ip, unsigned ip_instance,
++ uint32_t ring,
++ uint64_t expired_fence)
++{
++ struct list_head *head;
++ struct amdgpu_ib *next;
++ struct amdgpu_ib *s;
++ int r;
++
++ head = &context->pendings[ip][ip_instance][ring];
++ LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node)
++ if (next->cs_handle <= expired_fence) {
++ LIST_DEL(&next->list_node);
++ amdgpu_cs_free_to_ib_pool(context, next);
++ } else {
++ /* The pending list is a sorted list.
++ There is no need to continue. */
++ break;
++ }
++
++ /* walk the freed list as well */
++ head = &context->freed;
++ LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node) {
++ bool busy;
++
++ r = amdgpu_bo_wait_for_idle(next->buf_handle, 0, &busy);
++ if (r || busy)
++ break;
++
++ LIST_DEL(&next->list_node);
++ amdgpu_cs_free_to_ib_pool(context, next);
++ }
++
++ return;
++}
++
++/**
++ * Garbage collector on a pending IB list
++ *
++ * \param context - \c [in] GPU Context
++ * \param ip - \c [in] hw ip block
++ * \param ip_instance - \c [in] instance of the hw ip block
++ * \param ring - \c [in] Ring of hw ip
++ * \param expired_fence - \c [in] fence expired
++ *
++ * \return N/A
++*/
++static void amdgpu_cs_pending_gc(amdgpu_context_handle context,
++ unsigned ip, unsigned ip_instance,
++ uint32_t ring,
++ uint64_t expired_fence)
++{
++ pthread_mutex_lock(&context->pendings_mutex);
++ amdgpu_cs_pending_gc_not_safe(context, ip, ip_instance, ring,
++ expired_fence);
++ pthread_mutex_unlock(&context->pendings_mutex);
++ return;
++}
++
++/**
++ * Garbage collector on all pending IB lists
++ *
++ * \param context - \c [in] GPU Context
++ *
++ * \return N/A
++*/
++static void amdgpu_cs_all_pending_gc(amdgpu_context_handle context)
++{
++ unsigned ip, inst;
++ uint32_t ring;
++ uint64_t expired_fences[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
++
++ pthread_mutex_lock(&context->sequence_mutex);
++ for (ip = 0; ip < AMDGPU_HW_IP_NUM; ip++)
++ for (inst = 0; inst < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; inst++)
++ for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++)
++ expired_fences[ip][inst][ring] =
++ context->expired_fences[ip][inst][ring];
++ pthread_mutex_unlock(&context->sequence_mutex);
++
++ pthread_mutex_lock(&context->pendings_mutex);
++ for (ip = 0; ip < AMDGPU_HW_IP_NUM; ip++)
++ for (inst = 0; inst < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; inst++)
++ for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++)
++ amdgpu_cs_pending_gc_not_safe(context, ip, inst, ring,
++ expired_fences[ip][inst][ring]);
++ pthread_mutex_unlock(&context->pendings_mutex);
++}
++
++/**
++ * Allocate an IB buffer
++ * If there is no free IB buffer in pools, create one.
++ *
++ * \param dev - \c [in] Device handle
++ * \param context - \c [in] GPU Context
++ * \param ib_size - \c [in] Size of allocation
++ * \param ib - \c [out] return the pointer to the allocated IB buffer
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++static int amdgpu_cs_alloc_ib_local(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ enum amdgpu_cs_ib_size ib_size,
++ amdgpu_ib_handle *ib)
++{
++ int r;
++
++ r = amdgpu_cs_alloc_from_ib_pool(dev, context, ib_size, ib);
++ if (!r)
++ return r;
++
++ amdgpu_cs_all_pending_gc(context);
++
++ /* Retry to allocate from free IB pools after garbage collector. */
++ r = amdgpu_cs_alloc_from_ib_pool(dev, context, ib_size, ib);
++ if (!r)
++ return r;
++
++ /* There is no suitable IB in free pools. Create one. */
++ r = amdgpu_cs_create_ib(dev, context, ib_size, ib);
++ return r;
++}
++
++int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ enum amdgpu_cs_ib_size ib_size,
++ struct amdgpu_cs_ib_alloc_result *output)
++{
++ int r;
++ amdgpu_ib_handle ib;
++
++ if (NULL == dev)
++ return -EINVAL;
++ if (NULL == context)
++ return -EINVAL;
++ if (NULL == output)
++ return -EINVAL;
++ if (ib_size >= AMDGPU_CS_IB_SIZE_NUM)
++ return -EINVAL;
++
++ r = amdgpu_cs_alloc_ib_local(dev, context, ib_size, &ib);
++ if (!r) {
++ output->handle = ib;
++ output->cpu = ib->cpu;
++ output->mc_address = ib->virtual_mc_base_address;
++ }
++
++ return r;
++}
++
++int amdgpu_cs_free_ib(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ amdgpu_ib_handle handle)
++{
++ if (NULL == dev)
++ return -EINVAL;
++ if (NULL == context)
++ return -EINVAL;
++ if (NULL == handle)
++ return -EINVAL;
++
++ pthread_mutex_lock(&context->pendings_mutex);
++ LIST_ADD(&handle->list_node, &context->freed);
++ pthread_mutex_unlock(&context->pendings_mutex);
++ return 0;
++}
++
++/**
++ * Create command submission context
++ *
++ * \param dev - \c [in] amdgpu device handle
++ * \param context - \c [out] amdgpu context handle
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
++ amdgpu_context_handle *context)
++{
++ struct amdgpu_context *gpu_context;
++ union drm_amdgpu_ctx args;
++ int r;
++
++ if (NULL == dev)
++ return -EINVAL;
++ if (NULL == context)
++ return -EINVAL;
++
++ gpu_context = calloc(1, sizeof(struct amdgpu_context));
++ if (NULL == gpu_context)
++ return -ENOMEM;
++
++ r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
++ if (r)
++ goto error_mutex;
++
++ r = amdgpu_cs_init_ib_pool(gpu_context);
++ if (r)
++ goto error_pool;
++
++ r = amdgpu_cs_init_pendings(gpu_context);
++ if (r)
++ goto error_pendings;
++
++ r = amdgpu_cs_alloc_ib_local(dev, gpu_context, amdgpu_cs_ib_size_4K,
++ &gpu_context->fence_ib);
++ if (r)
++ goto error_fence_ib;
++
++
++ memset(&args, 0, sizeof(args));
++ args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
++ if (r)
++ goto error_kernel;
++
++ gpu_context->id = args.out.alloc.ctx_id;
++ *context = (amdgpu_context_handle)gpu_context;
++
++ return 0;
++
++error_kernel:
++ amdgpu_cs_free_ib(dev, gpu_context, gpu_context->fence_ib);
++
++error_fence_ib:
++ amdgpu_cs_destroy_pendings(dev, gpu_context);
++
++error_pendings:
++ amdgpu_cs_destroy_ib_pool(dev, gpu_context);
++
++error_pool:
++ pthread_mutex_destroy(&gpu_context->sequence_mutex);
++
++error_mutex:
++ free(gpu_context);
++ return r;
++}
++
++/**
++ * Release command submission context
++ *
++ * \param dev - \c [in] amdgpu device handle
++ * \param context - \c [in] amdgpu context handle
++ *
++ * \return 0 on success otherwise POSIX Error code
++*/
++int amdgpu_cs_ctx_free(amdgpu_device_handle dev,
++ amdgpu_context_handle context)
++{
++ int r;
++ union drm_amdgpu_ctx args;
++
++ if (NULL == dev)
++ return -EINVAL;
++ if (NULL == context)
++ return -EINVAL;
++
++ r = amdgpu_cs_free_ib(dev, context, context->fence_ib);
++ if (r)
++ return r;
++
++ r = amdgpu_cs_destroy_pendings(dev, context);
++ if (r)
++ return r;
++
++ r = amdgpu_cs_destroy_ib_pool(dev, context);
++ if (r)
++ return r;
++
++ pthread_mutex_destroy(&context->sequence_mutex);
++
++ /* now deal with kernel side */
++ memset(&args, 0, sizeof(args));
++ args.in.op = AMDGPU_CTX_OP_FREE_CTX;
++ args.in.ctx_id = context->id;
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
++
++ free(context);
++
++ return r;
++}
++
++static int amdgpu_cs_create_bo_list(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ struct amdgpu_cs_request *request,
++ amdgpu_ib_handle fence_ib,
++ uint32_t *handle)
++{
++ struct drm_amdgpu_bo_list_entry *list;
++ union drm_amdgpu_bo_list args;
++ unsigned num_resources;
++ unsigned i;
++ int r;
++
++ num_resources = request->number_of_resources;
++ if (fence_ib)
++ ++num_resources;
++
++ list = alloca(sizeof(struct drm_amdgpu_bo_list_entry) * num_resources);
++
++ memset(&args, 0, sizeof(args));
++ args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
++ args.in.bo_number = num_resources;
++ args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
++ args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;
++
++ for (i = 0; i < request->number_of_resources; i++) {
++ list[i].bo_handle = request->resources[i]->handle;
++ if (request->resource_flags)
++ list[i].bo_priority = request->resource_flags[i];
++ else
++ list[i].bo_priority = 0;
++ }
++
++ if (fence_ib)
++ list[i].bo_handle = fence_ib->buf_handle->handle;
++
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
++ &args, sizeof(args));
++ if (r)
++ return r;
++
++ *handle = args.out.list_handle;
++ return 0;
++}
++
++static int amdgpu_cs_free_bo_list(amdgpu_device_handle dev, uint32_t handle)
++{
++ union drm_amdgpu_bo_list args;
++ int r;
++
++ memset(&args, 0, sizeof(args));
++ args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
++ args.in.list_handle = handle;
++
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
++ &args, sizeof(args));
++
++ return r;
++}
++
++static uint32_t amdgpu_cs_fence_index(unsigned ip, unsigned ring)
++{
++ return ip * AMDGPU_CS_MAX_RINGS + ring;
++}
++
++/**
++ * Submit command to kernel DRM
++ * \param dev - \c [in] Device handle
++ * \param context - \c [in] GPU Context
++ * \param ibs_request - \c [in] Pointer to submission requests
++ * \param fence - \c [out] return fence for this submission
++ *
++ * \return 0 on success otherwise POSIX Error code
++ * \sa amdgpu_cs_submit()
++*/
++static int amdgpu_cs_submit_one(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ struct amdgpu_cs_request *ibs_request,
++ uint64_t *fence)
++{
++ int r;
++ uint32_t i, size;
++ union drm_amdgpu_cs cs;
++ uint64_t *chunk_array;
++ struct drm_amdgpu_cs_chunk *chunks;
++ struct drm_amdgpu_cs_chunk_data *chunk_data;
++
++ if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
++ return -EINVAL;
++ if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
++ return -EINVAL;
++ if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
++ return -EINVAL;
++
++ size = (ibs_request->number_of_ibs + 1) * ((sizeof(uint64_t) +
++ sizeof(struct drm_amdgpu_cs_chunk) +
++ sizeof(struct drm_amdgpu_cs_chunk_data)) +
++ ibs_request->number_of_resources + 1) *
++ sizeof(struct drm_amdgpu_bo_list_entry);
++ chunk_array = malloc(size);
++ if (NULL == chunk_array)
++ return -ENOMEM;
++ memset(chunk_array, 0, size);
++
++ chunks = (struct drm_amdgpu_cs_chunk *)(chunk_array + ibs_request->number_of_ibs + 1);
++ chunk_data = (struct drm_amdgpu_cs_chunk_data *)(chunks + ibs_request->number_of_ibs + 1);
++
++ memset(&cs, 0, sizeof(cs));
++ cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
++ cs.in.ctx_id = context->id;
++ cs.in.num_chunks = ibs_request->number_of_ibs;
++ /* IB chunks */
++ for (i = 0; i < ibs_request->number_of_ibs; i++) {
++ struct amdgpu_cs_ib_info *ib;
++ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
++ chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
++ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
++ chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
++
++ ib = &ibs_request->ibs[i];
++
++ chunk_data[i].ib_data.handle = ib->ib_handle->buf_handle->handle;
++ chunk_data[i].ib_data.va_start = ib->ib_handle->virtual_mc_base_address;
++ chunk_data[i].ib_data.ib_bytes = ib->size * 4;
++ chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
++ chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
++ chunk_data[i].ib_data.ring = ibs_request->ring;
++
++ if (ib->flags & AMDGPU_CS_GFX_IB_CE)
++ chunk_data[i].ib_data.flags = AMDGPU_IB_FLAG_CE;
++ }
++
++ r = amdgpu_cs_create_bo_list(dev, context, ibs_request, NULL,
++ &cs.in.bo_list_handle);
++ if (r)
++ goto error_unlock;
++
++ pthread_mutex_lock(&context->sequence_mutex);
++
++ if (ibs_request->ip_type != AMDGPU_HW_IP_UVD &&
++ ibs_request->ip_type != AMDGPU_HW_IP_VCE) {
++ i = cs.in.num_chunks++;
++
++ /* fence chunk */
++ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
++ chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
++ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
++ chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
++
++ /* fence bo handle */
++ chunk_data[i].fence_data.handle = context->fence_ib->buf_handle->handle;
++ /* offset */
++ chunk_data[i].fence_data.offset = amdgpu_cs_fence_index(
++ ibs_request->ip_type, ibs_request->ring);
++ chunk_data[i].fence_data.offset *= sizeof(uint64_t);
++ }
++
++ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
++ &cs, sizeof(cs));
++ if (r)
++ goto error_unlock;
++
++
++ /* Hold sequence_mutex while adding the record to the pending list,
++ so that the pending list stays sorted by fence value. */
++
++ for (i = 0; i < ibs_request->number_of_ibs; i++) {
++ struct amdgpu_cs_ib_info *ib;
++
++ ib = &ibs_request->ibs[i];
++ if (ib->flags & AMDGPU_CS_REUSE_IB)
++ continue;
++
++ ib->ib_handle->cs_handle = cs.out.handle;
++
++ amdgpu_cs_add_pending(context, ib->ib_handle, ibs_request->ip_type,
++ ibs_request->ip_instance,
++ ibs_request->ring);
++ }
++
++ *fence = cs.out.handle;
++
++ pthread_mutex_unlock(&context->sequence_mutex);
++
++ r = amdgpu_cs_free_bo_list(dev, cs.in.bo_list_handle);
++ if (r)
++ goto error_free;
++
++ free(chunk_array);
++ return 0;
++
++error_unlock:
++ pthread_mutex_unlock(&context->sequence_mutex);
++
++error_free:
++ free(chunk_array);
++ return r;
++}
++
++int amdgpu_cs_submit(amdgpu_device_handle dev,
++ amdgpu_context_handle context,
++ uint64_t flags,
++ struct amdgpu_cs_request *ibs_request,
++ uint32_t number_of_requests,
++ uint64_t *fences)
++{
++ int r;
++ uint32_t i;
++
++ if (NULL == dev)
++ return -EINVAL;
++ if (NULL == context)
++ return -EINVAL;
++ if (NULL == ibs_request)
++ return -EINVAL;
++ if (NULL == fences)
++ return -EINVAL;
++
++ r = 0;
++ for (i = 0; i < number_of_requests; i++) {
++ r = amdgpu_cs_submit_one(dev, context, ibs_request, fences);
++ if (r)
++ break;
++ fences++;
++ ibs_request++;
++ }
++
++ return r;
++}
++
++/**
++ * Calculate absolute timeout.
++ *
++ * \param timeout - \c [in] timeout in nanoseconds.
++ *
++ * \return absolute timeout in nanoseconds
++*/
++uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
++{
++ int r;
++
++ if (timeout != AMDGPU_TIMEOUT_INFINITE) {
++ struct timespec current;
++ r = clock_gettime(CLOCK_MONOTONIC, &current);
++ if (r)
++ return r;
++
++ timeout += ((uint64_t)current.tv_sec) * 1000000000ull;
++ timeout += current.tv_nsec;
++ }
++ return timeout;
++}
++
++static int amdgpu_ioctl_wait_cs(amdgpu_device_handle dev,
++ unsigned ip,
++ unsigned ip_instance,
++ uint32_t ring,
++ uint64_t handle,
++ uint64_t timeout_ns,
++ bool *busy)
++{
++ union drm_amdgpu_wait_cs args;
++ int r;
++
++ memset(&args, 0, sizeof(args));
++ args.in.handle = handle;
++ args.in.ip_type = ip;
++ args.in.ip_instance = ip_instance;
++ args.in.ring = ring;
++ args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);
++
++ /* Handle errors manually here because of timeout */
++ r = ioctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
++ if (r == -1 && (errno == EINTR || errno == EAGAIN)) {
++ *busy = true;
++ return 0;
++ } else if (r)
++ return -errno;
++
++ *busy = args.out.status;
++ return 0;
++}
++
++int amdgpu_cs_query_fence_status(amdgpu_device_handle dev,
++ struct amdgpu_cs_query_fence *fence,
++ uint32_t *expired)
++{
++ amdgpu_context_handle context;
++ uint64_t *signaled_fence;
++ uint64_t *expired_fence;
++ unsigned ip_type, ip_instance;
++ uint32_t ring;
++ bool busy = true;
++ int r;
++
++ if (NULL == dev)
++ return -EINVAL;
++ if (NULL == fence)
++ return -EINVAL;
++ if (NULL == expired)
++ return -EINVAL;
++ if (NULL == fence->context)
++ return -EINVAL;
++ if (fence->ip_type >= AMDGPU_HW_IP_NUM)
++ return -EINVAL;
++ if (fence->ring >= AMDGPU_CS_MAX_RINGS)
++ return -EINVAL;
++
++ context = fence->context;
++ ip_type = fence->ip_type;
++ ip_instance = fence->ip_instance;
++ ring = fence->ring;
++ signaled_fence = context->fence_ib->cpu;
++ signaled_fence += amdgpu_cs_fence_index(ip_type, ring);
++ expired_fence = &context->expired_fences[ip_type][ip_instance][ring];
++ *expired = false;
++
++ pthread_mutex_lock(&context->sequence_mutex);
++ if (fence->fence <= *expired_fence) {
++ /* This fence value is expired already. */
++ pthread_mutex_unlock(&context->sequence_mutex);
++ *expired = true;
++ return 0;
++ }
++
++ if (fence->fence <= *signaled_fence) {
++ /* This fence value is signaled already. */
++ *expired_fence = *signaled_fence;
++ pthread_mutex_unlock(&context->sequence_mutex);
++ amdgpu_cs_pending_gc(context, ip_type, ip_instance, ring,
++ fence->fence);
++ *expired = true;
++ return 0;
++ }
++
++ pthread_mutex_unlock(&context->sequence_mutex);
++
++ r = amdgpu_ioctl_wait_cs(dev, ip_type, ip_instance, ring,
++ fence->fence, fence->timeout_ns, &busy);
++ if (!r && !busy) {
++ *expired = true;
++ pthread_mutex_lock(&context->sequence_mutex);
++ /* This thread did not hold sequence_mutex during the wait, so
++ another thread could have updated *expired_fence already.
++ Check whether there is a newer expired fence. */
++ if (fence->fence > *expired_fence) {
++ *expired_fence = fence->fence;
++ pthread_mutex_unlock(&context->sequence_mutex);
++ amdgpu_cs_pending_gc(context, ip_type, ip_instance,
++ ring, fence->fence);
++ } else {
++ pthread_mutex_unlock(&context->sequence_mutex);
++ }
++ }
++
++ return r;
++}
++
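As orientation for the command-submission helpers above, the sketch below shows the intended calling sequence: create a context, allocate an IB from the context's pool, fill it, submit it on the GFX ring, and wait on the resulting fence. It is a hypothetical illustration rather than part of the patch; the structure fields are used as they appear in this file, the IB size of 16 dwords is a placeholder, and a real submission would also have to list every buffer object it references through request.resources and request.number_of_resources.

/* Hypothetical sketch, not part of this patch. */
#include <stdint.h>
#include <string.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

static int submit_once(amdgpu_device_handle dev)
{
	amdgpu_context_handle ctx;
	struct amdgpu_cs_ib_alloc_result ib;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_request request;
	struct amdgpu_cs_query_fence query;
	uint64_t fence;
	uint32_t expired = 0;
	int r;

	r = amdgpu_cs_ctx_create(dev, &ctx);
	if (r)
		return r;

	r = amdgpu_cs_alloc_ib(dev, ctx, amdgpu_cs_ib_size_4K, &ib);
	if (r)
		goto out_ctx;

	/* ... write command packets into ib.cpu here ... */

	memset(&ib_info, 0, sizeof(ib_info));
	ib_info.ib_handle = ib.handle;
	ib_info.size = 16;			/* dwords actually written (placeholder) */

	memset(&request, 0, sizeof(request));
	request.ip_type = AMDGPU_HW_IP_GFX;
	request.number_of_ibs = 1;
	request.ibs = &ib_info;

	r = amdgpu_cs_submit(dev, ctx, 0, &request, 1, &fence);
	if (r)
		goto out_ctx;

	memset(&query, 0, sizeof(query));
	query.context = ctx;
	query.ip_type = AMDGPU_HW_IP_GFX;
	query.fence = fence;
	query.timeout_ns = AMDGPU_TIMEOUT_INFINITE;
	r = amdgpu_cs_query_fence_status(dev, &query, &expired);

out_ctx:
	amdgpu_cs_ctx_free(dev, ctx);
	return r;
}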
+diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
+new file mode 100644
+index 0000000..66fa187
+--- /dev/null
++++ b/amdgpu/amdgpu_device.c
+@@ -0,0 +1,242 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++
++/**
++ * \file amdgpu_device.c
++ *
++ * Implementation of functions for AMD GPU device
++ *
++ *
++ */
++
++#include <sys/stat.h>
++#include <errno.h>
++#include <string.h>
++#include <stdio.h>
++#include <stdlib.h>
++
++#include "xf86drm.h"
++#include "amdgpu_drm.h"
++#include "amdgpu_internal.h"
++#include "util_hash_table.h"
++
++#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
++#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
++#define RENDERNODE_MINOR_MASK 0xff7f
++
++pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
++static struct util_hash_table *fd_tab;
++
++static unsigned handle_hash(void *key)
++{
++ return PTR_TO_UINT(key);
++}
++
++static int handle_compare(void *key1, void *key2)
++{
++ return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
++}
++
++static unsigned fd_hash(void *key)
++{
++ int fd = PTR_TO_UINT(key);
++ struct stat stat;
++ fstat(fd, &stat);
++
++ if (!S_ISCHR(stat.st_mode))
++ return stat.st_dev ^ stat.st_ino;
++ else
++ return stat.st_dev ^ (stat.st_rdev & RENDERNODE_MINOR_MASK);
++}
++
++static int fd_compare(void *key1, void *key2)
++{
++ int fd1 = PTR_TO_UINT(key1);
++ int fd2 = PTR_TO_UINT(key2);
++ struct stat stat1, stat2;
++ fstat(fd1, &stat1);
++ fstat(fd2, &stat2);
++
++ if (!S_ISCHR(stat1.st_mode) || !S_ISCHR(stat2.st_mode))
++ return stat1.st_dev != stat2.st_dev ||
++ stat1.st_ino != stat2.st_ino;
++ else
++ return major(stat1.st_rdev) != major(stat2.st_rdev) ||
++ (minor(stat1.st_rdev) & RENDERNODE_MINOR_MASK) !=
++ (minor(stat2.st_rdev) & RENDERNODE_MINOR_MASK);
++}
++
++/**
++* Get the authentication state of the fd.
++*
++* \param fd - \c [in] File descriptor for the AMD GPU device
++* \param auth - \c [out] Pointer to output whether the fd is authenticated
++* For a render node fd, auth is set to 0.
++* For a legacy fd, the authentication state is queried from the DRM client info.
++*
++* \return 0 on success\n
++* >0 - AMD specific error code\n
++* <0 - Negative POSIX Error code
++*/
++static int amdgpu_get_auth(int fd, int *auth)
++{
++ int r = 0;
++ drm_client_t client;
++ struct stat stat1;
++ fstat(fd,&stat1);
++ if (minor(stat1.st_rdev) & ~RENDERNODE_MINOR_MASK) /* this is a render node fd */
++ *auth = 0;
++ else {
++ client.idx=0;
++ r= drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client);
++ if (!r)
++ *auth = client.auth;
++ }
++ return r;
++}
++
++int amdgpu_device_initialize(int fd,
++ uint32_t *major_version,
++ uint32_t *minor_version,
++ amdgpu_device_handle *device_handle)
++{
++ struct amdgpu_device *dev;
++ drmVersionPtr version;
++ int r;
++ int flag_auth = 0;
++ int flag_authexist=0;
++ uint32_t accel_working;
++
++ *device_handle = NULL;
++
++ pthread_mutex_lock(&fd_mutex);
++ if (!fd_tab)
++ fd_tab = util_hash_table_create(fd_hash, fd_compare);
++ r = amdgpu_get_auth(fd, &flag_auth);
++ if (r) {
++ pthread_mutex_unlock(&fd_mutex);
++ return r;
++ }
++ dev = util_hash_table_get(fd_tab, UINT_TO_PTR(fd));
++ if (dev) {
++ r = amdgpu_get_auth(dev->fd, &flag_authexist);
++ if (r) {
++ pthread_mutex_unlock(&fd_mutex);
++ return r;
++ }
++ if ((flag_auth) && (!flag_authexist)) {
++ dev->flink_fd = fd;
++ }
++ *major_version = dev->major_version;
++ *minor_version = dev->minor_version;
++ amdgpu_device_reference(device_handle, dev);
++ pthread_mutex_unlock(&fd_mutex);
++ return 0;
++ }
++
++ dev = calloc(1, sizeof(struct amdgpu_device));
++ if (!dev) {
++ pthread_mutex_unlock(&fd_mutex);
++ return -ENOMEM;
++ }
++
++ atomic_set(&dev->refcount, 1);
++
++ version = drmGetVersion(fd);
++ if (version->version_major != 3) {
++ fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
++ "only compatible with 3.x.x.\n",
++ __func__,
++ version->version_major,
++ version->version_minor,
++ version->version_patchlevel);
++ drmFreeVersion(version);
++ r = -EBADF;
++ goto cleanup;
++ }
++
++ dev->fd = fd;
++ dev->flink_fd = fd;
++ dev->major_version = version->version_major;
++ dev->minor_version = version->version_minor;
++ drmFreeVersion(version);
++
++ dev->bo_flink_names = util_hash_table_create(handle_hash,
++ handle_compare);
++ dev->bo_handles = util_hash_table_create(handle_hash, handle_compare);
++ dev->bo_vas = util_hash_table_create(handle_hash, handle_compare);
++ pthread_mutex_init(&dev->bo_table_mutex, NULL);
++
++ /* Check if acceleration is working. */
++ r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
++ if (r)
++ goto cleanup;
++ if (!accel_working) {
++ r = -EBADF;
++ goto cleanup;
++ }
++
++ r = amdgpu_query_gpu_info_init(dev);
++ if (r)
++ goto cleanup;
++
++ amdgpu_vamgr_init(dev);
++
++ *major_version = dev->major_version;
++ *minor_version = dev->minor_version;
++ *device_handle = dev;
++ util_hash_table_set(fd_tab, UINT_TO_PTR(fd), dev);
++ pthread_mutex_unlock(&fd_mutex);
++
++ return 0;
++
++cleanup:
++ free(dev);
++ pthread_mutex_unlock(&fd_mutex);
++ return r;
++}
++
++void amdgpu_device_free_internal(amdgpu_device_handle dev)
++{
++ util_hash_table_destroy(dev->bo_flink_names);
++ util_hash_table_destroy(dev->bo_handles);
++ util_hash_table_destroy(dev->bo_vas);
++ pthread_mutex_destroy(&dev->bo_table_mutex);
++ pthread_mutex_destroy(&(dev->vamgr.bo_va_mutex));
++ util_hash_table_remove(fd_tab, UINT_TO_PTR(dev->fd));
++ free(dev);
++}
++
++int amdgpu_device_deinitialize(amdgpu_device_handle dev)
++{
++ amdgpu_device_reference(&dev, NULL);
++ return 0;
++}
++
++void amdgpu_device_reference(struct amdgpu_device **dst,
++ struct amdgpu_device *src)
++{
++ if (update_references(&(*dst)->refcount, &src->refcount))
++ amdgpu_device_free_internal(*dst);
++ *dst = src;
++}
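For orientation, a hypothetical sketch of how a client is expected to drive the initialization path above; the render-node path used here is an assumption for the example and the sketch is not part of the patch.

/* Hypothetical sketch, not part of this patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include "amdgpu.h"

static int open_amdgpu(void)
{
	amdgpu_device_handle dev;
	uint32_t major, minor;
	int fd, r;

	/* Path is an example; any DRM fd for an amdgpu device will do. */
	fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return -1;

	r = amdgpu_device_initialize(fd, &major, &minor, &dev);
	if (r) {
		fprintf(stderr, "amdgpu_device_initialize failed: %d\n", r);
		return r;
	}

	printf("libdrm_amdgpu DRM interface version %u.%u\n", major, minor);

	/* Drops the reference taken above; repeated initialize calls on the
	 * same fd share one amdgpu_device through the fd hash table. */
	return amdgpu_device_deinitialize(dev);
}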
+diff --git a/amdgpu/amdgpu_gpu_info.c b/amdgpu/amdgpu_gpu_info.c
+new file mode 100644
+index 0000000..0b77731
+--- /dev/null
++++ b/amdgpu/amdgpu_gpu_info.c
+@@ -0,0 +1,275 @@
++/*
++ * Copyright © 2014 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <errno.h>
++#include <string.h>
++
++#include "amdgpu.h"
++#include "amdgpu_drm.h"
++#include "amdgpu_internal.h"
++#include "xf86drm.h"
++
++int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
++ unsigned size, void *value)
++{
++ struct drm_amdgpu_info request;
++
++ memset(&request, 0, sizeof(request));
++ request.return_pointer = (uintptr_t)value;
++ request.return_size = size;
++ request.query = info_id;
++
++ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
++ sizeof(struct drm_amdgpu_info));
++}
++
++int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
++ int32_t *result)
++{
++ struct drm_amdgpu_info request;
++
++ memset(&request, 0, sizeof(request));
++ request.return_pointer = (uintptr_t)result;
++ request.return_size = sizeof(*result);
++ request.query = AMDGPU_INFO_CRTC_FROM_ID;
++ request.mode_crtc.id = id;
++
++ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
++ sizeof(struct drm_amdgpu_info));
++}
++
++int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
++ unsigned count, uint32_t instance, uint32_t flags,
++ uint32_t *values)
++{
++ struct drm_amdgpu_info request;
++
++ memset(&request, 0, sizeof(request));
++ request.return_pointer = (uintptr_t)values;
++ request.return_size = count * sizeof(uint32_t);
++ request.query = AMDGPU_INFO_READ_MMR_REG;
++ request.read_mmr_reg.dword_offset = dword_offset;
++ request.read_mmr_reg.count = count;
++ request.read_mmr_reg.instance = instance;
++ request.read_mmr_reg.flags = flags;
++
++ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
++ sizeof(struct drm_amdgpu_info));
++}
++
++int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
++ uint32_t *count)
++{
++ struct drm_amdgpu_info request;
++
++ memset(&request, 0, sizeof(request));
++ request.return_pointer = (uintptr_t)count;
++ request.return_size = sizeof(*count);
++ request.query = AMDGPU_INFO_HW_IP_COUNT;
++ request.query_hw_ip.type = type;
++
++ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
++ sizeof(struct drm_amdgpu_info));
++}
++
++int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
++ unsigned ip_instance,
++ struct drm_amdgpu_info_hw_ip *info)
++{
++ struct drm_amdgpu_info request;
++
++ memset(&request, 0, sizeof(request));
++ request.return_pointer = (uintptr_t)info;
++ request.return_size = sizeof(*info);
++ request.query = AMDGPU_INFO_HW_IP_INFO;
++ request.query_hw_ip.type = type;
++ request.query_hw_ip.ip_instance = ip_instance;
++
++ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
++ sizeof(struct drm_amdgpu_info));
++}
++
++int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
++ unsigned ip_instance, unsigned index,
++ uint32_t *version, uint32_t *feature)
++{
++ struct drm_amdgpu_info request;
++ struct drm_amdgpu_info_firmware firmware;
++ int r;
++
++ memset(&request, 0, sizeof(request));
++ request.return_pointer = (uintptr_t)&firmware;
++ request.return_size = sizeof(firmware);
++ request.query = AMDGPU_INFO_FW_VERSION;
++ request.query_fw.fw_type = fw_type;
++ request.query_fw.ip_instance = ip_instance;
++ request.query_fw.index = index;
++
++ r = drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
++ sizeof(struct drm_amdgpu_info));
++ if (r)
++ return r;
++
++ *version = firmware.ver;
++ *feature = firmware.feature;
++ return 0;
++}
++
++int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
++{
++ int r, i;
++
++ r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev->dev_info),
++ &dev->dev_info);
++ if (r)
++ return r;
++
++ dev->info.asic_id = dev->dev_info.device_id;
++ dev->info.chip_rev = dev->dev_info.chip_rev;
++ dev->info.chip_external_rev = dev->dev_info.external_rev;
++ dev->info.family_id = dev->dev_info.family;
++ dev->info.max_engine_clk = dev->dev_info.max_engine_clock;
++ dev->info.gpu_counter_freq = dev->dev_info.gpu_counter_freq;
++ dev->info.enabled_rb_pipes_mask = dev->dev_info.enabled_rb_pipes_mask;
++ dev->info.rb_pipes = dev->dev_info.num_rb_pipes;
++ dev->info.ids_flags = dev->dev_info.ids_flags;
++ dev->info.num_hw_gfx_contexts = dev->dev_info.num_hw_gfx_contexts;
++ dev->info.num_shader_engines = dev->dev_info.num_shader_engines;
++ dev->info.num_shader_arrays_per_engine =
++ dev->dev_info.num_shader_arrays_per_engine;
++
++ for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
++ unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
++ (AMDGPU_INFO_MMR_SH_INDEX_MASK <<
++ AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
++
++ r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
++ &dev->info.backend_disable[i]);
++ if (r)
++ return r;
++ /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
++ dev->info.backend_disable[i] =
++ (dev->info.backend_disable[i] >> 16) & 0xff;
++
++ r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
++ &dev->info.pa_sc_raster_cfg[i]);
++ if (r)
++ return r;
++
++ r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
++ &dev->info.pa_sc_raster_cfg1[i]);
++ if (r)
++ return r;
++ }
++
++ r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
++ dev->info.gb_tile_mode);
++ if (r)
++ return r;
++
++ r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
++ dev->info.gb_macro_tile_mode);
++ if (r)
++ return r;
++
++ r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
++ &dev->info.gb_addr_cfg);
++ if (r)
++ return r;
++
++ r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
++ &dev->info.mc_arb_ramcfg);
++ if (r)
++ return r;
++
++ dev->info.cu_active_number = dev->dev_info.cu_active_number;
++ dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
++ memcpy(&dev->info.cu_bitmap[0][0], &dev->dev_info.cu_bitmap[0][0], sizeof(dev->info.cu_bitmap));
++
++ /* TODO: info->max_quad_shader_pipes is not set */
++ /* TODO: info->avail_quad_shader_pipes is not set */
++ /* TODO: info->cache_entries_per_quad_pipe is not set */
++ /* TODO: info->active_rb_pipes is not set */
++ return 0;
++}
++
++int amdgpu_query_gpu_info(amdgpu_device_handle dev,
++ struct amdgpu_gpu_info *info)
++{
++ /* Get ASIC info*/
++ *info = dev->info;
++
++ return 0;
++}
++
++int amdgpu_query_heap_info(amdgpu_device_handle dev,
++ uint32_t heap,
++ uint32_t flags,
++ struct amdgpu_heap_info *info)
++{
++ struct drm_amdgpu_info_vram_gtt vram_gtt_info;
++ int r;
++
++ r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_GTT,
++ sizeof(vram_gtt_info), &vram_gtt_info);
++ if (r)
++ return r;
++
++ /* Get heap information */
++ switch (heap) {
++ case AMDGPU_GEM_DOMAIN_VRAM:
++ /* query visible only vram heap */
++ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
++ info->heap_size = vram_gtt_info.vram_cpu_accessible_size;
++ else /* query total vram heap */
++ info->heap_size = vram_gtt_info.vram_size;
++
++ info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;
++
++ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
++ r = amdgpu_query_info(dev, AMDGPU_INFO_VIS_VRAM_USAGE,
++ sizeof(info->heap_usage),
++ &info->heap_usage);
++ else
++ r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_USAGE,
++ sizeof(info->heap_usage),
++ &info->heap_usage);
++ if (r)
++ return r;
++ break;
++ case AMDGPU_GEM_DOMAIN_GTT:
++ info->heap_size = vram_gtt_info.gtt_size;
++ info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;
++
++ r = amdgpu_query_info(dev, AMDGPU_INFO_GTT_USAGE,
++ sizeof(info->heap_usage),
++ &info->heap_usage);
++ if (r)
++ return r;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
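
The two query entry points above either copy the information cached at init time (amdgpu_query_gpu_info) or translate the AMDGPU_INFO_VRAM_GTT ioctl result into a per-heap view (amdgpu_query_heap_info). A minimal caller sketch, not part of the patch, assuming an already-initialized amdgpu_device_handle:

    #include <stdio.h>
    #include "amdgpu.h"
    #include "amdgpu_drm.h"

    /* Print the GPU family id and the CPU-visible VRAM heap size/usage. */
    static int print_gpu_summary(amdgpu_device_handle dev)
    {
        struct amdgpu_gpu_info gpu;
        struct amdgpu_heap_info vram;
        int r;

        r = amdgpu_query_gpu_info(dev, &gpu);   /* copies the cached dev->info */
        if (r)
            return r;

        r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM,
                                   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &vram);
        if (r)
            return r;

        printf("family %u: visible VRAM %llu bytes, %llu in use\n",
               (unsigned)gpu.family_id,
               (unsigned long long)vram.heap_size,
               (unsigned long long)vram.heap_usage);
        return 0;
    }
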
+diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
+new file mode 100644
+index 0000000..b27756d
+--- /dev/null
++++ b/amdgpu/amdgpu_internal.h
+@@ -0,0 +1,210 @@
++/*
++ * Copyright © 2014 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _amdgpu_internal_h_
++#define _amdgpu_internal_h_
++
++#ifdef HAVE_CONFIG_H
++#include "config.h"
++#endif
++
++#include <assert.h>
++#include <pthread.h>
++#include "xf86atomic.h"
++#include "amdgpu.h"
++#include "util_double_list.h"
++
++#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
++
++#define AMDGPU_CS_MAX_RINGS 8
++
++struct amdgpu_bo_va_hole {
++ struct list_head list;
++ uint64_t offset;
++ uint64_t size;
++};
++
++struct amdgpu_bo_va_mgr {
++ /* the start virtual address */
++ uint64_t va_offset;
++ struct list_head va_holes;
++ pthread_mutex_t bo_va_mutex;
++ uint32_t va_alignment;
++};
++
++struct amdgpu_device {
++ atomic_t refcount;
++ int fd;
++ int flink_fd;
++ unsigned major_version;
++ unsigned minor_version;
++
++ /** List of buffer handles. Protected by bo_table_mutex. */
++ struct util_hash_table *bo_handles;
++ /** List of buffer GEM flink names. Protected by bo_table_mutex. */
++ struct util_hash_table *bo_flink_names;
++ /** List of buffer virtual memory ranges. Protected by bo_table_mutex. */
++ struct util_hash_table *bo_vas;
++ /** This protects all hash tables. */
++ pthread_mutex_t bo_table_mutex;
++ struct amdgpu_bo_va_mgr vamgr;
++ struct drm_amdgpu_info_device dev_info;
++ struct amdgpu_gpu_info info;
++};
++
++struct amdgpu_bo {
++ atomic_t refcount;
++ struct amdgpu_device *dev;
++
++ uint64_t alloc_size;
++ uint64_t virtual_mc_base_address;
++
++ uint32_t handle;
++ uint32_t flink_name;
++
++ pthread_mutex_t cpu_access_mutex;
++ void *cpu_ptr;
++ int cpu_map_count;
++};
++
++/*
++ * There are three mutexes.
++ * To avoid deadlock, only hold the mutexes in this order:
++ * sequence_mutex -> pendings_mutex -> pool_mutex.
++*/
++struct amdgpu_context {
++ /** Mutex for accessing fences and to maintain command submissions
++ and pending lists in good sequence. */
++ pthread_mutex_t sequence_mutex;
++ /** Buffer for user fences */
++ struct amdgpu_ib *fence_ib;
++ /** The newest expired fence for the ring of the ip blocks. */
++ uint64_t expired_fences[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
++ /** Mutex for accessing pendings list. */
++ pthread_mutex_t pendings_mutex;
++ /** Pending IBs. */
++ struct list_head pendings[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
++ /** Freed IBs not yet in pool */
++ struct list_head freed;
++ /** Mutex for accessing free ib pool. */
++ pthread_mutex_t pool_mutex;
++ /** Internal free IB pools. */
++ struct list_head ib_pools[AMDGPU_CS_IB_SIZE_NUM];
++ /* context id*/
++ uint32_t id;
++};
++
++struct amdgpu_ib {
++ struct list_head list_node;
++ amdgpu_bo_handle buf_handle;
++ void *cpu;
++ uint64_t virtual_mc_base_address;
++ enum amdgpu_cs_ib_size ib_size;
++ uint64_t cs_handle;
++};
++
++/**
++ * Functions.
++ */
++
++void amdgpu_device_free_internal(amdgpu_device_handle dev);
++
++void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
++
++void amdgpu_vamgr_init(struct amdgpu_device *dev);
++
++uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
++ uint64_t size, uint64_t alignment);
++
++void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
++ uint64_t size);
++
++int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);
++
++uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);
++
++/**
++ * Inline functions.
++ */
++
++/**
++ * Increment src and decrement dst as if we were updating references
++ * for an assignment between 2 pointers of some objects.
++ *
++ * \return true if dst is 0
++ */
++static inline bool update_references(atomic_t *dst, atomic_t *src)
++{
++ if (dst != src) {
++ /* bump src first */
++ if (src) {
++ assert(atomic_read(src) > 0);
++ atomic_inc(src);
++ }
++ if (dst) {
++ assert(atomic_read(dst) > 0);
++ return atomic_dec_and_test(dst);
++ }
++ }
++ return false;
++}
++
++/**
++ * Assignment between two amdgpu_bo pointers with reference counting.
++ *
++ * Usage:
++ * struct amdgpu_bo *dst = ... , *src = ...;
++ *
++ * dst = src;
++ * // No reference counting. Only use this when you need to move
++ * // a reference from one pointer to another.
++ *
++ * amdgpu_bo_reference(&dst, src);
++ * // Reference counters are updated. dst is decremented and src is
++ * // incremented. dst is freed if its reference counter is 0.
++ */
++static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
++ struct amdgpu_bo *src)
++{
++ if (update_references(&(*dst)->refcount, &src->refcount))
++ amdgpu_bo_free_internal(*dst);
++ *dst = src;
++}
++
++/**
++ * Assignment between two amdgpu_device pointers with reference counting.
++ *
++ * Usage:
++ * struct amdgpu_device *dst = ... , *src = ...;
++ *
++ * dst = src;
++ * // No reference counting. Only use this when you need to move
++ * // a reference from one pointer to another.
++ *
++ * amdgpu_device_reference(&dst, src);
++ * // Reference counters are updated. dst is decremented and src is
++ * // incremented. dst is freed if its reference counter is 0.
++ */
++void amdgpu_device_reference(struct amdgpu_device **dst,
++ struct amdgpu_device *src);
++#endif
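
The comment above struct amdgpu_context fixes the lock order at sequence_mutex -> pendings_mutex -> pool_mutex. A sketch, not part of the patch, of what that means for a hypothetical path that moves an IB from a pending list back into a pool (the helper name and the ib_pools indexing are illustrative assumptions):

    /* Hypothetical helper: when more than one context mutex is held,
     * take them in the documented order and release them in reverse. */
    static void move_ib_to_pool(struct amdgpu_context *ctx, struct amdgpu_ib *ib)
    {
        pthread_mutex_lock(&ctx->pendings_mutex);   /* 2nd in the documented order */
        pthread_mutex_lock(&ctx->pool_mutex);       /* 3rd: only ever after pendings */

        list_del(&ib->list_node);                   /* off the pending list */
        list_add(&ib->list_node, &ctx->ib_pools[ib->ib_size]);

        pthread_mutex_unlock(&ctx->pool_mutex);
        pthread_mutex_unlock(&ctx->pendings_mutex);
    }
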
+diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
+new file mode 100644
+index 0000000..2335912
+--- /dev/null
++++ b/amdgpu/amdgpu_vamgr.c
+@@ -0,0 +1,169 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++
++#include <stdlib.h>
++#include <string.h>
++#include "amdgpu.h"
++#include "amdgpu_drm.h"
++#include "amdgpu_internal.h"
++#include "util_math.h"
++
++void amdgpu_vamgr_init(struct amdgpu_device *dev)
++{
++ struct amdgpu_bo_va_mgr *vamgr = &dev->vamgr;
++
++ vamgr->va_offset = dev->dev_info.virtual_address_offset;
++ vamgr->va_alignment = dev->dev_info.virtual_address_alignment;
++
++ list_inithead(&vamgr->va_holes);
++ pthread_mutex_init(&vamgr->bo_va_mutex, NULL);
++}
++
++uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
++ uint64_t size, uint64_t alignment)
++{
++ struct amdgpu_bo_va_hole *hole, *n;
++ uint64_t offset = 0, waste = 0;
++
++ alignment = MAX2(alignment, mgr->va_alignment);
++ size = ALIGN(size, mgr->va_alignment);
++
++ pthread_mutex_lock(&mgr->bo_va_mutex);
++ /* TODO: use a more appropriate way to track the holes */
++ /* first look for a hole */
++ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
++ offset = hole->offset;
++ waste = offset % alignment;
++ waste = waste ? alignment - waste : 0;
++ offset += waste;
++ if (offset >= (hole->offset + hole->size)) {
++ continue;
++ }
++ if (!waste && hole->size == size) {
++ offset = hole->offset;
++ list_del(&hole->list);
++ free(hole);
++ pthread_mutex_unlock(&mgr->bo_va_mutex);
++ return offset;
++ }
++ if ((hole->size - waste) > size) {
++ if (waste) {
++ n = calloc(1,
++ sizeof(struct amdgpu_bo_va_hole));
++ n->size = waste;
++ n->offset = hole->offset;
++ list_add(&n->list, &hole->list);
++ }
++ hole->size -= (size + waste);
++ hole->offset += size + waste;
++ pthread_mutex_unlock(&mgr->bo_va_mutex);
++ return offset;
++ }
++ if ((hole->size - waste) == size) {
++ hole->size = waste;
++ pthread_mutex_unlock(&mgr->bo_va_mutex);
++ return offset;
++ }
++ }
++
++ offset = mgr->va_offset;
++ waste = offset % alignment;
++ waste = waste ? alignment - waste : 0;
++ if (waste) {
++ n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
++ n->size = waste;
++ n->offset = offset;
++ list_add(&n->list, &mgr->va_holes);
++ }
++ offset += waste;
++ mgr->va_offset += size + waste;
++ pthread_mutex_unlock(&mgr->bo_va_mutex);
++ return offset;
++}
++
++void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
++ uint64_t size)
++{
++ struct amdgpu_bo_va_hole *hole;
++
++ size = ALIGN(size, mgr->va_alignment);
++
++ pthread_mutex_lock(&mgr->bo_va_mutex);
++ if ((va + size) == mgr->va_offset) {
++ mgr->va_offset = va;
++ /* Delete uppermost hole if it reaches the new top */
++ if (!LIST_IS_EMPTY(&mgr->va_holes)) {
++ hole = container_of(mgr->va_holes.next, hole, list);
++ if ((hole->offset + hole->size) == va) {
++ mgr->va_offset = hole->offset;
++ list_del(&hole->list);
++ free(hole);
++ }
++ }
++ } else {
++ struct amdgpu_bo_va_hole *next;
++
++ hole = container_of(&mgr->va_holes, hole, list);
++ LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
++ if (next->offset < va)
++ break;
++ hole = next;
++ }
++
++ if (&hole->list != &mgr->va_holes) {
++ /* Grow upper hole if it's adjacent */
++ if (hole->offset == (va + size)) {
++ hole->offset = va;
++ hole->size += size;
++ /* Merge lower hole if it's adjacent */
++ if (next != hole
++ && &next->list != &mgr->va_holes
++ && (next->offset + next->size) == va) {
++ next->size += hole->size;
++ list_del(&hole->list);
++ free(hole);
++ }
++ goto out;
++ }
++ }
++
++ /* Grow lower hole if it's adjacent */
++ if (next != hole && &next->list != &mgr->va_holes &&
++ (next->offset + next->size) == va) {
++ next->size += size;
++ goto out;
++ }
++
++ /* FIXME: on allocation failure we just lose virtual address space;
++ * maybe print a warning
++ */
++ next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
++ if (next) {
++ next->size = size;
++ next->offset = va;
++ list_add(&next->list, &hole->list);
++ }
++ }
++out:
++ pthread_mutex_unlock(&mgr->bo_va_mutex);
++}
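
The allocator above hands out ranges from a hole list and grows va_offset when no hole fits. A round-trip sketch, not part of the patch; the caller is expected to free with the same size it requested, since both paths re-align the size with ALIGN():

    /* Reserve a 1 MiB GPU virtual address range aligned to 4 KiB,
     * use it, then return it to the manager. */
    static void va_roundtrip(struct amdgpu_device *dev)
    {
        const uint64_t size = 1 << 20;
        const uint64_t alignment = 4096;
        uint64_t va;

        va = amdgpu_vamgr_find_va(&dev->vamgr, size, alignment);
        /* ... map a buffer object at 'va' ... */
        amdgpu_vamgr_free_va(&dev->vamgr, va, size);
    }
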
+diff --git a/amdgpu/libdrm_amdgpu.pc.in b/amdgpu/libdrm_amdgpu.pc.in
+new file mode 100644
+index 0000000..417865e
+--- /dev/null
++++ b/amdgpu/libdrm_amdgpu.pc.in
+@@ -0,0 +1,10 @@
++prefix=@prefix@
++exec_prefix=@exec_prefix@
++libdir=@libdir@
++includedir=@includedir@
++
++Name: libdrm_amdgpu
++Description: Userspace interface to kernel DRM services for amdgpu
++Version: @PACKAGE_VERSION@
++Libs: -L${libdir} -ldrm_amdgpu
++Cflags: -I${includedir} -I${includedir}/libdrm
+diff --git a/amdgpu/util_double_list.h b/amdgpu/util_double_list.h
+new file mode 100644
+index 0000000..3f48ae2
+--- /dev/null
++++ b/amdgpu/util_double_list.h
+@@ -0,0 +1,146 @@
++/**************************************************************************
++ *
++ * Copyright 2006 VMware, Inc., Bismarck, ND. USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ **************************************************************************/
++
++/**
++ * \file
++ * List macros heavily inspired by the Linux kernel
++ * list handling. No list looping yet.
++ *
++ * It is not thread-safe, so common operations need to
++ * be protected using an external mutex.
++ */
++
++#ifndef _U_DOUBLE_LIST_H_
++#define _U_DOUBLE_LIST_H_
++
++
++#include <stddef.h>
++
++
++struct list_head
++{
++ struct list_head *prev;
++ struct list_head *next;
++};
++
++static inline void list_inithead(struct list_head *item)
++{
++ item->prev = item;
++ item->next = item;
++}
++
++static inline void list_add(struct list_head *item, struct list_head *list)
++{
++ item->prev = list;
++ item->next = list->next;
++ list->next->prev = item;
++ list->next = item;
++}
++
++static inline void list_addtail(struct list_head *item, struct list_head *list)
++{
++ item->next = list;
++ item->prev = list->prev;
++ list->prev->next = item;
++ list->prev = item;
++}
++
++static inline void list_replace(struct list_head *from, struct list_head *to)
++{
++ to->prev = from->prev;
++ to->next = from->next;
++ from->next->prev = to;
++ from->prev->next = to;
++}
++
++static inline void list_del(struct list_head *item)
++{
++ item->prev->next = item->next;
++ item->next->prev = item->prev;
++ item->prev = item->next = NULL;
++}
++
++static inline void list_delinit(struct list_head *item)
++{
++ item->prev->next = item->next;
++ item->next->prev = item->prev;
++ item->next = item;
++ item->prev = item;
++}
++
++#define LIST_INITHEAD(__item) list_inithead(__item)
++#define LIST_ADD(__item, __list) list_add(__item, __list)
++#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
++#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
++#define LIST_DEL(__item) list_del(__item)
++#define LIST_DELINIT(__item) list_delinit(__item)
++
++#define LIST_ENTRY(__type, __item, __field) \
++ ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
++
++#define LIST_IS_EMPTY(__list) \
++ ((__list)->next == (__list))
++
++/**
++ * Cast from a pointer to a member of a struct back to the containing struct.
++ *
++ * 'sample' MUST be initialized, or else the result is undefined!
++ */
++#ifndef container_of
++#define container_of(ptr, sample, member) \
++ (void *)((char *)(ptr) \
++ - ((char *)&(sample)->member - (char *)(sample)))
++#endif
++
++#define LIST_FOR_EACH_ENTRY(pos, head, member) \
++ for (pos = NULL, pos = container_of((head)->next, pos, member); \
++ &pos->member != (head); \
++ pos = container_of(pos->member.next, pos, member))
++
++#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
++ for (pos = NULL, pos = container_of((head)->next, pos, member), \
++ storage = container_of(pos->member.next, pos, member); \
++ &pos->member != (head); \
++ pos = storage, storage = container_of(storage->member.next, storage, member))
++
++#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
++ for (pos = NULL, pos = container_of((head)->prev, pos, member), \
++ storage = container_of(pos->member.prev, pos, member); \
++ &pos->member != (head); \
++ pos = storage, storage = container_of(storage->member.prev, storage, member))
++
++#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
++ for (pos = NULL, pos = container_of((start), pos, member); \
++ &pos->member != (head); \
++ pos = container_of(pos->member.next, pos, member))
++
++#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
++ for (pos = NULL, pos = container_of((start), pos, member); \
++ &pos->member != (head); \
++ pos = container_of(pos->member.prev, pos, member))
++
++#endif /*_U_DOUBLE_LIST_H_*/
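
The list is intrusive: each element embeds a struct list_head, and the iteration macros recover the containing object through container_of. A small sketch, not part of the patch, with a hypothetical element type:

    struct item {
        struct list_head node;   /* embedded link */
        int value;
    };

    /* Sum the values of all items on a list initialized with LIST_INITHEAD. */
    static int sum_items(struct list_head *head)
    {
        struct item *it;
        int sum = 0;

        LIST_FOR_EACH_ENTRY(it, head, node)
            sum += it->value;
        return sum;
    }
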
+diff --git a/amdgpu/util_hash.c b/amdgpu/util_hash.c
+new file mode 100644
+index 0000000..b1e12c4
+--- /dev/null
++++ b/amdgpu/util_hash.c
+@@ -0,0 +1,382 @@
++/**************************************************************************
++ *
++ * Copyright 2007 VMware, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++ /*
++ * Authors:
++ * Zack Rusin <zackr@vmware.com>
++ */
++
++#include "util_hash.h"
++
++#include <stdlib.h>
++#include <assert.h>
++
++#define MAX(a, b) ((a > b) ? (a) : (b))
++
++static const int MinNumBits = 4;
++
++static const unsigned char prime_deltas[] = {
++ 0, 0, 1, 3, 1, 5, 3, 3, 1, 9, 7, 5, 3, 9, 25, 3,
++ 1, 21, 3, 21, 7, 15, 9, 5, 3, 29, 15, 0, 0, 0, 0, 0
++};
++
++static int primeForNumBits(int numBits)
++{
++ return (1 << numBits) + prime_deltas[numBits];
++}
++
++/* Returns the smallest integer n such that
++ primeForNumBits(n) >= hint.
++*/
++static int countBits(int hint)
++{
++ int numBits = 0;
++ int bits = hint;
++
++ while (bits > 1) {
++ bits >>= 1;
++ numBits++;
++ }
++
++ if (numBits >= (int)sizeof(prime_deltas)) {
++ numBits = sizeof(prime_deltas) - 1;
++ } else if (primeForNumBits(numBits) < hint) {
++ ++numBits;
++ }
++ return numBits;
++}
++
++struct util_node {
++ struct util_node *next;
++ unsigned key;
++ void *value;
++};
++
++struct util_hash_data {
++ struct util_node *fakeNext;
++ struct util_node **buckets;
++ int size;
++ int nodeSize;
++ short userNumBits;
++ short numBits;
++ int numBuckets;
++};
++
++struct util_hash {
++ union {
++ struct util_hash_data *d;
++ struct util_node *e;
++ } data;
++};
++
++static void *util_data_allocate_node(struct util_hash_data *hash)
++{
++ return malloc(hash->nodeSize);
++}
++
++static void util_free_node(struct util_node *node)
++{
++ free(node);
++}
++
++static struct util_node *
++util_hash_create_node(struct util_hash *hash,
++ unsigned akey, void *avalue,
++ struct util_node **anextNode)
++{
++ struct util_node *node = util_data_allocate_node(hash->data.d);
++
++ if (!node)
++ return NULL;
++
++ node->key = akey;
++ node->value = avalue;
++
++ node->next = (struct util_node*)(*anextNode);
++ *anextNode = node;
++ ++hash->data.d->size;
++ return node;
++}
++
++static void util_data_rehash(struct util_hash_data *hash, int hint)
++{
++ if (hint < 0) {
++ hint = countBits(-hint);
++ if (hint < MinNumBits)
++ hint = MinNumBits;
++ hash->userNumBits = (short)hint;
++ while (primeForNumBits(hint) < (hash->size >> 1))
++ ++hint;
++ } else if (hint < MinNumBits) {
++ hint = MinNumBits;
++ }
++
++ if (hash->numBits != hint) {
++ struct util_node *e = (struct util_node *)(hash);
++ struct util_node **oldBuckets = hash->buckets;
++ int oldNumBuckets = hash->numBuckets;
++ int i = 0;
++
++ hash->numBits = (short)hint;
++ hash->numBuckets = primeForNumBits(hint);
++ hash->buckets = malloc(sizeof(struct util_node*) * hash->numBuckets);
++ for (i = 0; i < hash->numBuckets; ++i)
++ hash->buckets[i] = e;
++
++ for (i = 0; i < oldNumBuckets; ++i) {
++ struct util_node *firstNode = oldBuckets[i];
++ while (firstNode != e) {
++ unsigned h = firstNode->key;
++ struct util_node *lastNode = firstNode;
++ struct util_node *afterLastNode;
++ struct util_node **beforeFirstNode;
++
++ while (lastNode->next != e && lastNode->next->key == h)
++ lastNode = lastNode->next;
++
++ afterLastNode = lastNode->next;
++ beforeFirstNode = &hash->buckets[h % hash->numBuckets];
++ while (*beforeFirstNode != e)
++ beforeFirstNode = &(*beforeFirstNode)->next;
++ lastNode->next = *beforeFirstNode;
++ *beforeFirstNode = firstNode;
++ firstNode = afterLastNode;
++ }
++ }
++ free(oldBuckets);
++ }
++}
++
++static void util_data_might_grow(struct util_hash_data *hash)
++{
++ if (hash->size >= hash->numBuckets)
++ util_data_rehash(hash, hash->numBits + 1);
++}
++
++static void util_data_has_shrunk(struct util_hash_data *hash)
++{
++ if (hash->size <= (hash->numBuckets >> 3) &&
++ hash->numBits > hash->userNumBits) {
++ int max = MAX(hash->numBits-2, hash->userNumBits);
++ util_data_rehash(hash, max);
++ }
++}
++
++static struct util_node *util_data_first_node(struct util_hash_data *hash)
++{
++ struct util_node *e = (struct util_node *)(hash);
++ struct util_node **bucket = hash->buckets;
++ int n = hash->numBuckets;
++ while (n--) {
++ if (*bucket != e)
++ return *bucket;
++ ++bucket;
++ }
++ return e;
++}
++
++static struct util_node **util_hash_find_node(struct util_hash *hash, unsigned akey)
++{
++ struct util_node **node;
++
++ if (hash->data.d->numBuckets) {
++ node = (struct util_node **)(&hash->data.d->buckets[akey % hash->data.d->numBuckets]);
++ assert(*node == hash->data.e || (*node)->next);
++ while (*node != hash->data.e && (*node)->key != akey)
++ node = &(*node)->next;
++ } else {
++ node = (struct util_node **)((const struct util_node * const *)(&hash->data.e));
++ }
++ return node;
++}
++
++struct util_hash_iter util_hash_insert(struct util_hash *hash,
++ unsigned key, void *data)
++{
++ util_data_might_grow(hash->data.d);
++
++ {
++ struct util_node **nextNode = util_hash_find_node(hash, key);
++ struct util_node *node = util_hash_create_node(hash, key, data, nextNode);
++ if (!node) {
++ struct util_hash_iter null_iter = {hash, 0};
++ return null_iter;
++ }
++
++ {
++ struct util_hash_iter iter = {hash, node};
++ return iter;
++ }
++ }
++}
++
++struct util_hash * util_hash_create(void)
++{
++ struct util_hash *hash = malloc(sizeof(struct util_hash));
++ if (!hash)
++ return NULL;
++
++ hash->data.d = malloc(sizeof(struct util_hash_data));
++ if (!hash->data.d) {
++ free(hash);
++ return NULL;
++ }
++
++ hash->data.d->fakeNext = 0;
++ hash->data.d->buckets = 0;
++ hash->data.d->size = 0;
++ hash->data.d->nodeSize = sizeof(struct util_node);
++ hash->data.d->userNumBits = (short)MinNumBits;
++ hash->data.d->numBits = 0;
++ hash->data.d->numBuckets = 0;
++
++ return hash;
++}
++
++void util_hash_delete(struct util_hash *hash)
++{
++ struct util_node *e_for_x = (struct util_node *)(hash->data.d);
++ struct util_node **bucket = (struct util_node **)(hash->data.d->buckets);
++ int n = hash->data.d->numBuckets;
++ while (n--) {
++ struct util_node *cur = *bucket++;
++ while (cur != e_for_x) {
++ struct util_node *next = cur->next;
++ util_free_node(cur);
++ cur = next;
++ }
++ }
++ free(hash->data.d->buckets);
++ free(hash->data.d);
++ free(hash);
++}
++
++struct util_hash_iter util_hash_find(struct util_hash *hash,
++ unsigned key)
++{
++ struct util_node **nextNode = util_hash_find_node(hash, key);
++ struct util_hash_iter iter = {hash, *nextNode};
++ return iter;
++}
++
++unsigned util_hash_iter_key(struct util_hash_iter iter)
++{
++ if (!iter.node || iter.hash->data.e == iter.node)
++ return 0;
++ return iter.node->key;
++}
++
++void * util_hash_iter_data(struct util_hash_iter iter)
++{
++ if (!iter.node || iter.hash->data.e == iter.node)
++ return 0;
++ return iter.node->value;
++}
++
++static struct util_node *util_hash_data_next(struct util_node *node)
++{
++ union {
++ struct util_node *next;
++ struct util_node *e;
++ struct util_hash_data *d;
++ } a;
++ int start;
++ struct util_node **bucket;
++ int n;
++
++ a.next = node->next;
++ if (!a.next) {
++ /* iterating beyond the last element */
++ return 0;
++ }
++ if (a.next->next)
++ return a.next;
++
++ start = (node->key % a.d->numBuckets) + 1;
++ bucket = a.d->buckets + start;
++ n = a.d->numBuckets - start;
++ while (n--) {
++ if (*bucket != a.e)
++ return *bucket;
++ ++bucket;
++ }
++ return a.e;
++}
++
++struct util_hash_iter util_hash_iter_next(struct util_hash_iter iter)
++{
++ struct util_hash_iter next = {iter.hash, util_hash_data_next(iter.node)};
++ return next;
++}
++
++int util_hash_iter_is_null(struct util_hash_iter iter)
++{
++ if (!iter.node || iter.node == iter.hash->data.e)
++ return 1;
++ return 0;
++}
++
++void * util_hash_take(struct util_hash *hash,
++ unsigned akey)
++{
++ struct util_node **node = util_hash_find_node(hash, akey);
++ if (*node != hash->data.e) {
++ void *t = (*node)->value;
++ struct util_node *next = (*node)->next;
++ util_free_node(*node);
++ *node = next;
++ --hash->data.d->size;
++ util_data_has_shrunk(hash->data.d);
++ return t;
++ }
++ return 0;
++}
++
++struct util_hash_iter util_hash_first_node(struct util_hash *hash)
++{
++ struct util_hash_iter iter = {hash, util_data_first_node(hash->data.d)};
++ return iter;
++}
++
++struct util_hash_iter util_hash_erase(struct util_hash *hash, struct util_hash_iter iter)
++{
++ struct util_hash_iter ret = iter;
++ struct util_node *node = iter.node;
++ struct util_node **node_ptr;
++
++ if (node == hash->data.e)
++ return iter;
++
++ ret = util_hash_iter_next(ret);
++ node_ptr = (struct util_node**)(&hash->data.d->buckets[node->key % hash->data.d->numBuckets]);
++ while (*node_ptr != node)
++ node_ptr = &(*node_ptr)->next;
++ *node_ptr = node->next;
++ util_free_node(node);
++ --hash->data.d->size;
++ return ret;
++}
+diff --git a/amdgpu/util_hash.h b/amdgpu/util_hash.h
+new file mode 100644
+index 0000000..8e0f9a2
+--- /dev/null
++++ b/amdgpu/util_hash.h
+@@ -0,0 +1,99 @@
++/**************************************************************************
++ *
++ * Copyright 2007 VMware, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++/**
++ * @file
++ * Hash implementation.
++ *
++ * This file provides a hash implementation that is capable of dealing
++ * with collisions. It stores colliding entries in linked list. All
++ * functions operating on the hash return an iterator. The iterator
++ * itself points to the collision list. If there wasn't any collision
++ * the list will have just one entry, otherwise client code should
++ * iterate over the entries to find the exact entry among ones that
++ * had the same key (e.g. memcmp could be used on the data to check
++ * that)
++ *
++ * @author Zack Rusin <zackr@vmware.com>
++ */
++
++#ifndef UTIL_HASH_H
++#define UTIL_HASH_H
++
++#include <stdbool.h>
++
++struct util_hash;
++struct util_node;
++
++struct util_hash_iter {
++ struct util_hash *hash;
++ struct util_node *node;
++};
++
++
++struct util_hash *util_hash_create(void);
++void util_hash_delete(struct util_hash *hash);
++
++
++/**
++ * Adds data with the given key to the hash. If an entry with the given
++ * key is already in the hash, the new entry is inserted before it
++ * in the collision list.
++ * Returns an iterator pointing to the inserted item in the hash.
++ */
++struct util_hash_iter util_hash_insert(struct util_hash *hash, unsigned key,
++ void *data);
++
++/**
++ * Removes the item pointed to by the current iterator from the hash.
++ * Note that the data itself is not erased and if it was a malloc'ed pointer
++ * it will have to be freed by the caller after this function returns.
++ * Returns an iterator pointing to the item after the removed one in
++ * the hash.
++ */
++struct util_hash_iter util_hash_erase(struct util_hash *hash,
++ struct util_hash_iter iter);
++
++void *util_hash_take(struct util_hash *hash, unsigned key);
++
++
++struct util_hash_iter util_hash_first_node(struct util_hash *hash);
++
++/**
++ * Return an iterator pointing to the first entry in the collision list.
++ */
++struct util_hash_iter util_hash_find(struct util_hash *hash, unsigned key);
++
++
++int util_hash_iter_is_null(struct util_hash_iter iter);
++unsigned util_hash_iter_key(struct util_hash_iter iter);
++void *util_hash_iter_data(struct util_hash_iter iter);
++
++
++struct util_hash_iter util_hash_iter_next(struct util_hash_iter iter);
++
++#endif
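
As the header comment explains, a lookup only narrows the search down to the collision list; the client still has to pick the exact entry itself. A lookup sketch, not part of the patch, assuming the stored values are NUL-terminated strings (an illustrative assumption):

    #include <string.h>

    /* Among all entries hashed under 'key', return the value that
     * matches 'wanted', or NULL if none of them does. */
    static char *find_exact(struct util_hash *hash, unsigned key, const char *wanted)
    {
        struct util_hash_iter iter = util_hash_find(hash, key);

        while (!util_hash_iter_is_null(iter) &&
               util_hash_iter_key(iter) == key) {
            char *data = util_hash_iter_data(iter);
            if (data && strcmp(data, wanted) == 0)
                return data;
            iter = util_hash_iter_next(iter);
        }
        return NULL;
    }
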
+diff --git a/amdgpu/util_hash_table.c b/amdgpu/util_hash_table.c
+new file mode 100644
+index 0000000..cb7213c
+--- /dev/null
++++ b/amdgpu/util_hash_table.c
+@@ -0,0 +1,257 @@
++/**************************************************************************
++ *
++ * Copyright 2008 VMware, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++/**
++ * @file
++ * General purpose hash table implementation.
++ *
++ * Just uses util_hash for now, but it might be better to switch to a linear
++ * probing hash table implementation at some point -- they are said to have
++ * better lookup and cache performance, and it appears to be possible to write
++ * a lock-free implementation of such hash tables.
++ *
++ * @author José Fonseca <jfonseca@vmware.com>
++ */
++
++
++
++#include "util_hash_table.h"
++#include "util_hash.h"
++
++#include <stdlib.h>
++#include <assert.h>
++
++struct util_hash_table
++{
++ struct util_hash *head;
++
++ /** Hash function */
++ unsigned (*make_hash)(void *key);
++
++ /** Compare two keys */
++ int (*compare)(void *key1, void *key2);
++};
++
++struct util_hash_table_item
++{
++ void *key;
++ void *value;
++};
++
++
++static struct util_hash_table_item *
++util_hash_table_item(struct util_hash_iter iter)
++{
++ return (struct util_hash_table_item *)util_hash_iter_data(iter);
++}
++
++struct util_hash_table *util_hash_table_create(unsigned (*hash)(void *key),
++ int (*compare)(void *key1, void *key2))
++{
++ struct util_hash_table *ht;
++
++ ht = malloc(sizeof(struct util_hash_table));
++ if(!ht)
++ return NULL;
++
++ ht->head = util_hash_create();
++ if(!ht->head) {
++ free(ht);
++ return NULL;
++ }
++
++ ht->make_hash = hash;
++ ht->compare = compare;
++
++ return ht;
++}
++
++static struct util_hash_iter
++util_hash_table_find_iter(struct util_hash_table *ht,
++ void *key, unsigned key_hash)
++{
++ struct util_hash_iter iter;
++ struct util_hash_table_item *item;
++
++ iter = util_hash_find(ht->head, key_hash);
++ while (!util_hash_iter_is_null(iter)) {
++ item = (struct util_hash_table_item *)util_hash_iter_data(iter);
++ if (!ht->compare(item->key, key))
++ break;
++ iter = util_hash_iter_next(iter);
++ }
++
++ return iter;
++}
++
++static struct util_hash_table_item *
++util_hash_table_find_item(struct util_hash_table *ht,
++ void *key, unsigned key_hash)
++{
++ struct util_hash_iter iter;
++ struct util_hash_table_item *item;
++
++ iter = util_hash_find(ht->head, key_hash);
++ while (!util_hash_iter_is_null(iter)) {
++ item = (struct util_hash_table_item *)util_hash_iter_data(iter);
++ if (!ht->compare(item->key, key))
++ return item;
++ iter = util_hash_iter_next(iter);
++ }
++
++ return NULL;
++}
++
++void util_hash_table_set(struct util_hash_table *ht, void *key, void *value)
++{
++ unsigned key_hash;
++ struct util_hash_table_item *item;
++ struct util_hash_iter iter;
++
++ assert(ht);
++ if (!ht)
++ return;
++
++ key_hash = ht->make_hash(key);
++
++ item = util_hash_table_find_item(ht, key, key_hash);
++ if(item) {
++ /* TODO: key/value destruction? */
++ item->value = value;
++ return;
++ }
++
++ item = malloc(sizeof(struct util_hash_table_item));
++ if(!item)
++ return;
++
++ item->key = key;
++ item->value = value;
++
++ iter = util_hash_insert(ht->head, key_hash, item);
++ if(util_hash_iter_is_null(iter)) {
++ free(item);
++ return;
++ }
++}
++
++void *util_hash_table_get(struct util_hash_table *ht, void *key)
++{
++ unsigned key_hash;
++ struct util_hash_table_item *item;
++
++ assert(ht);
++ if (!ht)
++ return NULL;
++
++ key_hash = ht->make_hash(key);
++
++ item = util_hash_table_find_item(ht, key, key_hash);
++ if(!item)
++ return NULL;
++
++ return item->value;
++}
++
++void util_hash_table_remove(struct util_hash_table *ht, void *key)
++{
++ unsigned key_hash;
++ struct util_hash_iter iter;
++ struct util_hash_table_item *item;
++
++ assert(ht);
++ if (!ht)
++ return;
++
++ key_hash = ht->make_hash(key);
++
++ iter = util_hash_table_find_iter(ht, key, key_hash);
++ if(util_hash_iter_is_null(iter))
++ return;
++
++ item = util_hash_table_item(iter);
++ assert(item);
++ free(item);
++
++ util_hash_erase(ht->head, iter);
++}
++
++void util_hash_table_clear(struct util_hash_table *ht)
++{
++ struct util_hash_iter iter;
++ struct util_hash_table_item *item;
++
++ assert(ht);
++ if (!ht)
++ return;
++
++ iter = util_hash_first_node(ht->head);
++ while (!util_hash_iter_is_null(iter)) {
++ item = (struct util_hash_table_item *)util_hash_take(ht->head, util_hash_iter_key(iter));
++ free(item);
++ iter = util_hash_first_node(ht->head);
++ }
++}
++
++void util_hash_table_foreach(struct util_hash_table *ht,
++ void (*callback)(void *key, void *value, void *data),
++ void *data)
++{
++ struct util_hash_iter iter;
++ struct util_hash_table_item *item;
++
++ assert(ht);
++ if (!ht)
++ return;
++
++ iter = util_hash_first_node(ht->head);
++ while (!util_hash_iter_is_null(iter)) {
++ item = (struct util_hash_table_item *)util_hash_iter_data(iter);
++ callback(item->key, item->value, data);
++ iter = util_hash_iter_next(iter);
++ }
++}
++
++void util_hash_table_destroy(struct util_hash_table *ht)
++{
++ struct util_hash_iter iter;
++ struct util_hash_table_item *item;
++
++ assert(ht);
++ if (!ht)
++ return;
++
++ iter = util_hash_first_node(ht->head);
++ while (!util_hash_iter_is_null(iter)) {
++ item = (struct util_hash_table_item *)util_hash_iter_data(iter);
++ free(item);
++ iter = util_hash_iter_next(iter);
++ }
++
++ util_hash_delete(ht->head);
++ free(ht);
++}
+diff --git a/amdgpu/util_hash_table.h b/amdgpu/util_hash_table.h
+new file mode 100644
+index 0000000..04fe704
+--- /dev/null
++++ b/amdgpu/util_hash_table.h
+@@ -0,0 +1,65 @@
++/**************************************************************************
++ *
++ * Copyright 2008 VMware, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++/**
++ * General purpose hash table.
++ *
++ * @author José Fonseca <jfonseca@vmware.com>
++ */
++
++#ifndef U_HASH_TABLE_H_
++#define U_HASH_TABLE_H_
++
++/**
++ * Generic purpose hash table.
++ */
++struct util_hash_table;
++
++/**
++ * Create a hash table.
++ *
++ * @param hash hash function
++ * @param compare should return 0 for two equal keys.
++ */
++struct util_hash_table *util_hash_table_create(unsigned (*hash)(void *key),
++ int (*compare)(void *key1, void *key2));
++
++void util_hash_table_set(struct util_hash_table *ht, void *key, void *value);
++
++void *util_hash_table_get(struct util_hash_table *ht, void *key);
++
++void util_hash_table_remove(struct util_hash_table *ht, void *key);
++
++void util_hash_table_clear(struct util_hash_table *ht);
++
++void util_hash_table_foreach(struct util_hash_table *ht,
++ void (*callback)(void *key, void *value, void *data),
++ void *data);
++
++void util_hash_table_destroy(struct util_hash_table *ht);
++
++#endif /* U_HASH_TABLE_H_ */
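
util_hash_table layers user-supplied hash and compare callbacks on top of util_hash; compare must return 0 for equal keys, memcmp-style. A construction sketch, not part of the patch, with illustrative callbacks that key the table by pointer identity:

    #include <stdint.h>

    static unsigned hash_ptr(void *key)
    {
        return (unsigned)(uintptr_t)key;
    }

    static int compare_ptr(void *key1, void *key2)
    {
        return key1 != key2;   /* 0 means "equal", as the API requires */
    }

    static struct util_hash_table *make_ptr_table(void)
    {
        return util_hash_table_create(hash_ptr, compare_ptr);
    }

Entries are then added and looked up with util_hash_table_set() and util_hash_table_get().
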
+diff --git a/amdgpu/util_math.h b/amdgpu/util_math.h
+new file mode 100644
+index 0000000..b8de0f8
+--- /dev/null
++++ b/amdgpu/util_math.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++
++#ifndef _UTIL_MATH_H_
++#define _UTIL_MATH_H_
++
++#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) )
++#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) )
++
++#define ALIGN( value, alignment ) ( ((value) + (alignment) - 1) & ~((alignment) - 1) )
++
++#endif /*_UTIL_MATH_H_*/
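
ALIGN() rounds a value up to the next multiple of a power-of-two alignment, which is how the VA manager normalizes sizes before carving up address space. Two quick checks, not part of the patch:

    /* ALIGN(5000, 4096) == (5000 + 4095) & ~4095 == 8192   (rounded up)       */
    /* ALIGN(8192, 4096) == 8192                            (already aligned)  */
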
+diff --git a/configure.ac b/configure.ac
+index 155d577..509f2d4 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -36,6 +36,7 @@ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+
+ # Check for programs
+ AC_PROG_CC
++AC_PROG_CXX
+
+ AC_USE_SYSTEM_EXTENSIONS
+ AC_SYS_LARGEFILE
+@@ -74,6 +75,11 @@ AC_ARG_ENABLE(radeon,
+ [Enable support for radeon's KMS API (default: auto)]),
+ [RADEON=$enableval], [RADEON=auto])
+
++AC_ARG_ENABLE(amdgpu,
++ AS_HELP_STRING([--disable-amdgpu],
++ [Enable support for amdgpu's KMS API (default: auto)]),
++ [AMDGPU=$enableval], [AMDGPU=auto])
++
+ AC_ARG_ENABLE(nouveau,
+ AS_HELP_STRING([--disable-nouveau],
+ [Enable support for nouveau's KMS API (default: auto)]),
+@@ -236,6 +242,9 @@ if test "x$drm_cv_atomic_primitives" = "xnone"; then
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($RADEON, radeon, Radeon, radeon)
+ RADEON=no
+
++ LIBDRM_ATOMICS_NOT_FOUND_MSG($AMDGPU, amdgpu, AMD, amdgpu)
++ AMDGPU=no
++
+ LIBDRM_ATOMICS_NOT_FOUND_MSG($NOUVEAU, nouveau, NVIDIA, nouveau)
+ NOUVEAU=no
+
+@@ -257,6 +266,9 @@ else
+ if test "x$RADEON" = xauto; then
+ RADEON=yes
+ fi
++ if test "x$AMDGPU" = xauto; then
++ AMDGPU=yes
++ fi
+ if test "x$NOUVEAU" = xauto; then
+ NOUVEAU=yes
+ fi
+@@ -336,6 +348,11 @@ if test "x$RADEON" = xyes; then
+ AC_DEFINE(HAVE_RADEON, 1, [Have radeon support])
+ fi
+
++AM_CONDITIONAL(HAVE_AMDGPU, [test "x$AMDGPU" = xyes])
++if test "x$AMDGPU" = xyes; then
++ AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support])
++fi
++
+ AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes])
+ if test "x$TEGRA" = xyes; then
+ AC_DEFINE(HAVE_TEGRA, 1, [Have Tegra support])
+@@ -432,6 +449,8 @@ AC_CONFIG_FILES([
+ intel/libdrm_intel.pc
+ radeon/Makefile
+ radeon/libdrm_radeon.pc
++ amdgpu/Makefile
++ amdgpu/libdrm_amdgpu.pc
+ nouveau/Makefile
+ nouveau/libdrm_nouveau.pc
+ omap/Makefile
+@@ -463,6 +482,7 @@ echo " libkms $LIBKMS"
+ echo " Intel API $INTEL"
+ echo " vmwgfx API $VMWGFX"
+ echo " Radeon API $RADEON"
++echo " AMDGPU API $AMDGPU"
+ echo " Nouveau API $NOUVEAU"
+ echo " OMAP API $OMAP"
+ echo " EXYNOS API $EXYNOS"
+diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
+new file mode 100644
+index 0000000..d248d77
+--- /dev/null
++++ b/include/drm/amdgpu_drm.h
+@@ -0,0 +1,600 @@
++/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
++ *
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Kevin E. Martin <martin@valinux.com>
++ * Gareth Hughes <gareth@valinux.com>
++ * Keith Whitwell <keith@tungstengraphics.com>
++ */
++
++#ifndef __AMDGPU_DRM_H__
++#define __AMDGPU_DRM_H__
++
++#include <drm.h>
++
++#define DRM_AMDGPU_GEM_CREATE 0x00
++#define DRM_AMDGPU_GEM_MMAP 0x01
++#define DRM_AMDGPU_CTX 0x02
++#define DRM_AMDGPU_BO_LIST 0x03
++#define DRM_AMDGPU_CS 0x04
++#define DRM_AMDGPU_INFO 0x05
++#define DRM_AMDGPU_GEM_METADATA 0x06
++#define DRM_AMDGPU_GEM_WAIT_IDLE 0x07
++#define DRM_AMDGPU_GEM_VA 0x08
++#define DRM_AMDGPU_WAIT_CS 0x09
++#define DRM_AMDGPU_GEM_OP 0x10
++#define DRM_AMDGPU_GEM_USERPTR 0x11
++
++#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
++#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
++#define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
++#define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
++#define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
++#define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
++#define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
++#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
++#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, union drm_amdgpu_gem_va)
++#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
++#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
++#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
++
++#define AMDGPU_GEM_DOMAIN_CPU 0x1
++#define AMDGPU_GEM_DOMAIN_GTT 0x2
++#define AMDGPU_GEM_DOMAIN_VRAM 0x4
++#define AMDGPU_GEM_DOMAIN_GDS 0x8
++#define AMDGPU_GEM_DOMAIN_GWS 0x10
++#define AMDGPU_GEM_DOMAIN_OA 0x20
++
++#define AMDGPU_GEM_DOMAIN_MASK 0x3F
++
++/* Flag that CPU access will be required for the case of VRAM domain */
++#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
++/* Flag that CPU access will not work, this VRAM domain is invisible */
++#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
++/* Flag that un-cached attributes should be used for GTT */
++#define AMDGPU_GEM_CREATE_CPU_GTT_UC (1 << 2)
++/* Flag that USWC attributes should be used for GTT */
++#define AMDGPU_GEM_CREATE_CPU_GTT_WC (1 << 3)
++
++/* Flag mask for GTT domain_flags */
++#define AMDGPU_GEM_CREATE_CPU_GTT_MASK \
++ (AMDGPU_GEM_CREATE_CPU_GTT_WC | \
++ AMDGPU_GEM_CREATE_CPU_GTT_UC | \
++ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \
++ AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
++
++struct drm_amdgpu_gem_create_in {
++ /** the requested memory size */
++ uint64_t bo_size;
++ /** physical start_addr alignment in bytes for some HW requirements */
++ uint64_t alignment;
++ /** the requested memory domains */
++ uint64_t domains;
++ /** allocation flags */
++ uint64_t domain_flags;
++};
++
++struct drm_amdgpu_gem_create_out {
++ /** returned GEM object handle */
++ uint32_t handle;
++};
++
++union drm_amdgpu_gem_create {
++ struct drm_amdgpu_gem_create_in in;
++ struct drm_amdgpu_gem_create_out out;
++};
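
The union above is the argument of DRM_IOCTL_AMDGPU_GEM_CREATE: the in half is filled by userspace, the out half is written back by the kernel. A raw-ioctl sketch, not part of the patch, using libdrm's drmCommandWriteRead() helper:

    #include <string.h>
    #include <xf86drm.h>
    #include "amdgpu_drm.h"

    /* Ask the kernel for a CPU-visible VRAM buffer of 'size' bytes and
     * return its GEM handle through *handle. */
    static int gem_create_visible_vram(int fd, uint64_t size, uint32_t *handle)
    {
        union drm_amdgpu_gem_create args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = size;
        args.in.alignment = 4096;
        args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
        args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &args, sizeof(args));
        if (r)
            return r;

        *handle = args.out.handle;
        return 0;
    }
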
++
++/** Opcode to create new residency list. */
++#define AMDGPU_BO_LIST_OP_CREATE 0
++/** Opcode to destroy previously created residency list */
++#define AMDGPU_BO_LIST_OP_DESTROY 1
++/** Opcode to update resource information in the list */
++#define AMDGPU_BO_LIST_OP_UPDATE 2
++
++struct drm_amdgpu_bo_list_in {
++ /** Type of operation */
++ uint32_t operation;
++ /** Handle of list or 0 if we want to create one */
++ uint32_t list_handle;
++ /** Number of BOs in list */
++ uint32_t bo_number;
++ /** Size of each element describing BO */
++ uint32_t bo_info_size;
++ /** Pointer to array describing BOs */
++ uint64_t bo_info_ptr;
++};
++
++struct drm_amdgpu_bo_list_entry {
++ /** Handle of BO */
++ uint32_t bo_handle;
++ /** New (if specified) BO priority to be used during migration */
++ uint32_t bo_priority;
++};
++
++struct drm_amdgpu_bo_list_out {
++ /** Handle of resource list */
++ uint32_t list_handle;
++};
++
++union drm_amdgpu_bo_list {
++ struct drm_amdgpu_bo_list_in in;
++ struct drm_amdgpu_bo_list_out out;
++};
++
++/* context related */
++#define AMDGPU_CTX_OP_ALLOC_CTX 1
++#define AMDGPU_CTX_OP_FREE_CTX 2
++#define AMDGPU_CTX_OP_QUERY_STATE 3
++
++#define AMDGPU_CTX_OP_STATE_RUNNING 1
++
++struct drm_amdgpu_ctx_in {
++ uint32_t op;
++ uint32_t flags;
++ uint32_t ctx_id;
++ uint32_t pad;
++};
++
++union drm_amdgpu_ctx_out {
++ struct {
++ uint32_t ctx_id;
++ } alloc;
++
++ struct {
++ uint64_t flags;
++ uint64_t hangs;
++ } state;
++};
++
++union drm_amdgpu_ctx {
++ struct drm_amdgpu_ctx_in in;
++ union drm_amdgpu_ctx_out out;
++};
++
++/*
++ * This is not a reliable API and you should expect it to fail for any
++ * number of reasons, so have a fallback path that does not use userptr to
++ * perform any operation.
++ */
++#define AMDGPU_GEM_USERPTR_READONLY (1 << 0)
++#define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1)
++#define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2)
++#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
++
++struct drm_amdgpu_gem_userptr {
++ uint64_t addr;
++ uint64_t size;
++ uint32_t flags;
++ uint32_t handle;
++};
++
++#define AMDGPU_TILING_MACRO 0x1
++#define AMDGPU_TILING_MICRO 0x2
++#define AMDGPU_TILING_SWAP_16BIT 0x4
++#define AMDGPU_TILING_R600_NO_SCANOUT AMDGPU_TILING_SWAP_16BIT
++#define AMDGPU_TILING_SWAP_32BIT 0x8
++/* this object requires a surface when mapped - i.e. front buffer */
++#define AMDGPU_TILING_SURFACE 0x10
++#define AMDGPU_TILING_MICRO_SQUARE 0x20
++#define AMDGPU_TILING_EG_BANKW_SHIFT 8
++#define AMDGPU_TILING_EG_BANKW_MASK 0xf
++#define AMDGPU_TILING_EG_BANKH_SHIFT 12
++#define AMDGPU_TILING_EG_BANKH_MASK 0xf
++#define AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
++#define AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
++#define AMDGPU_TILING_EG_TILE_SPLIT_SHIFT 24
++#define AMDGPU_TILING_EG_TILE_SPLIT_MASK 0xf
++#define AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
++#define AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
++
++#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
++#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
++
++/** The same structure is shared for input/output */
++struct drm_amdgpu_gem_metadata {
++ uint32_t handle; /* GEM Object handle */
++ uint32_t op; /* whether to get or set the metadata */
++ struct {
++ uint64_t flags;
++ uint64_t tiling_info; /* family specific tiling info */
++ uint32_t data_size_bytes;
++ uint32_t data[64];
++ } data;
++};
++
++struct drm_amdgpu_gem_mmap_in {
++ uint32_t handle; /** the GEM object handle */
++};
++
++struct drm_amdgpu_gem_mmap_out {
++ uint64_t addr_ptr; /** mmap offset from the vma offset manager */
++};
++
++union drm_amdgpu_gem_mmap {
++ struct drm_amdgpu_gem_mmap_in in;
++ struct drm_amdgpu_gem_mmap_out out;
++};
++
++struct drm_amdgpu_gem_wait_idle_in {
++ uint32_t handle; /* GEM object handle */
++ uint32_t flags;
++ uint64_t timeout; /* Timeout to wait. If 0, the call returns immediately with the status */
++};
++
++struct drm_amdgpu_gem_wait_idle_out {
++ uint32_t status; /* BO status: 0 - BO is idle, 1 - BO is busy */
++ uint32_t domain; /* Returned current memory domain */
++};
++
++union drm_amdgpu_gem_wait_idle {
++ struct drm_amdgpu_gem_wait_idle_in in;
++ struct drm_amdgpu_gem_wait_idle_out out;
++};
++
++struct drm_amdgpu_wait_cs_in {
++ uint64_t handle;
++ uint64_t timeout;
++ uint32_t ip_type;
++ uint32_t ip_instance;
++ uint32_t ring;
++};
++
++struct drm_amdgpu_wait_cs_out {
++ uint64_t status;
++};
++
++union drm_amdgpu_wait_cs {
++ struct drm_amdgpu_wait_cs_in in;
++ struct drm_amdgpu_wait_cs_out out;
++};
++
++/* Sets or returns a value associated with a buffer. */
++struct drm_amdgpu_gem_op {
++ uint32_t handle; /* buffer */
++ uint32_t op; /* AMDGPU_GEM_OP_* */
++ uint64_t value; /* input or return value */
++};
++
++#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
++#define AMDGPU_GEM_OP_SET_INITIAL_DOMAIN 1
++
++#define AMDGPU_VA_OP_MAP 1
++#define AMDGPU_VA_OP_UNMAP 2
++
++#define AMDGPU_VA_RESULT_OK 0
++#define AMDGPU_VA_RESULT_ERROR 1
++#define AMDGPU_VA_RESULT_VA_INVALID_ALIGNMENT 2
++
++/* Mapping flags */
++/* readable mapping */
++#define AMDGPU_VM_PAGE_READABLE (1 << 1)
++/* writable mapping */
++#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
++/* executable mapping, new for VI */
++#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
++
++struct drm_amdgpu_gem_va_in {
++ /* GEM object handle */
++ uint32_t handle;
++ uint32_t pad;
++ /* map or unmap*/
++ uint32_t operation;
++ /* specify mapping flags */
++ uint32_t flags;
++ /* va address to assign. Must be correctly aligned. */
++ uint64_t va_address;
++ /* Specify offset inside of BO to assign. Must be correctly aligned.*/
++ uint64_t offset_in_bo;
++ /* Specify mapping size. If 0 and offset is 0 then map the whole BO.*/
++ /* Must be correctly aligned. */
++ uint64_t map_size;
++};
++
++struct drm_amdgpu_gem_va_out {
++ uint32_t result;
++};
++
++union drm_amdgpu_gem_va {
++ struct drm_amdgpu_gem_va_in in;
++ struct drm_amdgpu_gem_va_out out;
++};
++
++#define AMDGPU_HW_IP_GFX 0
++#define AMDGPU_HW_IP_COMPUTE 1
++#define AMDGPU_HW_IP_DMA 2
++#define AMDGPU_HW_IP_UVD 3
++#define AMDGPU_HW_IP_VCE 4
++#define AMDGPU_HW_IP_NUM 5
++
++#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
++
++#define AMDGPU_CHUNK_ID_IB 0x01
++#define AMDGPU_CHUNK_ID_FENCE 0x02
++struct drm_amdgpu_cs_chunk {
++ uint32_t chunk_id;
++ uint32_t length_dw;
++ uint64_t chunk_data;
++};
++
++struct drm_amdgpu_cs_in {
++ /** Rendering context id */
++ uint32_t ctx_id;
++ /** Handle of resource list associated with CS */
++ uint32_t bo_list_handle;
++ uint32_t num_chunks;
++ uint32_t pad;
++ /* this points to an array of uint64_t, each pointing to a cs chunk */
++ uint64_t chunks;
++};
++
++struct drm_amdgpu_cs_out {
++ uint64_t handle;
++};
++
++union drm_amdgpu_cs {
++ struct drm_amdgpu_cs_in in;
++ struct drm_amdgpu_cs_out out;
++};
++
++/* Specify flags to be used for IB */
++
++/* This IB should be submitted to CE */
++#define AMDGPU_IB_FLAG_CE (1<<0)
++
++/* GDS is used by this IB */
++#define AMDGPU_IB_FLAG_GDS (1<<1)
++
++struct drm_amdgpu_cs_chunk_ib {
++ /**
++ * Handle of GEM object to be used as IB or 0 if it is already in
++ * residency list.
++ */
++ uint32_t handle;
++ uint32_t flags; /* IB Flags */
++ uint64_t va_start; /* Virtual address to begin IB execution */
++ uint32_t ib_bytes; /* Size of submission in bytes */
++ uint32_t ip_type; /* HW IP to submit to */
++ uint32_t ip_instance; /* HW IP index of the same type to submit to */
++ uint32_t ring; /* Ring index to submit to */
++};
++
++struct drm_amdgpu_cs_chunk_fence {
++ uint32_t handle;
++ uint32_t offset;
++};
++
++struct drm_amdgpu_cs_chunk_data {
++ union {
++ struct drm_amdgpu_cs_chunk_ib ib_data;
++ struct drm_amdgpu_cs_chunk_fence fence_data;
++ };
++};
++
++/**
++ * Query h/w info: flag that this is an integrated (a.k.a. fusion) GPU
++ */
++#define AMDGPU_IDS_FLAGS_FUSION 0x1
++
++/* indicates whether acceleration is working */
++#define AMDGPU_INFO_ACCEL_WORKING 0x00
++/* get the crtc_id from the mode object id */
++#define AMDGPU_INFO_CRTC_FROM_ID 0x01
++/* query hw IP info */
++#define AMDGPU_INFO_HW_IP_INFO 0x02
++/* query hw IP instance count for the specified type */
++#define AMDGPU_INFO_HW_IP_COUNT 0x03
++/* timestamp for GL_ARB_timer_query */
++#define AMDGPU_INFO_TIMESTAMP 0x05
++/* Query the firmware version */
++#define AMDGPU_INFO_FW_VERSION 0x0e
++ /* Subquery id: Query VCE firmware version */
++ #define AMDGPU_INFO_FW_VCE 0x1
++ /* Subquery id: Query UVD firmware version */
++ #define AMDGPU_INFO_FW_UVD 0x2
++ /* Subquery id: Query GMC firmware version */
++ #define AMDGPU_INFO_FW_GMC 0x03
++ /* Subquery id: Query GFX ME firmware version */
++ #define AMDGPU_INFO_FW_GFX_ME 0x04
++ /* Subquery id: Query GFX PFP firmware version */
++ #define AMDGPU_INFO_FW_GFX_PFP 0x05
++ /* Subquery id: Query GFX CE firmware version */
++ #define AMDGPU_INFO_FW_GFX_CE 0x06
++ /* Subquery id: Query GFX RLC firmware version */
++ #define AMDGPU_INFO_FW_GFX_RLC 0x07
++ /* Subquery id: Query GFX MEC firmware version */
++ #define AMDGPU_INFO_FW_GFX_MEC 0x08
++ /* Subquery id: Query SMC firmware version */
++ #define AMDGPU_INFO_FW_SMC 0x0a
++ /* Subquery id: Query SDMA firmware version */
++ #define AMDGPU_INFO_FW_SDMA 0x0b
++/* number of bytes moved for TTM migration */
++#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
++/* the used VRAM size */
++#define AMDGPU_INFO_VRAM_USAGE 0x10
++/* the used GTT size */
++#define AMDGPU_INFO_GTT_USAGE 0x11
++/* Information about GDS, etc. resource configuration */
++#define AMDGPU_INFO_GDS_CONFIG 0x13
++/* Query information about VRAM and GTT domains */
++#define AMDGPU_INFO_VRAM_GTT 0x14
++/* Query information about a register in the MMR address space */
++#define AMDGPU_INFO_READ_MMR_REG 0x15
++/* Query information about device: rev id, family, etc. */
++#define AMDGPU_INFO_DEV_INFO 0x16
++/* visible vram usage */
++#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
++
++#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
++#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
++#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
++#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
++
++/* Input structure for the INFO ioctl */
++struct drm_amdgpu_info {
++ /* Where the return value will be stored */
++ uint64_t return_pointer;
++ /* The size of the return value. Just like "size" in "snprintf",
++ * it limits how many bytes the kernel can write. */
++ uint32_t return_size;
++ /* The query request id. */
++ uint32_t query;
++
++ union {
++ struct {
++ uint32_t id;
++ } mode_crtc;
++
++ struct {
++ /** AMDGPU_HW_IP_* */
++ uint32_t type;
++ /**
++ * Index of the IP if there are more IPs of the same type.
++ * Ignored by AMDGPU_INFO_HW_IP_COUNT.
++ */
++ uint32_t ip_instance;
++ } query_hw_ip;
++
++ struct {
++ uint32_t dword_offset;
++ uint32_t count; /* number of registers to read */
++ uint32_t instance;
++ uint32_t flags;
++ } read_mmr_reg;
++
++ struct {
++ /** AMDGPU_INFO_FW_* */
++ uint32_t fw_type;
++ /** Index of the IP if there are more IPs of the same type. */
++ uint32_t ip_instance;
++ /**
++ * Index of the engine. Whether this is used depends
++ * on the firmware type. (e.g. MEC, SDMA)
++ */
++ uint32_t index;
++ } query_fw;
++ };
++};
++
++struct drm_amdgpu_info_gds {
++ /** GDS GFX partition size */
++ uint32_t gds_gfx_partition_size;
++ /** GDS compute partition size */
++ uint32_t compute_partition_size;
++ /** total GDS memory size */
++ uint32_t gds_total_size;
++ /** GWS size per GFX partition */
++ uint32_t gws_per_gfx_partition;
++ /** GWS size per compute partition */
++ uint32_t gws_per_compute_partition;
++ /** OA size per GFX partition */
++ uint32_t oa_per_gfx_partition;
++ /** OA size per compute partition */
++ uint32_t oa_per_compute_partition;
++};
++
++struct drm_amdgpu_info_vram_gtt {
++ uint64_t vram_size;
++ uint64_t vram_cpu_accessible_size;
++ uint64_t gtt_size;
++};
++
++struct drm_amdgpu_info_firmware {
++ uint32_t ver;
++ uint32_t feature;
++};
++
++struct drm_amdgpu_info_device {
++ /** PCI Device ID */
++ uint32_t device_id;
++ /** Internal chip revision: A0, A1, etc. */
++ uint32_t chip_rev;
++ uint32_t external_rev;
++ /** Revision id in PCI Config space */
++ uint32_t pci_rev;
++ uint32_t family;
++ uint32_t num_shader_engines;
++ uint32_t num_shader_arrays_per_engine;
++ uint32_t gpu_counter_freq; /* in KHz */
++ uint64_t max_engine_clock; /* in KHz */
++ /* cu information */
++ uint32_t cu_active_number;
++ uint32_t cu_ao_mask;
++ uint32_t cu_bitmap[4][4];
++ /** Render backend pipe mask. One render backend is CB+DB. */
++ uint32_t enabled_rb_pipes_mask;
++ uint32_t num_rb_pipes;
++ uint32_t num_hw_gfx_contexts;
++ uint32_t _pad;
++ uint64_t ids_flags;
++ /** Starting virtual address for UMDs. */
++ uint64_t virtual_address_offset;
++ /** Required alignment of virtual addresses. */
++ uint32_t virtual_address_alignment;
++ /** Page table entry - fragment size */
++ uint32_t pte_fragment_size;
++ uint32_t gart_page_size;
++};
++
++struct drm_amdgpu_info_hw_ip {
++ /** Version of h/w IP */
++ uint32_t hw_ip_version_major;
++ uint32_t hw_ip_version_minor;
++ /** Capabilities */
++ uint64_t capabilities_flags;
++ /** Bitmask of available rings. Bit 0 means ring 0, etc. */
++ uint32_t available_rings;
++};
++
++/* These correspond to the tile index to use; they make explicit the API
++ * that is otherwise only implicitly defined by the tile mode array.
++ */
++#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED 8
++#define SI_TILE_MODE_COLOR_1D 13
++#define SI_TILE_MODE_COLOR_1D_SCANOUT 9
++#define SI_TILE_MODE_COLOR_2D_8BPP 14
++#define SI_TILE_MODE_COLOR_2D_16BPP 15
++#define SI_TILE_MODE_COLOR_2D_32BPP 16
++#define SI_TILE_MODE_COLOR_2D_64BPP 17
++#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP 11
++#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP 12
++#define SI_TILE_MODE_DEPTH_STENCIL_1D 4
++#define SI_TILE_MODE_DEPTH_STENCIL_2D 0
++#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA 3
++#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
++#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
++
++#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
++
++/*
++ * Supported GPU families
++ */
++#define AMDGPU_FAMILY_UNKNOWN 0
++#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
++#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
++#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
++#define AMDGPU_FAMILY_CZ 135 /* Carrizo */
++
++#endif
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/drm/libdrm/0002-drm-add-tests-amdgpu.patch b/meta-amdfalconx86/recipes-graphics/drm/libdrm/0002-drm-add-tests-amdgpu.patch
new file mode 100644
index 00000000..85675beb
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/drm/libdrm/0002-drm-add-tests-amdgpu.patch
@@ -0,0 +1,2487 @@
+From f946a76cb79c3d49e209ff5b48c7aa73af7af0dc Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 20 Apr 2015 12:15:23 -0400
+Subject: [PATCH 2/3] drm: add tests/amdgpu
+
+This adds some basic unit tests for the new amdgpu driver.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ configure.ac | 23 ++
+ tests/Makefile.am | 6 +
+ tests/amdgpu/Makefile.am | 24 ++
+ tests/amdgpu/amdgpu_test.c | 241 +++++++++++++
+ tests/amdgpu/amdgpu_test.h | 119 +++++++
+ tests/amdgpu/basic_tests.c | 676 ++++++++++++++++++++++++++++++++++++
+ tests/amdgpu/bo_tests.c | 151 ++++++++
+ tests/amdgpu/cs_tests.c | 319 +++++++++++++++++
+ tests/amdgpu/uvd_messages.h | 813 ++++++++++++++++++++++++++++++++++++++++++++
+ tests/kmstest/main.c | 1 +
+ 10 files changed, 2373 insertions(+)
+ create mode 100644 tests/amdgpu/Makefile.am
+ create mode 100644 tests/amdgpu/amdgpu_test.c
+ create mode 100644 tests/amdgpu/amdgpu_test.h
+ create mode 100644 tests/amdgpu/basic_tests.c
+ create mode 100644 tests/amdgpu/bo_tests.c
+ create mode 100644 tests/amdgpu/cs_tests.c
+ create mode 100644 tests/amdgpu/uvd_messages.h
+
+diff --git a/configure.ac b/configure.ac
+index 509f2d4..0019f73 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -351,6 +351,28 @@ fi
+ AM_CONDITIONAL(HAVE_AMDGPU, [test "x$AMDGPU" = xyes])
+ if test "x$AMDGPU" = xyes; then
+ AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support])
++
++ # Detect cunit library
++ PKG_CHECK_MODULES([CUNIT], [cunit >= 2.1], [have_cunit=yes], [have_cunit=no])
++
++ # If pkg-config does not find cunit, check for it with AC_CHECK_LIB. We
++ # do this because Debian (Ubuntu) lacks a pkg-config file for cunit.
++ if test "x${have_cunit}" = "xno"; then
++ AC_CHECK_LIB([cunit], [CU_initialize_registry], [have_cunit=yes], [have_cunit=no])
++ if test "x${have_cunit}" = "xyes"; then
++ CUNIT_LIBS="-lcunit"
++ CUNIT_CFLAGS=""
++ AC_SUBST([CUNIT_LIBS])
++ AC_SUBST([CUNIT_CFLAGS])
++ fi
++ fi
++
++ AM_CONDITIONAL(HAVE_CUNIT, [test "x$have_cunit" != "xno"])
++ AC_DEFINE(HAVE_CUNIT, [test "x$have_cunit" != "xno"], [Enable CUnit amdgpu tests])
++
++ if test "x$have_cunit" = "xno"; then
++ AC_MSG_WARN([Could not find cunit library. Disabling amdgpu tests])
++ fi
+ fi
+
+ AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes])
+@@ -467,6 +489,7 @@ AC_CONFIG_FILES([
+ tests/kmstest/Makefile
+ tests/proptest/Makefile
+ tests/radeon/Makefile
++ tests/amdgpu/Makefile
+ tests/vbltest/Makefile
+ tests/exynos/Makefile
+ tests/tegra/Makefile
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index 069285f..a980b3d 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -8,6 +8,12 @@ if HAVE_RADEON
+ SUBDIRS += radeon
+ endif
+
++if HAVE_AMDGPU
++if HAVE_CUNIT
++SUBDIRS += amdgpu
++endif
++endif
++
+ if HAVE_EXYNOS
+ SUBDIRS += exynos
+ endif
+diff --git a/tests/amdgpu/Makefile.am b/tests/amdgpu/Makefile.am
+new file mode 100644
+index 0000000..ba7339d
+--- /dev/null
++++ b/tests/amdgpu/Makefile.am
+@@ -0,0 +1,24 @@
++AM_CFLAGS = \
++ -I $(top_srcdir)/include/drm \
++ -I $(top_srcdir)/amdgpu \
++ -I $(top_srcdir)
++
++LDADD = $(top_builddir)/libdrm.la \
++ $(top_builddir)/amdgpu/libdrm_amdgpu.la
++
++if HAVE_INSTALL_TESTS
++bin_PROGRAMS = \
++ amdgpu_test
++else
++noinst_PROGRAMS = \
++ amdgpu_test
++endif
++
++amdgpu_test_CPPFLAGS = $(CUNIT_CFLAGS)
++amdgpu_test_LDFLAGS = $(CUNIT_LIBS)
++
++amdgpu_test_SOURCES = \
++ amdgpu_test.c \
++ basic_tests.c \
++ bo_tests.c \
++ cs_tests.c
+diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c
+new file mode 100644
+index 0000000..fc14b70
+--- /dev/null
++++ b/tests/amdgpu/amdgpu_test.c
+@@ -0,0 +1,241 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++#include <string.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <unistd.h>
++#include <string.h>
++#include <ctype.h>
++#include <fcntl.h>
++#include <errno.h>
++#include <signal.h>
++#include <time.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <sys/ioctl.h>
++#include <sys/time.h>
++#include <stdarg.h>
++#include <stdint.h>
++
++#include "drm.h"
++#include "xf86drmMode.h"
++#include "xf86drm.h"
++
++#include "CUnit/Basic.h"
++
++#include "amdgpu_test.h"
++
++/**
++ * Open handles for amdgpu devices
++ *
++ */
++int drm_amdgpu[MAX_CARDS_SUPPORTED];
++
++/** The table of all known test suites to run */
++CU_SuiteInfo suites[] = {
++ { "Basic Tests", suite_basic_tests_init,
++ suite_basic_tests_clean, basic_tests },
++ { "BO Tests", suite_bo_tests_init,
++ suite_bo_tests_clean, bo_tests },
++ { "CS Tests", suite_cs_tests_init,
++ suite_cs_tests_clean, cs_tests },
++ CU_SUITE_INFO_NULL,
++};
++
++
++/** Display information about all suites and their tests */
++static void display_test_suites(void)
++{
++ int iSuite;
++ int iTest;
++
++ printf("Suites\n");
++
++ for (iSuite = 0; suites[iSuite].pName != NULL; iSuite++) {
++ printf("Suite id = %d: Name '%s'\n",
++ iSuite + 1, suites[iSuite].pName);
++
++ for (iTest = 0; suites[iSuite].pTests[iTest].pName != NULL;
++ iTest++) {
++ printf(" Test id %d: Name: '%s'\n", iTest + 1,
++ suites[iSuite].pTests[iTest].pName);
++ }
++ }
++}
++
++
++/** Help string for command line parameters */
++static const char usage[] = "Usage: %s [-hl] [<-s <suite id>> [-t <test id>]]\n"
++ "where:\n"
++ " l - Display all suites and their tests\n"
++ " h - Display this help\n"
++ " s - Run only the suite with the given id\n"
++ " t - Run only the test with the given id (use together with -s)\n";
++/** Option string for getopt */
++static const char options[] = "hls:t:";
++
++/* The main() function for setting up and running the tests.
++ * Returns CUE_SUCCESS on success, or another CUnit error code on failure.
++ */
++int main(int argc, char **argv)
++{
++ int c; /* Character received from getopt */
++ int i = 0;
++ int suite_id = -1; /* By default run everything */
++ int test_id = -1; /* By default run all tests in the suite */
++ CU_pSuite pSuite = NULL;
++ CU_pTest pTest = NULL;
++
++ int aval = drmAvailable();
++
++ if (aval == 0) {
++ fprintf(stderr, "DRM driver is not available\n");
++ exit(EXIT_FAILURE);
++ }
++
++
++ for (i = 0; i < MAX_CARDS_SUPPORTED; i++)
++ drm_amdgpu[i] = 0;
++
++
++ /* Parse command line string */
++ opterr = 0; /* Do not print error messages from getopt */
++ while ((c = getopt(argc, argv, options)) != -1) {
++ switch (c) {
++ case 'l':
++ display_test_suites();
++ exit(EXIT_SUCCESS);
++ case 's':
++ suite_id = atoi(optarg);
++ break;
++ case 't':
++ test_id = atoi(optarg);
++ break;
++ case '?':
++ case 'h':
++ fprintf(stderr, usage, argv[0]);
++ exit(EXIT_SUCCESS);
++ default:
++ fprintf(stderr, usage, argv[0]);
++ exit(EXIT_FAILURE);
++ }
++ }
++
++ /* Try to open all possible card connections.
++ * Right now: open only card 0.
++ */
++ printf("Trying to open card 0...\n");
++ drm_amdgpu[0] = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
++
++ if (drm_amdgpu[0] < 0) {
++ perror("Cannot open /dev/dri/card0");
++ exit(EXIT_FAILURE);
++ }
++
++ /** Display version of DRM driver */
++ drmVersionPtr retval;
++ drm_version_t *version = drmMalloc(sizeof(*version));
++
++ version->name_len = 0;
++ version->name = NULL;
++ version->date_len = 0;
++ version->date = NULL;
++ version->desc_len = 0;
++ version->desc = NULL;
++
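++ /* The first DRM_IOCTL_VERSION call only fills in the string lengths;
++ * allocate the buffers and issue it again to fetch the actual strings. */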
++ if (drmIoctl(drm_amdgpu[0], DRM_IOCTL_VERSION, version)) {
++ perror("Could not get DRM driver version\n");
++ drmFree(version);
++ exit(EXIT_FAILURE);
++ }
++
++ if (version->name_len)
++ version->name = drmMalloc(version->name_len + 1);
++ if (version->date_len)
++ version->date = drmMalloc(version->date_len + 1);
++ if (version->desc_len)
++ version->desc = drmMalloc(version->desc_len + 1);
++
++ if (drmIoctl(drm_amdgpu[0], DRM_IOCTL_VERSION, version)) {
++ perror("Could not get information about DRM driver");
++ drmFree(version);
++ exit(EXIT_FAILURE);
++ }
++
++ /* The results might not be null-terminated strings. Add zero */
++ if (version->name_len)
++ version->name[version->name_len] = '\0';
++ if (version->date_len)
++ version->date[version->date_len] = '\0';
++ if (version->desc_len)
++ version->desc[version->desc_len] = '\0';
++
++ printf("DRM Driver: Name: [%s] : Date [%s] : Description [%s]\n",
++ version->name, version->date, version->desc);
++
++ drmFree(version);
++
++ /* Initialize test suites to run */
++
++ /* initialize the CUnit test registry */
++ if (CUE_SUCCESS != CU_initialize_registry())
++ return CU_get_error();
++
++ /* Register suites. */
++ if (CU_register_suites(suites) != CUE_SUCCESS) {
++ fprintf(stderr, "suite registration failed - %s\n",
++ CU_get_error_msg());
++ exit(EXIT_FAILURE);
++ }
++
++ /* Run tests using the CUnit Basic interface */
++ CU_basic_set_mode(CU_BRM_VERBOSE);
++
++ if (suite_id != -1) { /* Did the user specify a particular suite? */
++ pSuite = CU_get_suite_by_index((unsigned int) suite_id,
++ CU_get_registry());
++
++ if (pSuite) {
++ if (test_id != -1) { /* Did the user specify a particular test? */
++ pTest = CU_get_test_by_index(
++ (unsigned int) test_id,
++ pSuite);
++ if (pTest)
++ CU_basic_run_test(pSuite, pTest);
++ else {
++ fprintf(stderr, "Invalid test id: %d\n",
++ test_id);
++ exit(EXIT_FAILURE);
++ }
++ } else
++ CU_basic_run_suite(pSuite);
++ } else {
++ fprintf(stderr, "Invalid suite id : %d\n",
++ suite_id);
++ exit(EXIT_FAILURE);
++ }
++ } else
++ CU_basic_run_tests();
++
++ CU_cleanup_registry();
++ return CU_get_error();
++}
+diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h
+new file mode 100644
+index 0000000..508e561
+--- /dev/null
++++ b/tests/amdgpu/amdgpu_test.h
+@@ -0,0 +1,119 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++
++#ifndef _amdgpu_test_h_
++#define _amdgpu_test_h_
++
++#include "amdgpu.h"
++
++/**
++ * Maximum number of cards in the system that we are able to handle
++ */
++#define MAX_CARDS_SUPPORTED 4
++
++/* Declaration of the array keeping the DRM file handles (defined in amdgpu_test.c) */
++extern int drm_amdgpu[MAX_CARDS_SUPPORTED];
++
++/************************* Basic test suite ********************************/
++
++/*
++ * Define basic test suite to serve as the starting point for future testing
++*/
++
++/**
++ * Initialize basic test suite
++ */
++int suite_basic_tests_init();
++
++/**
++ * Deinitialize basic test suite
++ */
++int suite_basic_tests_clean();
++
++/**
++ * Tests in basic test suite
++ */
++extern CU_TestInfo basic_tests[];
++
++/**
++ * Initialize bo test suite
++ */
++int suite_bo_tests_init();
++
++/**
++ * Deinitialize bo test suite
++ */
++int suite_bo_tests_clean();
++
++/**
++ * Tests in bo test suite
++ */
++extern CU_TestInfo bo_tests[];
++
++/**
++ * Initialize cs test suite
++ */
++int suite_cs_tests_init();
++
++/**
++ * Deinitialize cs test suite
++ */
++int suite_cs_tests_clean();
++
++/**
++ * Tests in cs test suite
++ */
++extern CU_TestInfo cs_tests[];
++
++/**
++ * Helper functions
++ */
++static inline amdgpu_bo_handle gpu_mem_alloc(
++ amdgpu_device_handle device_handle,
++ uint64_t size,
++ uint64_t alignment,
++ uint32_t type,
++ uint64_t flags,
++ uint64_t *vmc_addr)
++{
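++ /* Allocate a BO in the requested heap with the given size, alignment
++ * and flags; assert on failure and return the handle, with the GPU (MC)
++ * address written to *vmc_addr. */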
++ struct amdgpu_bo_alloc_request req = {0};
++ struct amdgpu_bo_alloc_result res = {0};
++ int r;
++
++ CU_ASSERT_NOT_EQUAL(vmc_addr, NULL);
++
++ req.alloc_size = size;
++ req.phys_alignment = alignment;
++ req.preferred_heap = type;
++ req.flags = flags;
++
++ r = amdgpu_bo_alloc(device_handle, &req, &res);
++ CU_ASSERT_EQUAL(r, 0);
++
++ CU_ASSERT_NOT_EQUAL(res.virtual_mc_base_address, 0);
++ CU_ASSERT_NOT_EQUAL(res.buf_handle, NULL);
++ *vmc_addr = res.virtual_mc_base_address;
++ return res.buf_handle;
++}
++
++#endif /* _amdgpu_test_h_ */
+diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
+new file mode 100644
+index 0000000..c53f6a0
+--- /dev/null
++++ b/tests/amdgpu/basic_tests.c
+@@ -0,0 +1,676 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++#include <stdio.h>
++#include <stdlib.h>
++#include <unistd.h>
++
++#include "CUnit/Basic.h"
++
++#include "amdgpu_test.h"
++#include "amdgpu_drm.h"
++
++static amdgpu_device_handle device_handle;
++static uint32_t major_version;
++static uint32_t minor_version;
++
++static void amdgpu_query_info_test(void);
++static void amdgpu_memory_alloc(void);
++static void amdgpu_command_submission_gfx(void);
++static void amdgpu_command_submission_compute(void);
++static void amdgpu_command_submission_sdma(void);
++static void amdgpu_userptr_test(void);
++
++CU_TestInfo basic_tests[] = {
++ { "Query Info Test", amdgpu_query_info_test },
++ { "Memory alloc Test", amdgpu_memory_alloc },
++ { "Userptr Test", amdgpu_userptr_test },
++ { "Command submission Test (GFX)", amdgpu_command_submission_gfx },
++ { "Command submission Test (Compute)", amdgpu_command_submission_compute },
++ { "Command submission Test (SDMA)", amdgpu_command_submission_sdma },
++ CU_TEST_INFO_NULL,
++};
++#define BUFFER_SIZE (8 * 1024)
++#define SDMA_PKT_HEADER_op_offset 0
++#define SDMA_PKT_HEADER_op_mask 0x000000FF
++#define SDMA_PKT_HEADER_op_shift 0
++#define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift)
++#define SDMA_OPCODE_CONSTANT_FILL 11
++# define SDMA_CONSTANT_FILL_EXTRA_SIZE(x) ((x) << 14)
++ /* 0 = byte fill
++ * 2 = DW fill
++ */
++#define SDMA_PACKET(op, sub_op, e) ((((e) & 0xFFFF) << 16) | \
++ (((sub_op) & 0xFF) << 8) | \
++ (((op) & 0xFF) << 0))
++#define SDMA_OPCODE_WRITE 2
++# define SDMA_WRITE_SUB_OPCODE_LINEAR 0
++# define SDMA_WRTIE_SUB_OPCODE_TILED 1
++
++#define SDMA_OPCODE_COPY 1
++# define SDMA_COPY_SUB_OPCODE_LINEAR 0
++
++int suite_basic_tests_init(void)
++{
++ int r;
++
++ r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
++ &minor_version, &device_handle);
++
++ if (r == 0)
++ return CUE_SUCCESS;
++ else
++ return CUE_SINIT_FAILED;
++}
++
++int suite_basic_tests_clean(void)
++{
++ int r = amdgpu_device_deinitialize(device_handle);
++
++ if (r == 0)
++ return CUE_SUCCESS;
++ else
++ return CUE_SCLEAN_FAILED;
++}
++
++static void amdgpu_query_info_test(void)
++{
++ struct amdgpu_gpu_info gpu_info = {0};
++ uint32_t version, feature;
++ int r;
++
++ r = amdgpu_query_gpu_info(device_handle, &gpu_info);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_query_firmware_version(device_handle, AMDGPU_INFO_FW_VCE, 0,
++ 0, &version, &feature);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_memory_alloc(void)
++{
++ amdgpu_bo_handle bo;
++ uint64_t bo_mc;
++ int r;
++
++ /* Test visible VRAM */
++ bo = gpu_mem_alloc(device_handle,
++ 4096, 4096,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
++ &bo_mc);
++
++ r = amdgpu_bo_free(bo);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* Test invisible VRAM */
++ bo = gpu_mem_alloc(device_handle,
++ 4096, 4096,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
++ &bo_mc);
++
++ r = amdgpu_bo_free(bo);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* Test GART Cacheable */
++ bo = gpu_mem_alloc(device_handle,
++ 4096, 4096,
++ AMDGPU_GEM_DOMAIN_GTT,
++ 0,
++ &bo_mc);
++
++ r = amdgpu_bo_free(bo);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* Test GART USWC */
++ bo = gpu_mem_alloc(device_handle,
++ 4096, 4096,
++ AMDGPU_GEM_DOMAIN_GTT,
++ AMDGPU_GEM_CREATE_CPU_GTT_WC,
++ &bo_mc);
++
++ r = amdgpu_bo_free(bo);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_command_submission_gfx(void)
++{
++ amdgpu_context_handle context_handle;
++ struct amdgpu_cs_ib_alloc_result ib_result = {0};
++ struct amdgpu_cs_ib_alloc_result ib_result_ce = {0};
++ struct amdgpu_cs_request ibs_request = {0};
++ struct amdgpu_cs_ib_info ib_info[2];
++ struct amdgpu_cs_query_fence fence_status = {0};
++ uint32_t *ptr;
++ uint32_t expired;
++ int r;
++
++ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_cs_alloc_ib(device_handle, context_handle,
++ amdgpu_cs_ib_size_4K, &ib_result);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_cs_alloc_ib(device_handle, context_handle,
++ amdgpu_cs_ib_size_4K, &ib_result_ce);
++ CU_ASSERT_EQUAL(r, 0);
++
++ memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));
++
++ /* IT_SET_CE_DE_COUNTERS */
++ ptr = ib_result_ce.cpu;
++ ptr[0] = 0xc0008900;
++ ptr[1] = 0;
++ ptr[2] = 0xc0008400;
++ ptr[3] = 1;
++ ib_info[0].ib_handle = ib_result_ce.handle;
++ ib_info[0].size = 4;
++ ib_info[0].flags = AMDGPU_CS_GFX_IB_CE;
++
++ /* IT_WAIT_ON_CE_COUNTER */
++ ptr = ib_result.cpu;
++ ptr[0] = 0xc0008600;
++ ptr[1] = 0x00000001;
++ ib_info[1].ib_handle = ib_result.handle;
++ ib_info[1].size = 2;
++
++ ibs_request.ip_type = AMDGPU_HW_IP_GFX;
++ ibs_request.number_of_ibs = 2;
++ ibs_request.ibs = ib_info;
++
++ r = amdgpu_cs_submit(device_handle, context_handle, 0,
++ &ibs_request, 1, &fence_status.fence);
++ CU_ASSERT_EQUAL(r, 0);
++
++ fence_status.context = context_handle;
++ fence_status.timeout_ns = AMDGPU_TIMEOUT_INFINITE;
++ fence_status.ip_type = AMDGPU_HW_IP_GFX;
++
++ r = amdgpu_cs_query_fence_status(device_handle, &fence_status, &expired);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_cs_ctx_free(device_handle, context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_command_submission_compute(void)
++{
++ amdgpu_context_handle context_handle;
++ struct amdgpu_cs_ib_alloc_result ib_result;
++ struct amdgpu_cs_request ibs_request;
++ struct amdgpu_cs_ib_info ib_info;
++ struct amdgpu_cs_query_fence fence_status;
++ uint32_t *ptr;
++ uint32_t expired;
++ int i, r, instance;
++
++ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ for (instance = 0; instance < 8; instance++) {
++ memset(&ib_result, 0, sizeof(struct amdgpu_cs_ib_alloc_result));
++ r = amdgpu_cs_alloc_ib(device_handle, context_handle,
++ amdgpu_cs_ib_size_4K, &ib_result);
++ CU_ASSERT_EQUAL(r, 0);
++
++ ptr = ib_result.cpu;
++ for (i = 0; i < 16; ++i)
++ ptr[i] = 0xffff1000;
++
++ memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
++ ib_info.ib_handle = ib_result.handle;
++ ib_info.size = 16;
++
++ memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
++ ibs_request.ip_type = AMDGPU_HW_IP_COMPUTE;
++ ibs_request.ring = instance;
++ ibs_request.number_of_ibs = 1;
++ ibs_request.ibs = &ib_info;
++
++ memset(&fence_status, 0, sizeof(struct amdgpu_cs_query_fence));
++ r = amdgpu_cs_submit(device_handle, context_handle, 0,
++ &ibs_request, 1, &fence_status.fence);
++ CU_ASSERT_EQUAL(r, 0);
++
++ fence_status.context = context_handle;
++ fence_status.timeout_ns = AMDGPU_TIMEOUT_INFINITE;
++ fence_status.ip_type = AMDGPU_HW_IP_COMPUTE;
++ fence_status.ring = instance;
++
++ r = amdgpu_cs_query_fence_status(device_handle, &fence_status, &expired);
++ CU_ASSERT_EQUAL(r, 0);
++ }
++
++ r = amdgpu_cs_ctx_free(device_handle, context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++/*
++ * The caller must create and release:
++ * pm4_src, resources, ib_info, and ibs_request.
++ * Submits the command stream described in ibs_request and waits for the IB to complete.
++ */
++static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
++ int instance, int pm4_dw, uint32_t *pm4_src,
++ int res_cnt, amdgpu_bo_handle *resources,
++ struct amdgpu_cs_ib_info *ib_info,
++ struct amdgpu_cs_request *ibs_request)
++{
++ int r, i, j;
++ uint32_t expired;
++ uint32_t *ring_ptr;
++ struct amdgpu_cs_ib_alloc_result ib_result = {0};
++ struct amdgpu_cs_query_fence fence_status = {0};
++
++ /* prepare CS */
++ CU_ASSERT_NOT_EQUAL(pm4_src, NULL);
++ CU_ASSERT_NOT_EQUAL(resources, NULL);
++ CU_ASSERT_NOT_EQUAL(ib_info, NULL);
++ CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
++ CU_ASSERT_TRUE(pm4_dw <= 1024);
++
++ /* allocate IB */
++ r = amdgpu_cs_alloc_ib(device_handle, context_handle,
++ amdgpu_cs_ib_size_4K, &ib_result);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* copy the caller's PM4 packet into the IB */
++ ring_ptr = ib_result.cpu;
++ memcpy(ring_ptr, pm4_src, pm4_dw * sizeof(*pm4_src));
++
++ ib_info->ib_handle = ib_result.handle;
++ ib_info->size = pm4_dw;
++
++ ibs_request->ip_type = AMDGPU_HW_IP_DMA;
++ ibs_request->ring = instance;
++ ibs_request->number_of_ibs = 1;
++ ibs_request->ibs = ib_info;
++ ibs_request->number_of_resources = res_cnt;
++ ibs_request->resources = resources;
++
++
++ CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
++
++ /* submit CS */
++ r = amdgpu_cs_submit(device_handle, context_handle, 0,
++ ibs_request, 1, &fence_status.fence);
++ CU_ASSERT_EQUAL(r, 0);
++
++ fence_status.ip_type = AMDGPU_HW_IP_DMA;
++ fence_status.ring = ibs_request->ring;
++ fence_status.context = context_handle;
++ fence_status.timeout_ns = AMDGPU_TIMEOUT_INFINITE;
++
++ /* wait for the IB to complete */
++ r = amdgpu_cs_query_fence_status(device_handle, &fence_status,
++ &expired);
++ CU_ASSERT_EQUAL(r, 0);
++ CU_ASSERT_EQUAL(expired, true);
++}
++
++static void amdgpu_command_submission_sdma_write_linear(void)
++{
++ const int sdma_write_length = 128;
++ const int pm4_dw = 256;
++ amdgpu_context_handle context_handle;
++ amdgpu_bo_handle bo;
++ amdgpu_bo_handle *resources;
++ uint32_t *pm4;
++ struct amdgpu_cs_ib_info *ib_info;
++ struct amdgpu_cs_request *ibs_request;
++ uint64_t bo_mc;
++ volatile uint32_t *bo_cpu;
++ int i, j, r, loop;
++ uint64_t gtt_flags[3] = {0, AMDGPU_GEM_CREATE_CPU_GTT_UC,
++ AMDGPU_GEM_CREATE_CPU_GTT_WC};
++
++ pm4 = malloc(pm4_dw * 4);
++ CU_ASSERT_NOT_EQUAL(pm4, NULL);
++
++ ib_info = malloc(sizeof(*ib_info));
++ CU_ASSERT_NOT_EQUAL(ib_info, NULL);
++ memset(ib_info, 0, sizeof(*ib_info));
++
++ ibs_request = malloc(sizeof(*ibs_request));
++ CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
++ memset(ibs_request, 0, sizeof(*ibs_request));
++
++ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* prepare resource */
++ resources = malloc(sizeof(amdgpu_bo_handle));
++ CU_ASSERT_NOT_EQUAL(resources, NULL);
++
++ loop = 0;
++ while(loop < 3) {
++ /* allocate a GTT BO for SDMA use (caching flags vary per loop) */
++ bo = gpu_mem_alloc(device_handle,
++ sdma_write_length * sizeof(uint32_t),
++ 4096, AMDGPU_GEM_DOMAIN_GTT,
++ gtt_flags[loop], &bo_mc);
++
++ CU_ASSERT_EQUAL(amdgpu_bo_cpu_map(bo, (void **)&bo_cpu), 0);
++ CU_ASSERT_NOT_EQUAL(bo_cpu, NULL);
++
++ /* clear bo */
++ memset((void*)bo_cpu, 0, sdma_write_length * sizeof(uint32_t));
++
++
++ resources[0] = bo;
++
++ /* fill in the PM4 packet: test SDMA write-linear */
++ i = j = 0;
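++ /* WRITE_LINEAR packet: header, destination address (lo/hi),
++ * dword count, then the data payload */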
++ pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
++ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
++ pm4[i++] = 0xffffffff & bo_mc;
++ pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
++ pm4[i++] = sdma_write_length;
++ while(j++ < sdma_write_length)
++ pm4[i++] = 0xdeadbeaf;
++
++ amdgpu_sdma_test_exec_cs(context_handle, 0,
++ i, pm4,
++ 1, resources,
++ ib_info, ibs_request);
++
++ /* verify that the SDMA result matches the expected pattern */
++ i = 0;
++ while(i < sdma_write_length) {
++ CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
++ }
++ amdgpu_bo_free(bo);
++ loop++;
++ }
++ /* clean resources */
++ free(resources);
++ free(ibs_request);
++ free(ib_info);
++ free(pm4);
++
++ /* end of test */
++ r = amdgpu_cs_ctx_free(device_handle, context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_command_submission_sdma_const_fill(void)
++{
++ const int sdma_write_length = 1024 * 1024;
++ const int pm4_dw = 256;
++ amdgpu_context_handle context_handle;
++ amdgpu_bo_handle bo;
++ amdgpu_bo_handle *resources;
++ uint32_t *pm4;
++ struct amdgpu_cs_ib_info *ib_info;
++ struct amdgpu_cs_request *ibs_request;
++ uint64_t bo_mc;
++ volatile uint32_t *bo_cpu;
++ int i, j, r, loop;
++ uint64_t gtt_flags[3] = {0, AMDGPU_GEM_CREATE_CPU_GTT_UC,
++ AMDGPU_GEM_CREATE_CPU_GTT_WC};
++
++ pm4 = malloc(pm4_dw * 4);
++ CU_ASSERT_NOT_EQUAL(pm4, NULL);
++
++ ib_info = malloc(sizeof(*ib_info));
++ CU_ASSERT_NOT_EQUAL(ib_info, NULL);
++ memset(ib_info, 0, sizeof(*ib_info));
++
++ ibs_request = malloc(sizeof(*ibs_request));
++ CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
++ memset(ibs_request, 0, sizeof(*ibs_request));
++
++ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* prepare resource */
++ resources = malloc(sizeof(amdgpu_bo_handle));
++ CU_ASSERT_NOT_EQUAL(resources, NULL);
++
++ loop = 0;
++ while(loop < 3) {
++ /* allocate a GTT BO for SDMA use (caching flags vary per loop) */
++ bo = gpu_mem_alloc(device_handle,
++ sdma_write_length, 4096,
++ AMDGPU_GEM_DOMAIN_GTT,
++ gtt_flags[loop], &bo_mc);
++
++ CU_ASSERT_EQUAL(amdgpu_bo_cpu_map(bo, (void **)&bo_cpu), 0);
++ CU_ASSERT_NOT_EQUAL(bo_cpu, NULL);
++
++ /* clear bo */
++ memset((void*)bo_cpu, 0, sdma_write_length);
++
++ resources[0] = bo;
++
++ /* fill in the PM4 packet: test SDMA constant fill */
++ i = j = 0;
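++ /* CONSTANT_FILL packet: header, destination address (lo/hi),
++ * 32-bit fill value, then the byte count */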
++ pm4[i++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0,
++ SDMA_CONSTANT_FILL_EXTRA_SIZE(2));
++ pm4[i++] = 0xffffffff & bo_mc;
++ pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
++ pm4[i++] = 0xdeadbeaf;
++ pm4[i++] = sdma_write_length;
++
++ amdgpu_sdma_test_exec_cs(context_handle, 0,
++ i, pm4,
++ 1, resources,
++ ib_info, ibs_request);
++
++ /* verify that the SDMA result matches the expected pattern */
++ i = 0;
++ while(i < (sdma_write_length / 4)) {
++ CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
++ }
++ amdgpu_bo_free(bo);
++ loop++;
++ }
++ /* clean resources */
++ free(resources);
++ free(ibs_request);
++ free(ib_info);
++ free(pm4);
++
++ /* end of test */
++ r = amdgpu_cs_ctx_free(device_handle, context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_command_submission_sdma_copy_linear(void)
++{
++ const int sdma_write_length = 1024;
++ const int pm4_dw = 256;
++ amdgpu_context_handle context_handle;
++ amdgpu_bo_handle bo1, bo2;
++ amdgpu_bo_handle *resources;
++ uint32_t *pm4;
++ struct amdgpu_cs_ib_info *ib_info;
++ struct amdgpu_cs_request *ibs_request;
++ uint64_t bo1_mc, bo2_mc;
++ volatile unsigned char *bo1_cpu, *bo2_cpu;
++ int i, j, r, loop1, loop2;
++ uint64_t gtt_flags[3] = {0, AMDGPU_GEM_CREATE_CPU_GTT_UC,
++ AMDGPU_GEM_CREATE_CPU_GTT_WC};
++
++ pm4 = malloc(pm4_dw * 4);
++ CU_ASSERT_NOT_EQUAL(pm4, NULL);
++
++ ib_info = malloc(sizeof(*ib_info));
++ CU_ASSERT_NOT_EQUAL(ib_info, NULL);
++ memset(ib_info, 0, sizeof(*ib_info));
++
++ ibs_request = malloc(sizeof(*ibs_request));
++ CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
++ memset(ibs_request, 0, sizeof(*ibs_request));
++
++ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* prepare resource */
++ resources = malloc(2 * sizeof(amdgpu_bo_handle));
++ CU_ASSERT_NOT_EQUAL(resources, NULL);
++
++ loop1 = 0;
++ /* run 9 iterations to test all mapping combinations */
++ while(loop1 < 3) {
++ loop2 = 0;
++ while(loop2 < 3) {
++ /* allocate bo1 for SDMA use (caching flags vary per loop) */
++ bo1 = gpu_mem_alloc(device_handle,
++ sdma_write_length, 4096,
++ AMDGPU_GEM_DOMAIN_GTT,
++ gtt_flags[loop1], &bo1_mc);
++
++ CU_ASSERT_EQUAL(amdgpu_bo_cpu_map(bo1, (void **)&bo1_cpu), 0);
++ CU_ASSERT_NOT_EQUAL(bo1_cpu, NULL);
++
++ /* set bo1 */
++ memset((void*)bo1_cpu, 0xaa, sdma_write_length);
++
++ /* allocate bo2 for SDMA use (caching flags vary per loop) */
++ bo2 = gpu_mem_alloc(device_handle,
++ sdma_write_length, 4096,
++ AMDGPU_GEM_DOMAIN_GTT,
++ gtt_flags[loop2], &bo2_mc);
++
++ CU_ASSERT_EQUAL(amdgpu_bo_cpu_map(bo2, (void **)&bo2_cpu), 0);
++ CU_ASSERT_NOT_EQUAL(bo2_cpu, NULL);
++
++ /* clear bo2 */
++ memset((void*)bo2_cpu, 0, sdma_write_length);
++
++ resources[0] = bo1;
++ resources[1] = bo2;
++
++ /* fill in the PM4 packet: test SDMA copy-linear */
++ i = j = 0;
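++ /* COPY_LINEAR packet: header, byte count, reserved,
++ * source address (lo/hi), destination address (lo/hi) */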
++ pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
++ pm4[i++] = sdma_write_length;
++ pm4[i++] = 0;
++ pm4[i++] = 0xffffffff & bo1_mc;
++ pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
++ pm4[i++] = 0xffffffff & bo2_mc;
++ pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
++
++
++ amdgpu_sdma_test_exec_cs(context_handle, 0,
++ i, pm4,
++ 2, resources,
++ ib_info, ibs_request);
++
++ /* verify that the SDMA result matches the expected pattern */
++ i = 0;
++ while(i < sdma_write_length) {
++ CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa);
++ }
++ amdgpu_bo_free(bo1);
++ amdgpu_bo_free(bo2);
++ loop2++;
++ }
++ loop1++;
++ }
++ /* clean resources */
++ free(resources);
++ free(ibs_request);
++ free(ib_info);
++ free(pm4);
++
++ /* end of test */
++ r = amdgpu_cs_ctx_free(device_handle, context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_command_submission_sdma(void)
++{
++ amdgpu_command_submission_sdma_write_linear();
++ amdgpu_command_submission_sdma_const_fill();
++ amdgpu_command_submission_sdma_copy_linear();
++}
++
++static void amdgpu_userptr_test(void)
++{
++ int i, r, j;
++ uint32_t *pm4 = NULL;
++ uint64_t bo_mc;
++ void *ptr = NULL;
++ int pm4_dw = 256;
++ int sdma_write_length = 4;
++ amdgpu_bo_handle handle;
++ amdgpu_context_handle context_handle;
++ struct amdgpu_cs_ib_info *ib_info;
++ struct amdgpu_cs_request *ibs_request;
++ struct amdgpu_bo_alloc_result res;
++
++ memset(&res, 0, sizeof(res));
++
++ pm4 = malloc(pm4_dw * sizeof(*pm4));
++ CU_ASSERT_NOT_EQUAL(pm4, NULL);
++
++ ib_info = malloc(sizeof(*ib_info));
++ CU_ASSERT_NOT_EQUAL(ib_info, NULL);
++
++ memset(ib_info, 0, sizeof(*ib_info));
++ ibs_request = malloc(sizeof(*ibs_request));
++ CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
++
++ memset(ibs_request, 0, sizeof(*ibs_request));
++ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* align the user buffer to a page, as required for userptr BOs */
++ r = posix_memalign(&ptr, sysconf(_SC_PAGE_SIZE), BUFFER_SIZE);
++ CU_ASSERT_EQUAL(r, 0);
++ CU_ASSERT_NOT_EQUAL(ptr, NULL);
++
++ r = amdgpu_create_bo_from_user_mem(device_handle,
++ ptr, BUFFER_SIZE, &res);
++ CU_ASSERT_EQUAL(r, 0);
++ bo_mc = res.virtual_mc_base_address;
++ handle = res.buf_handle;
++
++ j = i = 0;
++ pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
++ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
++ pm4[i++] = 0xffffffff & bo_mc;
++ pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
++ pm4[i++] = sdma_write_length;
++
++ while (j++ < sdma_write_length)
++ pm4[i++] = 0xdeadbeaf;
++
++ amdgpu_sdma_test_exec_cs(context_handle, 0,
++ i, pm4,
++ 1, &handle,
++ ib_info, ibs_request);
++ i = 0;
++ while (i < sdma_write_length) {
++ CU_ASSERT_EQUAL(((int*)ptr)[i++], 0xdeadbeaf);
++ }
++ free(ibs_request);
++ free(ib_info);
++ free(pm4);
++ r = amdgpu_bo_free(res.buf_handle);
++ CU_ASSERT_EQUAL(r, 0);
++ free(ptr);
++
++ r = amdgpu_cs_ctx_free(device_handle, context_handle);
++ CU_ASSERT_EQUAL(r, 0);
++}
+diff --git a/tests/amdgpu/bo_tests.c b/tests/amdgpu/bo_tests.c
+new file mode 100644
+index 0000000..73701b7
+--- /dev/null
++++ b/tests/amdgpu/bo_tests.c
+@@ -0,0 +1,151 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++#include <stdio.h>
++
++#include "CUnit/Basic.h"
++
++#include "amdgpu_test.h"
++#include "amdgpu_drm.h"
++
++#define BUFFER_SIZE (4*1024)
++#define BUFFER_ALIGN (4*1024)
++
++static amdgpu_device_handle device_handle;
++static uint32_t major_version;
++static uint32_t minor_version;
++
++static amdgpu_bo_handle buffer_handle;
++static uint64_t virtual_mc_base_address;
++
++static void amdgpu_bo_export_import(void);
++static void amdgpu_bo_metadata(void);
++static void amdgpu_bo_map_unmap(void);
++
++CU_TestInfo bo_tests[] = {
++ { "Export/Import", amdgpu_bo_export_import },
++#if 0
++ { "Metadata", amdgpu_bo_metadata },
++#endif
++ { "CPU map/unmap", amdgpu_bo_map_unmap },
++ CU_TEST_INFO_NULL,
++};
++
++int suite_bo_tests_init(void)
++{
++ struct amdgpu_bo_alloc_request req = {0};
++ struct amdgpu_bo_alloc_result res = {0};
++ int r;
++
++ r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
++ &minor_version, &device_handle);
++ if (r)
++ return CUE_SINIT_FAILED;
++
++ req.alloc_size = BUFFER_SIZE;
++ req.phys_alignment = BUFFER_ALIGN;
++ req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
++
++ r = amdgpu_bo_alloc(device_handle, &req, &res);
++ if (r)
++ return CUE_SINIT_FAILED;
++
++ buffer_handle = res.buf_handle;
++ virtual_mc_base_address = res.virtual_mc_base_address;
++
++ return CUE_SUCCESS;
++}
++
++int suite_bo_tests_clean(void)
++{
++ int r;
++
++ r = amdgpu_bo_free(buffer_handle);
++ if (r)
++ return CUE_SCLEAN_FAILED;
++
++ r = amdgpu_device_deinitialize(device_handle);
++ if (r)
++ return CUE_SCLEAN_FAILED;
++
++ return CUE_SUCCESS;
++}
++
++static void amdgpu_bo_export_import_do_type(enum amdgpu_bo_handle_type type)
++{
++ struct amdgpu_bo_import_result res = {0};
++ uint32_t shared_handle;
++ int r;
++
++ r = amdgpu_bo_export(buffer_handle, type, &shared_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_bo_import(device_handle, type, shared_handle, &res);
++ CU_ASSERT_EQUAL(r, 0);
++
++ CU_ASSERT_EQUAL(res.buf_handle, buffer_handle);
++ CU_ASSERT_EQUAL(res.alloc_size, BUFFER_SIZE);
++
++ r = amdgpu_bo_free(res.buf_handle);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_bo_export_import(void)
++{
++ amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_gem_flink_name);
++ amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_dma_buf_fd);
++}
++
++static void amdgpu_bo_metadata(void)
++{
++ struct amdgpu_bo_metadata meta = {0};
++ struct amdgpu_bo_info info = {0};
++ int r;
++
++ meta.size_metadata = 1;
++ meta.umd_metadata[0] = 0xdeadbeef;
++
++ r = amdgpu_bo_set_metadata(buffer_handle, &meta);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_bo_query_info(buffer_handle, &info);
++ CU_ASSERT_EQUAL(r, 0);
++
++ CU_ASSERT_EQUAL(info.metadata.size_metadata, 1);
++ CU_ASSERT_EQUAL(info.metadata.umd_metadata[0], 0xdeadbeef);
++}
++
++static void amdgpu_bo_map_unmap(void)
++{
++ uint32_t *ptr;
++ int i, r;
++
++ r = amdgpu_bo_cpu_map(buffer_handle, (void **)&ptr);
++ CU_ASSERT_EQUAL(r, 0);
++ CU_ASSERT_NOT_EQUAL(ptr, NULL);
++
++ for (i = 0; i < (BUFFER_SIZE / 4); ++i)
++ ptr[i] = 0xdeadbeef;
++
++ r = amdgpu_bo_cpu_unmap(buffer_handle);
++ CU_ASSERT_EQUAL(r, 0);
++}
+diff --git a/tests/amdgpu/cs_tests.c b/tests/amdgpu/cs_tests.c
+new file mode 100644
+index 0000000..f30d923
+--- /dev/null
++++ b/tests/amdgpu/cs_tests.c
+@@ -0,0 +1,319 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++*/
++#include <stdio.h>
++
++#include "CUnit/Basic.h"
++
++#include "amdgpu/util_math.h"
++
++#include "amdgpu_test.h"
++#include "uvd_messages.h"
++#include "amdgpu_drm.h"
++#include "amdgpu_internal.h"
++
++#define IB_SIZE amdgpu_cs_ib_size_4K
++#define MAX_RESOURCES 16
++
++static amdgpu_device_handle device_handle;
++static uint32_t major_version;
++static uint32_t minor_version;
++static uint32_t family_id;
++
++static amdgpu_context_handle context_handle;
++static amdgpu_ib_handle ib_handle;
++uint32_t *ib_cpu;
++
++static amdgpu_bo_handle resources[MAX_RESOURCES];
++static unsigned num_resources;
++
++static void amdgpu_cs_uvd_create(void);
++static void amdgpu_cs_uvd_decode(void);
++static void amdgpu_cs_uvd_destroy(void);
++
++CU_TestInfo cs_tests[] = {
++ { "UVD create", amdgpu_cs_uvd_create },
++ { "UVD decode", amdgpu_cs_uvd_decode },
++ { "UVD destroy", amdgpu_cs_uvd_destroy },
++ CU_TEST_INFO_NULL,
++};
++
++int suite_cs_tests_init(void)
++{
++ struct amdgpu_cs_ib_alloc_result ib_result = {0};
++ int r;
++
++ r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
++ &minor_version, &device_handle);
++ if (r)
++ return CUE_SINIT_FAILED;
++
++ family_id = device_handle->info.family_id;
++
++ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
++ if (r)
++ return CUE_SINIT_FAILED;
++
++ r = amdgpu_cs_alloc_ib(device_handle, context_handle,
++ IB_SIZE, &ib_result);
++ if (r)
++ return CUE_SINIT_FAILED;
++
++ ib_handle = ib_result.handle;
++ ib_cpu = ib_result.cpu;
++
++ return CUE_SUCCESS;
++}
++
++int suite_cs_tests_clean(void)
++{
++ int r;
++
++ r = amdgpu_cs_free_ib(device_handle, context_handle, ib_handle);
++ if (r)
++ return CUE_SCLEAN_FAILED;
++
++ r = amdgpu_cs_ctx_free(device_handle, context_handle);
++ if (r)
++ return CUE_SCLEAN_FAILED;
++
++ r = amdgpu_device_deinitialize(device_handle);
++ if (r)
++ return CUE_SCLEAN_FAILED;
++
++ return CUE_SUCCESS;
++}
++
++static int submit(unsigned ndw, unsigned ip)
++{
++ struct amdgpu_cs_ib_alloc_result ib_result = {0};
++ struct amdgpu_cs_request ibs_request = {0};
++ struct amdgpu_cs_ib_info ib_info = {0};
++ struct amdgpu_cs_query_fence fence_status = {0};
++ uint32_t expired;
++ int r;
++
++ ib_info.ib_handle = ib_handle;
++ ib_info.size = ndw;
++
++ ibs_request.ip_type = ip;
++ ibs_request.number_of_resources = num_resources;
++ ibs_request.resources = resources;
++ ibs_request.number_of_ibs = 1;
++ ibs_request.ibs = &ib_info;
++
++ r = amdgpu_cs_submit(device_handle, context_handle, 0,
++ &ibs_request, 1, &fence_status.fence);
++ if (r)
++ return r;
++
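++ /* Allocate a fresh IB for the next command stream and repoint the
++ * global ib_handle/ib_cpu at it. */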
++ r = amdgpu_cs_alloc_ib(device_handle, context_handle,
++ IB_SIZE, &ib_result);
++ if (r)
++ return r;
++
++ ib_handle = ib_result.handle;
++ ib_cpu = ib_result.cpu;
++
++ fence_status.context = context_handle;
++ fence_status.timeout_ns = AMDGPU_TIMEOUT_INFINITE;
++ fence_status.ip_type = ip;
++
++ r = amdgpu_cs_query_fence_status(device_handle, &fence_status, &expired);
++ if (r)
++ return r;
++
++ return 0;
++}
++
++static void uvd_cmd(uint64_t addr, unsigned cmd, int *idx)
++{
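++ /* Emit three register/value pairs: the message address (low dword,
++ * then high dword) followed by the command id shifted left by one. */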
++ ib_cpu[(*idx)++] = 0x3BC4;
++ ib_cpu[(*idx)++] = addr;
++ ib_cpu[(*idx)++] = 0x3BC5;
++ ib_cpu[(*idx)++] = addr >> 32;
++ ib_cpu[(*idx)++] = 0x3BC3;
++ ib_cpu[(*idx)++] = cmd << 1;
++}
++
++static void amdgpu_cs_uvd_create(void)
++{
++ struct amdgpu_bo_alloc_request req = {0};
++ struct amdgpu_bo_alloc_result res = {0};
++ void *msg;
++ int i, r;
++
++ req.alloc_size = 4*1024;
++ req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
++
++ r = amdgpu_bo_alloc(device_handle, &req, &res);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_bo_cpu_map(res.buf_handle, &msg);
++ CU_ASSERT_EQUAL(r, 0);
++
++ memcpy(msg, uvd_create_msg, sizeof(uvd_create_msg));
++ if (family_id >= AMDGPU_FAMILY_VI)
++ ((uint8_t*)msg)[0x10] = 7;
++
++ r = amdgpu_bo_cpu_unmap(res.buf_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ num_resources = 0;
++ resources[num_resources++] = res.buf_handle;
++
++ i = 0;
++ uvd_cmd(res.virtual_mc_base_address, 0x0, &i);
++ for (; i % 16; ++i)
++ ib_cpu[i] = 0x80000000;
++
++ r = submit(i, AMDGPU_HW_IP_UVD);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_bo_free(resources[0]);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_cs_uvd_decode(void)
++{
++ const unsigned dpb_size = 15923584, dt_size = 737280;
++ uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, dt_addr, it_addr;
++ struct amdgpu_bo_alloc_request req = {0};
++ struct amdgpu_bo_alloc_result res = {0};
++ uint64_t sum;
++ uint8_t *ptr;
++ int i, r;
++
++ req.alloc_size = 4*1024; /* msg */
++ req.alloc_size += 4*1024; /* fb */
++ if (family_id >= AMDGPU_FAMILY_VI)
++ req.alloc_size += 4096; /*it_scaling_table*/
++ req.alloc_size += ALIGN(sizeof(uvd_bitstream), 4*1024);
++ req.alloc_size += ALIGN(dpb_size, 4*1024);
++ req.alloc_size += ALIGN(dt_size, 4*1024);
++
++ req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
++
++ r = amdgpu_bo_alloc(device_handle, &req, &res);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_bo_cpu_map(res.buf_handle, (void **)&ptr);
++ CU_ASSERT_EQUAL(r, 0);
++
++ memcpy(ptr, uvd_decode_msg, sizeof(uvd_decode_msg));
++ if (family_id >= AMDGPU_FAMILY_VI)
++ ptr[0x10] = 7;
++
++ ptr += 4*1024;
++ memset(ptr, 0, 4*1024);
++ if (family_id >= AMDGPU_FAMILY_VI) {
++ ptr += 4*1024;
++ memcpy(ptr, uvd_it_scaling_table, sizeof(uvd_it_scaling_table));
++ }
++
++ ptr += 4*1024;
++ memcpy(ptr, uvd_bitstream, sizeof(uvd_bitstream));
++
++ ptr += ALIGN(sizeof(uvd_bitstream), 4*1024);
++ memset(ptr, 0, dpb_size);
++
++ ptr += ALIGN(dpb_size, 4*1024);
++ memset(ptr, 0, dt_size);
++
++ num_resources = 0;
++ resources[num_resources++] = res.buf_handle;
++
++ msg_addr = res.virtual_mc_base_address;
++ fb_addr = msg_addr + 4*1024;
++ if (family_id >= AMDGPU_FAMILY_VI) {
++ it_addr = fb_addr + 4*1024;
++ bs_addr = it_addr + 4*1024;
++ } else
++ bs_addr = fb_addr + 4*1024;
++ dpb_addr = ALIGN(bs_addr + sizeof(uvd_bitstream), 4*1024);
++ dt_addr = ALIGN(dpb_addr + dpb_size, 4*1024);
++
++ i = 0;
++ uvd_cmd(msg_addr, 0x0, &i);
++ uvd_cmd(dpb_addr, 0x1, &i);
++ uvd_cmd(dt_addr, 0x2, &i);
++ uvd_cmd(fb_addr, 0x3, &i);
++ uvd_cmd(bs_addr, 0x100, &i);
++ if (family_id >= AMDGPU_FAMILY_VI)
++ uvd_cmd(it_addr, 0x204, &i);
++ ib_cpu[i++] = 0x3BC6;
++ ib_cpu[i++] = 0x1;
++ for (; i % 16; ++i)
++ ib_cpu[i] = 0x80000000;
++
++ r = submit(i, AMDGPU_HW_IP_UVD);
++ CU_ASSERT_EQUAL(r, 0);
++
++ /* TODO: use a real CRC32 */
++ for (i = 0, sum = 0; i < dt_size; ++i)
++ sum += ptr[i];
++ CU_ASSERT_EQUAL(sum, 0x20345d8);
++
++ r = amdgpu_bo_cpu_unmap(res.buf_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_bo_free(resources[0]);
++ CU_ASSERT_EQUAL(r, 0);
++}
++
++static void amdgpu_cs_uvd_destroy(void)
++{
++ struct amdgpu_bo_alloc_request req = {0};
++ struct amdgpu_bo_alloc_result res = {0};
++ void *msg;
++ int i, r;
++
++ req.alloc_size = 4*1024;
++ req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
++
++ r = amdgpu_bo_alloc(device_handle, &req, &res);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_bo_cpu_map(res.buf_handle, &msg);
++ CU_ASSERT_EQUAL(r, 0);
++
++ memcpy(msg, uvd_destroy_msg, sizeof(uvd_destroy_msg));
++ if (family_id >= AMDGPU_FAMILY_VI)
++ ((uint8_t*)msg)[0x10] = 7;
++
++ r = amdgpu_bo_cpu_unmap(res.buf_handle);
++ CU_ASSERT_EQUAL(r, 0);
++
++ num_resources = 0;
++ resources[num_resources++] = res.buf_handle;
++
++ i = 0;
++ uvd_cmd(res.virtual_mc_base_address, 0x0, &i);
++ for (; i % 16; ++i)
++ ib_cpu[i] = 0x80000000;
++
++ r = submit(i, AMDGPU_HW_IP_UVD);
++ CU_ASSERT_EQUAL(r, 0);
++
++ r = amdgpu_bo_free(resources[0]);
++ CU_ASSERT_EQUAL(r, 0);
++}
+diff --git a/tests/amdgpu/uvd_messages.h b/tests/amdgpu/uvd_messages.h
+new file mode 100644
+index 0000000..d4069c9
+--- /dev/null
++++ b/tests/amdgpu/uvd_messages.h
+@@ -0,0 +1,813 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _uvd_messages_h_
++#define _uvd_messages_h_
++
++static uint8_t uvd_create_msg[] = {
++ 0xe4,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x44,0x40,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x03,0x00,0x00,
++ 0xe0,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xf9,0xf2,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++};
++
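++/* Minimal encoded bitstream (H.264 IDR slice data) used as input for the decode test */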
++static uint8_t uvd_bitstream[] = {
++ 0x00,0x00,0x01,0x25,0xb8,0x20,0x20,0x21,0x44,0xc5,0x00,0x01,0x57,0x9b,0xef,0xbe,
++ 0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,
++ 0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,
++ 0xbe,0xfb,0xef,0xbe,0xff,0x87,0xff,0xc2,0x58,0x0e,0x00,0x02,0x02,0xa0,0x00,0x20,
++ 0x3a,0x00,0x0d,0x00,0x01,0x01,0xa4,0xcb,0x94,0x73,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xaf,0x00,0x00,0x01,0x25,0x00,0xa2,0xb8,0x20,0x20,0x21,0x44,
++ 0xc5,0x00,0x01,0x57,0x9b,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,
++ 0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,
++ 0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xff,0x87,0xff,0xc2,0x58,
++ 0x0e,0x00,0x02,0x02,0xa0,0x00,0x20,0x3a,0x00,0x0d,0x00,0x01,0x01,0xa4,0xcb,0x94,
++ 0x73,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xaf,0x00,0x00,0x01,0x25,
++ 0x00,0x51,0x2e,0x08,0x08,0x08,0x51,0x31,0x40,0x00,0x55,0xe6,0xfb,0xef,0xbe,0xfb,
++ 0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,
++ 0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,
++ 0xfb,0xef,0xbf,0xe1,0xff,0xf0,0x96,0x03,0x80,0x00,0x80,0xa8,0x00,0x08,0x0e,0x80,
++ 0x03,0x40,0x00,0x40,0x69,0x32,0xe5,0x1c,0xfa,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xc0,0x00,0x00,0x01,0x25,0x00,0x79,0xae,0x08,0x08,0x08,0x51,0x31,
++ 0x40,0x00,0x55,0xe6,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,
++ 0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,
++ 0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbf,0xe1,0xff,0xf0,0x96,0x03,
++ 0x80,0x00,0x80,0xa8,0x00,0x08,0x0e,0x80,0x03,0x40,0x00,0x40,0x69,0x32,0xe5,0x1c,
++ 0xfa,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xc0,0x00,0x00,0x01,0x25,
++ 0x00,0x28,0x8b,0x82,0x02,0x02,0x14,0x4c,0x50,0x00,0x15,0x79,0xbe,0xfb,0xef,0xbe,
++ 0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,
++ 0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,0xbe,0xfb,0xef,
++ 0xbe,0xfb,0xef,0xf8,0x7f,0xfc,0x25,0x80,0xe0,0x00,0x20,0x2a,0x00,0x02,0x03,0xa0,
++ 0x00,0xd0,0x00,0x10,0x1a,0x4c,0xb9,0x47,0x3e,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,
++ 0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,
++ 0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,0xeb,0xae,0xba,
++ 0xeb,0xae,0xba,0xf0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++};
++
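++/* UVD decode message describing how to decode the bitstream above */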
++static uint8_t uvd_decode_msg[] = {
++ 0xe4,0x0d,0x00,0x00,0x01,0x00,0x00,0x00,0x03,0x00,0x44,0x40,0x01,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x60,0x03,0x00,0x00,0xe0,0x01,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x80,0xf9,0xf2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x05,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0xc0,0x03,0x00,0x00,0x80,0x07,0x00,0x00,0x60,0x09,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x02,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x88,0x00,0x00,0x00,
++ 0x01,0x00,0x00,0x01,0x00,0x03,0x02,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++};
++
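++/* UVD message that tears the decoder session down again */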
++static uint8_t uvd_destroy_msg[] = {
++ 0xe4,0x0d,0x00,0x00,0x02,0x00,0x00,0x00,0x03,0x00,0x44,0x40,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++};
++
++static uint8_t uvd_it_scaling_table[] = {
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++ 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
++};
++
++#endif /* _uvd_messages_h_ */
+diff --git a/tests/kmstest/main.c b/tests/kmstest/main.c
+index 7d50f5b..f39364f 100644
+--- a/tests/kmstest/main.c
++++ b/tests/kmstest/main.c
+@@ -62,6 +62,7 @@ static const char *drivers[] = {
+ "nouveau",
+ "vmwgfx",
+ "exynos",
++ "amdgpu",
+ "imx-drm",
+ "rockchip",
+ NULL
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/drm/libdrm/0003-tests-also-install-tests-app.patch b/meta-amdfalconx86/recipes-graphics/drm/libdrm/0003-tests-also-install-tests-app.patch
new file mode 100644
index 00000000..2adfbe74
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/drm/libdrm/0003-tests-also-install-tests-app.patch
@@ -0,0 +1,34 @@
+From b0c643612ff1477ab0faf49592556525d85fdb22 Mon Sep 17 00:00:00 2001
+From: Arindam Nath <arindam.nath@amd.com>
+Date: Tue, 21 Apr 2015 04:50:04 +0530
+Subject: [PATCH 3/3] tests: also install tests app
+
+Upstream-Status: Inappropriate [configuration]
+
+Signed-off-by: Yu Ke <ke.yu@intel.com>
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ tests/Makefile.am | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index a980b3d..fff6294 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -1,3 +1,4 @@
++check_PROGRAMS =
+ SUBDIRS = modeprint proptest modetest
+
+ if HAVE_LIBKMS
+@@ -33,7 +34,7 @@ AM_CFLAGS = \
+
+ LDADD = $(top_builddir)/libdrm.la
+
+-check_PROGRAMS = \
++bin_PROGRAMS = \
+ dristat \
+ drmstat
+
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/drm/libdrm_git.bbappend b/meta-amdfalconx86/recipes-graphics/drm/libdrm_git.bbappend
new file mode 100644
index 00000000..1ca35fd4
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/drm/libdrm_git.bbappend
@@ -0,0 +1,19 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+SRC_URI_append_amdfalconx86 = " file://0001-drm-add-libdrm_amdgpu.patch \
+ file://0002-drm-add-tests-amdgpu.patch \
+ file://0003-tests-also-install-tests-app.patch \
+"
+
+SRCREV_amdfalconx86 = "0d78b37b1cac304ce5e84d1207f0a43abd29c000"
+PV_amdfalconx86 = "2.4.60+git${SRCPV}"
+
+EXTRA_OECONF_append_amdfalconx86 = " --enable-amdgpu \
+ --enable-radeon \
+"
+
+FILES_${PN}-amdgpu = "${libdir}/libdrm_amdgpu.so.*"
+
+do_install_append_amdfalconx86() {
+ cp ${S}/include/drm/amdgpu_drm.h ${D}/usr/include/libdrm
+}
diff --git a/meta-amdfalconx86/recipes-graphics/libepoxy/libepoxy_1.2.bb b/meta-amdfalconx86/recipes-graphics/libepoxy/libepoxy_1.2.bb
new file mode 100644
index 00000000..02c8cad4
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/libepoxy/libepoxy_1.2.bb
@@ -0,0 +1,23 @@
+SUMMARY = "Library for handling OpenGL function pointer management"
+
+DESCRIPTION = "Epoxy is a library for handling OpenGL function pointer \
+management for you. It hides the complexity of dlopen(), dlsym(), \
+glXGetProcAddress(), eglGetProcAddress(), etc. from the app developer, \
+with very little knowledge needed on their part. They get to read GL specs \
+and write code using undecorated function names like glCompileShader()."
+
+HOMEPAGE = "https://github.com/anholt/libepoxy"
+SECTION = "x11/libs"
+LICENSE = "MIT"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=58ef4c80d401e07bd9ee8b6b58cf464b"
+
+SRC_URI = "http://crux.nu/files/libepoxy-${PV}.tar.gz"
+SRC_URI[md5sum] = "12d6b7621f086c0c928887c27d90bc30"
+SRC_URI[sha256sum] = "42c328440f60a5795835c5ec4bdfc1329e75bba16b6e22b3a87ed17e9679e8f6"
+
+S = "${WORKDIR}/libepoxy-${PV}"
+
+DEPENDS += "util-macros virtual/libx11 virtual/egl"
+
+inherit autotools pkgconfig gettext
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0001-winsys-radeon-make-radeon_bo_vtbl-static.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0001-winsys-radeon-make-radeon_bo_vtbl-static.patch
new file mode 100644
index 00000000..a6abee97
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0001-winsys-radeon-make-radeon_bo_vtbl-static.patch
@@ -0,0 +1,35 @@
+From 65a1b608e285322358732911e41fce4fe89a5e07 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 19:09:57 +0200
+Subject: [PATCH 01/29] winsys/radeon: make radeon_bo_vtbl static
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ src/gallium/winsys/radeon/drm/radeon_drm_bo.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+index e609d68..061c814 100644
+--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
++++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+@@ -42,7 +42,7 @@
+ #include <fcntl.h>
+ #include <stdio.h>
+
+-extern const struct pb_vtbl radeon_bo_vtbl;
++static const struct pb_vtbl radeon_bo_vtbl;
+
+ static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
+ {
+@@ -471,7 +471,7 @@ static void radeon_bo_fence(struct pb_buffer *buf,
+ {
+ }
+
+-const struct pb_vtbl radeon_bo_vtbl = {
++static const struct pb_vtbl radeon_bo_vtbl = {
+ radeon_bo_destroy,
+ NULL, /* never called */
+ NULL, /* never called */
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0002-gallium-radeon-print-winsys-info-with-R600_DEBUG-inf.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0002-gallium-radeon-print-winsys-info-with-R600_DEBUG-inf.patch
new file mode 100644
index 00000000..989b7a0d
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0002-gallium-radeon-print-winsys-info-with-R600_DEBUG-inf.patch
@@ -0,0 +1,71 @@
+From f3caa583548e36334891974a337d7e932d4260b3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 20:15:16 +0200
+Subject: [PATCH 02/29] gallium/radeon: print winsys info with R600_DEBUG=info
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ src/gallium/drivers/radeon/r600_pipe_common.c | 27 +++++++++++++++++++++++++++
+ src/gallium/drivers/radeon/r600_pipe_common.h | 1 +
+ 2 files changed, 28 insertions(+)
+
+diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
+index 2b27e0a..c6d7918 100644
+--- a/src/gallium/drivers/radeon/r600_pipe_common.c
++++ b/src/gallium/drivers/radeon/r600_pipe_common.c
+@@ -306,6 +306,7 @@ static const struct debug_named_value common_debug_options[] = {
+ { "compute", DBG_COMPUTE, "Print compute info" },
+ { "vm", DBG_VM, "Print virtual addresses when creating resources" },
+ { "trace_cs", DBG_TRACE_CS, "Trace cs and write rlockup_<csid>.c file with faulty cs" },
++ { "info", DBG_INFO, "Print driver information" },
+
+ /* shaders */
+ { "fs", DBG_FS, "Print fetch shaders" },
+@@ -874,6 +875,32 @@ bool r600_common_screen_init(struct r600_common_screen *rscreen,
+ }
+ }
+
++ if (rscreen->debug_flags & DBG_INFO) {
++ printf("pci_id = 0x%x\n", rscreen->info.pci_id);
++ printf("family = %i\n", rscreen->info.family);
++ printf("chip_class = %i\n", rscreen->info.chip_class);
++ printf("gart_size = %i MB\n", (int)(rscreen->info.gart_size >> 20));
++ printf("vram_size = %i MB\n", (int)(rscreen->info.vram_size >> 20));
++ printf("max_sclk = %i\n", rscreen->info.max_sclk);
++ printf("max_compute_units = %i\n", rscreen->info.max_compute_units);
++ printf("max_se = %i\n", rscreen->info.max_se);
++ printf("max_sh_per_se = %i\n", rscreen->info.max_sh_per_se);
++ printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
++ rscreen->info.drm_minor, rscreen->info.drm_patchlevel);
++ printf("has_uvd = %i\n", rscreen->info.has_uvd);
++ printf("vce_fw_version = %i\n", rscreen->info.vce_fw_version);
++ printf("r600_num_backends = %i\n", rscreen->info.r600_num_backends);
++ printf("r600_clock_crystal_freq = %i\n", rscreen->info.r600_clock_crystal_freq);
++ printf("r600_tiling_config = 0x%x\n", rscreen->info.r600_tiling_config);
++ printf("r600_num_tile_pipes = %i\n", rscreen->info.r600_num_tile_pipes);
++ printf("r600_max_pipes = %i\n", rscreen->info.r600_max_pipes);
++ printf("r600_virtual_address = %i\n", rscreen->info.r600_virtual_address);
++ printf("r600_has_dma = %i\n", rscreen->info.r600_has_dma);
++ printf("r600_backend_map = %i\n", rscreen->info.r600_backend_map);
++ printf("r600_backend_map_valid = %i\n", rscreen->info.r600_backend_map_valid);
++ printf("si_tile_mode_array_valid = %i\n", rscreen->info.si_tile_mode_array_valid);
++ printf("cik_macrotile_mode_array_valid = %i\n", rscreen->info.cik_macrotile_mode_array_valid);
++ }
+ return true;
+ }
+
+diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
+index febd2a1..384a9a6 100644
+--- a/src/gallium/drivers/radeon/r600_pipe_common.h
++++ b/src/gallium/drivers/radeon/r600_pipe_common.h
+@@ -84,6 +84,7 @@
+ #define DBG_SWITCH_ON_EOP (1 << 15)
+ #define DBG_FORCE_DMA (1 << 16)
+ #define DBG_PRECOMPILE (1 << 17)
++#define DBG_INFO (1 << 18)
+ /* The maximum allowed bit is 20. */
+
+ #define R600_MAP_BUFFER_ALIGNMENT 64
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0003-radeonsi-remove-useless-includes.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0003-radeonsi-remove-useless-includes.patch
new file mode 100644
index 00000000..28736b8d
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0003-radeonsi-remove-useless-includes.patch
@@ -0,0 +1,27 @@
+From 2eb1c8e83edfa6ebb0603e20813136bd696bea01 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 20:16:35 +0200
+Subject: [PATCH 03/29] radeonsi: remove useless includes
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ src/gallium/drivers/radeonsi/si_pipe.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
+index 5dc657c..e68c30e 100644
+--- a/src/gallium/drivers/radeonsi/si_pipe.c
++++ b/src/gallium/drivers/radeonsi/si_pipe.c
+@@ -30,9 +30,6 @@
+ #include "util/u_memory.h"
+ #include "vl/vl_decoder.h"
+
+-#include <llvm-c/Target.h>
+-#include <llvm-c/TargetMachine.h>
+-
+ /*
+ * pipe_context
+ */
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0004-radeonsi-remove-deprecated-and-useless-registers.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0004-radeonsi-remove-deprecated-and-useless-registers.patch
new file mode 100644
index 00000000..0bc7a57d
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0004-radeonsi-remove-deprecated-and-useless-registers.patch
@@ -0,0 +1,36 @@
+From 3c24679d5732c8b90e793537cd43c69a3a4d0618 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 20:37:45 +0200
+Subject: [PATCH 04/29] radeonsi: remove deprecated and useless registers
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ src/gallium/drivers/radeonsi/si_state.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
+index 4bb6f2b..f24cbbd 100644
+--- a/src/gallium/drivers/radeonsi/si_state.c
++++ b/src/gallium/drivers/radeonsi/si_state.c
+@@ -3035,18 +3035,8 @@ void si_init_config(struct si_context *sctx)
+
+ si_cmd_context_control(pm4);
+
+- si_pm4_set_reg(pm4, R_028A10_VGT_OUTPUT_PATH_CNTL, 0x0);
+- si_pm4_set_reg(pm4, R_028A14_VGT_HOS_CNTL, 0x0);
+ si_pm4_set_reg(pm4, R_028A18_VGT_HOS_MAX_TESS_LEVEL, 0x0);
+ si_pm4_set_reg(pm4, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, 0x0);
+- si_pm4_set_reg(pm4, R_028A20_VGT_HOS_REUSE_DEPTH, 0x0);
+- si_pm4_set_reg(pm4, R_028A24_VGT_GROUP_PRIM_TYPE, 0x0);
+- si_pm4_set_reg(pm4, R_028A28_VGT_GROUP_FIRST_DECR, 0x0);
+- si_pm4_set_reg(pm4, R_028A2C_VGT_GROUP_DECR, 0x0);
+- si_pm4_set_reg(pm4, R_028A30_VGT_GROUP_VECT_0_CNTL, 0x0);
+- si_pm4_set_reg(pm4, R_028A34_VGT_GROUP_VECT_1_CNTL, 0x0);
+- si_pm4_set_reg(pm4, R_028A38_VGT_GROUP_VECT_0_FMT_CNTL, 0x0);
+- si_pm4_set_reg(pm4, R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL, 0x0);
+
+ /* FIXME calculate these values somehow ??? */
+ si_pm4_set_reg(pm4, R_028A54_VGT_GS_PER_ES, 0x80);
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0005-radeonsi-set-an-optimal-value-for-DB_Z_INFO_ZRANGE_P.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0005-radeonsi-set-an-optimal-value-for-DB_Z_INFO_ZRANGE_P.patch
new file mode 100644
index 00000000..7148d2ee
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0005-radeonsi-set-an-optimal-value-for-DB_Z_INFO_ZRANGE_P.patch
@@ -0,0 +1,43 @@
+From a20e66cd94137e196ae5ef627b8d71d5bc6a52ed Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 20:40:31 +0200
+Subject: [PATCH 05/29] radeonsi: set an optimal value for
+ DB_Z_INFO_ZRANGE_PRECISION
+
+Required because of a VI hw bug.
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ src/gallium/drivers/radeonsi/si_state.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
+index f24cbbd..dc19d29 100644
+--- a/src/gallium/drivers/radeonsi/si_state.c
++++ b/src/gallium/drivers/radeonsi/si_state.c
+@@ -1948,12 +1948,6 @@ static void si_init_depth_surface(struct si_context *sctx,
+ z_info |= S_028040_TILE_SURFACE_ENABLE(1) |
+ S_028040_ALLOW_EXPCLEAR(1);
+
+- /* This is optimal for the clear value of 1.0 and using
+- * the LESS and LEQUAL test functions. Set this to 0
+- * for the opposite case. This can only be changed when
+- * clearing. */
+- z_info |= S_028040_ZRANGE_PRECISION(1);
+-
+ /* Use all of the htile_buffer for depth, because we don't
+ * use HTILE for stencil because of FAST_STENCIL_DISABLE. */
+ s_info |= S_028044_TILE_STENCIL_DISABLE(1);
+@@ -2183,7 +2177,8 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
+
+ r600_write_context_reg_seq(cs, R_02803C_DB_DEPTH_INFO, 9);
+ radeon_emit(cs, zb->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
+- radeon_emit(cs, zb->db_z_info); /* R_028040_DB_Z_INFO */
++ radeon_emit(cs, zb->db_z_info | /* R_028040_DB_Z_INFO */
++ S_028040_ZRANGE_PRECISION(rtex->depth_clear_value != 0));
+ radeon_emit(cs, zb->db_stencil_info); /* R_028044_DB_STENCIL_INFO */
+ radeon_emit(cs, zb->db_depth_base); /* R_028048_DB_Z_READ_BASE */
+ radeon_emit(cs, zb->db_stencil_base); /* R_02804C_DB_STENCIL_READ_BASE */
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0006-winsys-radeon-move-radeon_winsys.h-up-one-directory.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0006-winsys-radeon-move-radeon_winsys.h-up-one-directory.patch
new file mode 100644
index 00000000..90c1f976
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0006-winsys-radeon-move-radeon_winsys.h-up-one-directory.patch
@@ -0,0 +1,1483 @@
+From f31b21f2e2ec8f5ab61740ced586eb04dd4dcb37 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 22:50:33 +0200
+Subject: [PATCH 06/29] winsys/radeon: move radeon_winsys.h up one directory
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ .../auxiliary/target-helpers/inline_drm_helper.h | 6 +-
+ src/gallium/drivers/r300/r300_chipset.c | 2 +-
+ src/gallium/drivers/r300/r300_context.h | 1 -
+ src/gallium/drivers/r300/r300_screen.h | 2 +-
+ src/gallium/drivers/radeon/r600_pipe_common.h | 2 +-
+ src/gallium/drivers/radeon/radeon_uvd.c | 1 -
+ src/gallium/drivers/radeon/radeon_uvd.h | 2 +-
+ src/gallium/drivers/radeon/radeon_vce.c | 1 -
+ src/gallium/drivers/radeon/radeon_vce_40_2_2.c | 1 -
+ src/gallium/drivers/radeon/radeon_video.c | 1 -
+ src/gallium/drivers/radeon/radeon_video.h | 2 +-
+ src/gallium/drivers/radeonsi/si_pm4.h | 2 +-
+ src/gallium/targets/pipe-loader/pipe_r300.c | 2 +-
+ src/gallium/targets/pipe-loader/pipe_r600.c | 2 +-
+ src/gallium/targets/pipe-loader/pipe_radeonsi.c | 2 +-
+ src/gallium/winsys/radeon/drm/Makefile.sources | 2 +-
+ src/gallium/winsys/radeon/drm/radeon_drm_winsys.h | 2 +-
+ src/gallium/winsys/radeon/drm/radeon_winsys.h | 604 ---------------------
+ src/gallium/winsys/radeon/radeon_winsys.h | 604 +++++++++++++++++++++
+ 19 files changed, 618 insertions(+), 623 deletions(-)
+ delete mode 100644 src/gallium/winsys/radeon/drm/radeon_winsys.h
+ create mode 100644 src/gallium/winsys/radeon/radeon_winsys.h
+
+diff --git a/src/gallium/auxiliary/target-helpers/inline_drm_helper.h b/src/gallium/auxiliary/target-helpers/inline_drm_helper.h
+index 542ad43..d3c331d 100644
+--- a/src/gallium/auxiliary/target-helpers/inline_drm_helper.h
++++ b/src/gallium/auxiliary/target-helpers/inline_drm_helper.h
+@@ -28,19 +28,19 @@
+ #endif
+
+ #if GALLIUM_R300
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "radeon/drm/radeon_drm_public.h"
+ #include "r300/r300_public.h"
+ #endif
+
+ #if GALLIUM_R600
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "radeon/drm/radeon_drm_public.h"
+ #include "r600/r600_public.h"
+ #endif
+
+ #if GALLIUM_RADEONSI
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "radeon/drm/radeon_drm_public.h"
+ #include "radeonsi/si_public.h"
+ #endif
+diff --git a/src/gallium/drivers/r300/r300_chipset.c b/src/gallium/drivers/r300/r300_chipset.c
+index 7a83611..c1c7ce3 100644
+--- a/src/gallium/drivers/r300/r300_chipset.c
++++ b/src/gallium/drivers/r300/r300_chipset.c
+@@ -22,7 +22,7 @@
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+ #include "r300_chipset.h"
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+
+ #include "util/u_debug.h"
+ #include "util/u_memory.h"
+diff --git a/src/gallium/drivers/r300/r300_context.h b/src/gallium/drivers/r300/r300_context.h
+index 4d2b153..3873c9a 100644
+--- a/src/gallium/drivers/r300/r300_context.h
++++ b/src/gallium/drivers/r300/r300_context.h
+@@ -36,7 +36,6 @@
+ #include "r300_defines.h"
+ #include "r300_screen.h"
+ #include "compiler/radeon_regalloc.h"
+-#include "radeon/drm/radeon_winsys.h"
+
+ struct u_upload_mgr;
+ struct r300_context;
+diff --git a/src/gallium/drivers/r300/r300_screen.h b/src/gallium/drivers/r300/r300_screen.h
+index f0dd3c6..7bba39b 100644
+--- a/src/gallium/drivers/r300/r300_screen.h
++++ b/src/gallium/drivers/r300/r300_screen.h
+@@ -25,7 +25,7 @@
+ #define R300_SCREEN_H
+
+ #include "r300_chipset.h"
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "pipe/p_screen.h"
+ #include "util/u_slab.h"
+ #include "os/os_thread.h"
+diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
+index 384a9a6..b7df001 100644
+--- a/src/gallium/drivers/radeon/r600_pipe_common.h
++++ b/src/gallium/drivers/radeon/r600_pipe_common.h
+@@ -34,7 +34,7 @@
+
+ #include <stdio.h>
+
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+
+ #include "util/u_blitter.h"
+ #include "util/u_double_list.h"
+diff --git a/src/gallium/drivers/radeon/radeon_uvd.c b/src/gallium/drivers/radeon/radeon_uvd.c
+index 9668d7d..4d4b54b 100644
+--- a/src/gallium/drivers/radeon/radeon_uvd.c
++++ b/src/gallium/drivers/radeon/radeon_uvd.c
+@@ -45,7 +45,6 @@
+ #include "vl/vl_defines.h"
+ #include "vl/vl_mpeg12_decoder.h"
+
+-#include "radeon/drm/radeon_winsys.h"
+ #include "r600_pipe_common.h"
+ #include "radeon_video.h"
+ #include "radeon_uvd.h"
+diff --git a/src/gallium/drivers/radeon/radeon_uvd.h b/src/gallium/drivers/radeon/radeon_uvd.h
+index 462b101..41a6fb4 100644
+--- a/src/gallium/drivers/radeon/radeon_uvd.h
++++ b/src/gallium/drivers/radeon/radeon_uvd.h
+@@ -34,7 +34,7 @@
+ #ifndef RADEON_UVD_H
+ #define RADEON_UVD_H
+
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "vl/vl_video_buffer.h"
+
+ /* UVD uses PM4 packet type 0 and 2 */
+diff --git a/src/gallium/drivers/radeon/radeon_vce.c b/src/gallium/drivers/radeon/radeon_vce.c
+index 6d34bd3..5f710e6 100644
+--- a/src/gallium/drivers/radeon/radeon_vce.c
++++ b/src/gallium/drivers/radeon/radeon_vce.c
+@@ -40,7 +40,6 @@
+
+ #include "vl/vl_video_buffer.h"
+
+-#include "radeon/drm/radeon_winsys.h"
+ #include "r600_pipe_common.h"
+ #include "radeon_video.h"
+ #include "radeon_vce.h"
+diff --git a/src/gallium/drivers/radeon/radeon_vce_40_2_2.c b/src/gallium/drivers/radeon/radeon_vce_40_2_2.c
+index b176aa7..0902957 100644
+--- a/src/gallium/drivers/radeon/radeon_vce_40_2_2.c
++++ b/src/gallium/drivers/radeon/radeon_vce_40_2_2.c
+@@ -40,7 +40,6 @@
+
+ #include "vl/vl_video_buffer.h"
+
+-#include "radeon/drm/radeon_winsys.h"
+ #include "r600_pipe_common.h"
+ #include "radeon_video.h"
+ #include "radeon_vce.h"
+diff --git a/src/gallium/drivers/radeon/radeon_video.c b/src/gallium/drivers/radeon/radeon_video.c
+index 1420798..6ec10c1 100644
+--- a/src/gallium/drivers/radeon/radeon_video.c
++++ b/src/gallium/drivers/radeon/radeon_video.c
+@@ -39,7 +39,6 @@
+ #include "vl/vl_defines.h"
+ #include "vl/vl_video_buffer.h"
+
+-#include "radeon/drm/radeon_winsys.h"
+ #include "r600_pipe_common.h"
+ #include "radeon_video.h"
+ #include "radeon_vce.h"
+diff --git a/src/gallium/drivers/radeon/radeon_video.h b/src/gallium/drivers/radeon/radeon_video.h
+index 974ea4f..6d0ff28 100644
+--- a/src/gallium/drivers/radeon/radeon_video.h
++++ b/src/gallium/drivers/radeon/radeon_video.h
+@@ -34,7 +34,7 @@
+ #ifndef RADEON_VIDEO_H
+ #define RADEON_VIDEO_H
+
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "vl/vl_video_buffer.h"
+
+ #define RVID_ERR(fmt, args...) \
+diff --git a/src/gallium/drivers/radeonsi/si_pm4.h b/src/gallium/drivers/radeonsi/si_pm4.h
+index bfb5562..d215882 100644
+--- a/src/gallium/drivers/radeonsi/si_pm4.h
++++ b/src/gallium/drivers/radeonsi/si_pm4.h
+@@ -27,7 +27,7 @@
+ #ifndef SI_PM4_H
+ #define SI_PM4_H
+
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+
+ #define SI_PM4_MAX_DW 256
+ #define SI_PM4_MAX_BO 32
+diff --git a/src/gallium/targets/pipe-loader/pipe_r300.c b/src/gallium/targets/pipe-loader/pipe_r300.c
+index abcade4..368b8c2 100644
+--- a/src/gallium/targets/pipe-loader/pipe_r300.c
++++ b/src/gallium/targets/pipe-loader/pipe_r300.c
+@@ -1,7 +1,7 @@
+ #include "target-helpers/inline_debug_helper.h"
+ #include "state_tracker/drm_driver.h"
+ #include "radeon/drm/radeon_drm_public.h"
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "r300/r300_public.h"
+
+ static struct pipe_screen *
+diff --git a/src/gallium/targets/pipe-loader/pipe_r600.c b/src/gallium/targets/pipe-loader/pipe_r600.c
+index eb53637..65b11c8 100644
+--- a/src/gallium/targets/pipe-loader/pipe_r600.c
++++ b/src/gallium/targets/pipe-loader/pipe_r600.c
+@@ -1,7 +1,7 @@
+ #include "state_tracker/drm_driver.h"
+ #include "target-helpers/inline_debug_helper.h"
+ #include "radeon/drm/radeon_drm_public.h"
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "r600/r600_public.h"
+
+ static struct pipe_screen *
+diff --git a/src/gallium/targets/pipe-loader/pipe_radeonsi.c b/src/gallium/targets/pipe-loader/pipe_radeonsi.c
+index 1dcd781..5457b5b 100644
+--- a/src/gallium/targets/pipe-loader/pipe_radeonsi.c
++++ b/src/gallium/targets/pipe-loader/pipe_radeonsi.c
+@@ -1,7 +1,7 @@
+ #include "state_tracker/drm_driver.h"
+ #include "target-helpers/inline_debug_helper.h"
+ #include "radeon/drm/radeon_drm_public.h"
+-#include "radeon/drm/radeon_winsys.h"
++#include "radeon/radeon_winsys.h"
+ #include "radeonsi/si_public.h"
+
+ static struct pipe_screen *
+diff --git a/src/gallium/winsys/radeon/drm/Makefile.sources b/src/gallium/winsys/radeon/drm/Makefile.sources
+index d30969e..ced788b 100644
+--- a/src/gallium/winsys/radeon/drm/Makefile.sources
++++ b/src/gallium/winsys/radeon/drm/Makefile.sources
+@@ -7,7 +7,7 @@ C_SOURCES := \
+ radeon_drm_public.h \
+ radeon_drm_winsys.c \
+ radeon_drm_winsys.h \
+- radeon_winsys.h
++ ../radeon_winsys.h
+
+ TOOLS_HDR := \
+ radeon_ctx.h
+diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
+index 5711ffa..70657be 100644
+--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
++++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
+@@ -30,7 +30,7 @@
+ #ifndef RADEON_DRM_WINSYS_H
+ #define RADEON_DRM_WINSYS_H
+
+-#include "radeon_winsys.h"
++#include "../radeon_winsys.h"
+ #include "os/os_thread.h"
+ #include <radeon_drm.h>
+
+diff --git a/src/gallium/winsys/radeon/drm/radeon_winsys.h b/src/gallium/winsys/radeon/drm/radeon_winsys.h
+deleted file mode 100644
+index a8cc60a..0000000
+--- a/src/gallium/winsys/radeon/drm/radeon_winsys.h
++++ /dev/null
+@@ -1,604 +0,0 @@
+-/*
+- * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+- * Copyright 2010 Marek Olšák <maraeo@gmail.com>
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * on the rights to use, copy, modify, merge, publish, distribute, sub
+- * license, and/or sell copies of the Software, and to permit persons to whom
+- * the Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+- * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+- * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+-
+-#ifndef RADEON_WINSYS_H
+-#define RADEON_WINSYS_H
+-
+-/* The public winsys interface header for the radeon driver. */
+-
+-/* R300 features in DRM.
+- *
+- * 2.6.0:
+- * - Hyper-Z
+- * - GB_Z_PEQ_CONFIG on rv350->r4xx
+- * - R500 FG_ALPHA_VALUE
+- *
+- * 2.8.0:
+- * - R500 US_FORMAT regs
+- * - R500 ARGB2101010 colorbuffer
+- * - CMask and AA regs
+- * - R16F/RG16F
+- */
+-
+-#include "pipebuffer/pb_buffer.h"
+-#include "radeon_surface.h"
+-
+-#define RADEON_MAX_CMDBUF_DWORDS (16 * 1024)
+-
+-#define RADEON_FLUSH_ASYNC (1 << 0)
+-#define RADEON_FLUSH_KEEP_TILING_FLAGS (1 << 1) /* needs DRM 2.12.0 */
+-#define RADEON_FLUSH_COMPUTE (1 << 2)
+-#define RADEON_FLUSH_END_OF_FRAME (1 << 3)
+-
+-/* Tiling flags. */
+-enum radeon_bo_layout {
+- RADEON_LAYOUT_LINEAR = 0,
+- RADEON_LAYOUT_TILED,
+- RADEON_LAYOUT_SQUARETILED,
+-
+- RADEON_LAYOUT_UNKNOWN
+-};
+-
+-enum radeon_bo_domain { /* bitfield */
+- RADEON_DOMAIN_GTT = 2,
+- RADEON_DOMAIN_VRAM = 4,
+- RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT
+-};
+-
+-enum radeon_bo_flag { /* bitfield */
+- RADEON_FLAG_GTT_WC = (1 << 0),
+- RADEON_FLAG_CPU_ACCESS = (1 << 1),
+- RADEON_FLAG_NO_CPU_ACCESS = (1 << 2),
+-};
+-
+-enum radeon_bo_usage { /* bitfield */
+- RADEON_USAGE_READ = 2,
+- RADEON_USAGE_WRITE = 4,
+- RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE
+-};
+-
+-enum radeon_family {
+- CHIP_UNKNOWN = 0,
+- CHIP_R300, /* R3xx-based cores. */
+- CHIP_R350,
+- CHIP_RV350,
+- CHIP_RV370,
+- CHIP_RV380,
+- CHIP_RS400,
+- CHIP_RC410,
+- CHIP_RS480,
+- CHIP_R420, /* R4xx-based cores. */
+- CHIP_R423,
+- CHIP_R430,
+- CHIP_R480,
+- CHIP_R481,
+- CHIP_RV410,
+- CHIP_RS600,
+- CHIP_RS690,
+- CHIP_RS740,
+- CHIP_RV515, /* R5xx-based cores. */
+- CHIP_R520,
+- CHIP_RV530,
+- CHIP_R580,
+- CHIP_RV560,
+- CHIP_RV570,
+- CHIP_R600,
+- CHIP_RV610,
+- CHIP_RV630,
+- CHIP_RV670,
+- CHIP_RV620,
+- CHIP_RV635,
+- CHIP_RS780,
+- CHIP_RS880,
+- CHIP_RV770,
+- CHIP_RV730,
+- CHIP_RV710,
+- CHIP_RV740,
+- CHIP_CEDAR,
+- CHIP_REDWOOD,
+- CHIP_JUNIPER,
+- CHIP_CYPRESS,
+- CHIP_HEMLOCK,
+- CHIP_PALM,
+- CHIP_SUMO,
+- CHIP_SUMO2,
+- CHIP_BARTS,
+- CHIP_TURKS,
+- CHIP_CAICOS,
+- CHIP_CAYMAN,
+- CHIP_ARUBA,
+- CHIP_TAHITI,
+- CHIP_PITCAIRN,
+- CHIP_VERDE,
+- CHIP_OLAND,
+- CHIP_HAINAN,
+- CHIP_BONAIRE,
+- CHIP_KAVERI,
+- CHIP_KABINI,
+- CHIP_HAWAII,
+- CHIP_MULLINS,
+- CHIP_LAST,
+-};
+-
+-enum chip_class {
+- CLASS_UNKNOWN = 0,
+- R300,
+- R400,
+- R500,
+- R600,
+- R700,
+- EVERGREEN,
+- CAYMAN,
+- SI,
+- CIK,
+-};
+-
+-enum ring_type {
+- RING_GFX = 0,
+- RING_DMA,
+- RING_UVD,
+- RING_VCE,
+- RING_LAST,
+-};
+-
+-enum radeon_value_id {
+- RADEON_REQUESTED_VRAM_MEMORY,
+- RADEON_REQUESTED_GTT_MEMORY,
+- RADEON_BUFFER_WAIT_TIME_NS,
+- RADEON_TIMESTAMP,
+- RADEON_NUM_CS_FLUSHES,
+- RADEON_NUM_BYTES_MOVED,
+- RADEON_VRAM_USAGE,
+- RADEON_GTT_USAGE
+-};
+-
+-enum radeon_bo_priority {
+- RADEON_PRIO_MIN,
+- RADEON_PRIO_SHADER_DATA, /* shader code, resource descriptors */
+- RADEON_PRIO_SHADER_BUFFER_RO, /* read-only */
+- RADEON_PRIO_SHADER_TEXTURE_RO, /* read-only */
+- RADEON_PRIO_SHADER_RESOURCE_RW, /* buffers, textures, streamout, GS rings, RATs; read/write */
+- RADEON_PRIO_COLOR_BUFFER,
+- RADEON_PRIO_DEPTH_BUFFER,
+- RADEON_PRIO_SHADER_TEXTURE_MSAA,
+- RADEON_PRIO_COLOR_BUFFER_MSAA,
+- RADEON_PRIO_DEPTH_BUFFER_MSAA,
+- RADEON_PRIO_COLOR_META,
+- RADEON_PRIO_DEPTH_META,
+- RADEON_PRIO_MAX /* must be <= 15 */
+-};
+-
+-struct winsys_handle;
+-struct radeon_winsys_cs_handle;
+-
+-struct radeon_winsys_cs {
+- unsigned cdw; /* Number of used dwords. */
+- uint32_t *buf; /* The command buffer. */
+- enum ring_type ring_type;
+-};
+-
+-struct radeon_info {
+- uint32_t pci_id;
+- enum radeon_family family;
+- enum chip_class chip_class;
+- uint64_t gart_size;
+- uint64_t vram_size;
+- uint32_t max_sclk;
+- uint32_t max_compute_units;
+- uint32_t max_se;
+- uint32_t max_sh_per_se;
+-
+- uint32_t drm_major; /* version */
+- uint32_t drm_minor;
+- uint32_t drm_patchlevel;
+-
+- boolean has_uvd;
+- uint32_t vce_fw_version;
+- boolean has_userptr;
+-
+- uint32_t r300_num_gb_pipes;
+- uint32_t r300_num_z_pipes;
+-
+- uint32_t r600_num_backends;
+- uint32_t r600_clock_crystal_freq;
+- uint32_t r600_tiling_config;
+- uint32_t r600_num_tile_pipes;
+- uint32_t r600_max_pipes;
+- boolean r600_virtual_address;
+- boolean r600_has_dma;
+-
+- uint32_t r600_backend_map;
+- boolean r600_backend_map_valid;
+-
+- boolean si_tile_mode_array_valid;
+- uint32_t si_tile_mode_array[32];
+- uint32_t si_backend_enabled_mask;
+-
+- boolean cik_macrotile_mode_array_valid;
+- uint32_t cik_macrotile_mode_array[16];
+-};
+-
+-enum radeon_feature_id {
+- RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
+- RADEON_FID_R300_CMASK_ACCESS,
+-};
+-
+-struct radeon_winsys {
+- /**
+- * The screen object this winsys was created for
+- */
+- struct pipe_screen *screen;
+-
+- /**
+- * Decrement the winsys reference count.
+- *
+- * \param ws The winsys this function is called for.
+- * \return True if the winsys and screen should be destroyed.
+- */
+- bool (*unref)(struct radeon_winsys *ws);
+-
+- /**
+- * Destroy this winsys.
+- *
+- * \param ws The winsys this function is called from.
+- */
+- void (*destroy)(struct radeon_winsys *ws);
+-
+- /**
+- * Query an info structure from winsys.
+- *
+- * \param ws The winsys this function is called from.
+- * \param info Return structure
+- */
+- void (*query_info)(struct radeon_winsys *ws,
+- struct radeon_info *info);
+-
+- /**************************************************************************
+- * Buffer management. Buffer attributes are mostly fixed over its lifetime.
+- *
+- * Remember that gallium gets to choose the interface it needs, and the
+- * window systems must then implement that interface (rather than the
+- * other way around...).
+- *************************************************************************/
+-
+- /**
+- * Create a buffer object.
+- *
+- * \param ws The winsys this function is called from.
+- * \param size The size to allocate.
+- * \param alignment An alignment of the buffer in memory.
+- * \param use_reusable_pool Whether the cache buffer manager should be used.
+- * \param domain A bitmask of the RADEON_DOMAIN_* flags.
+- * \return The created buffer object.
+- */
+- struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws,
+- unsigned size,
+- unsigned alignment,
+- boolean use_reusable_pool,
+- enum radeon_bo_domain domain,
+- enum radeon_bo_flag flags);
+-
+- struct radeon_winsys_cs_handle *(*buffer_get_cs_handle)(
+- struct pb_buffer *buf);
+-
+- /**
+- * Map the entire data store of a buffer object into the client's address
+- * space.
+- *
+- * \param buf A winsys buffer object to map.
+- * \param cs A command stream to flush if the buffer is referenced by it.
+- * \param usage A bitmask of the PIPE_TRANSFER_* flags.
+- * \return The pointer at the beginning of the buffer.
+- */
+- void *(*buffer_map)(struct radeon_winsys_cs_handle *buf,
+- struct radeon_winsys_cs *cs,
+- enum pipe_transfer_usage usage);
+-
+- /**
+- * Unmap a buffer object from the client's address space.
+- *
+- * \param buf A winsys buffer object to unmap.
+- */
+- void (*buffer_unmap)(struct radeon_winsys_cs_handle *buf);
+-
+- /**
+- * Return TRUE if a buffer object is being used by the GPU.
+- *
+- * \param buf A winsys buffer object.
+- * \param usage Only check whether the buffer is busy for the given usage.
+- */
+- boolean (*buffer_is_busy)(struct pb_buffer *buf,
+- enum radeon_bo_usage usage);
+-
+- /**
+- * Wait for a buffer object until it is not used by a GPU. This is
+- * equivalent to a fence placed after the last command using the buffer,
+- * and synchronizing to the fence.
+- *
+- * \param buf A winsys buffer object to wait for.
+- * \param usage Only wait until the buffer is idle for the given usage,
+- * but may still be busy for some other usage.
+- */
+- void (*buffer_wait)(struct pb_buffer *buf, enum radeon_bo_usage usage);
+-
+- /**
+- * Return tiling flags describing a memory layout of a buffer object.
+- *
+- * \param buf A winsys buffer object to get the flags from.
+- * \param macrotile A pointer to the return value of the microtile flag.
+- * \param microtile A pointer to the return value of the macrotile flag.
+- *
+- * \note microtile and macrotile are not bitmasks!
+- */
+- void (*buffer_get_tiling)(struct pb_buffer *buf,
+- enum radeon_bo_layout *microtile,
+- enum radeon_bo_layout *macrotile,
+- unsigned *bankw, unsigned *bankh,
+- unsigned *tile_split,
+- unsigned *stencil_tile_split,
+- unsigned *mtilea,
+- bool *scanout);
+-
+- /**
+- * Set tiling flags describing a memory layout of a buffer object.
+- *
+- * \param buf A winsys buffer object to set the flags for.
+- * \param cs A command stream to flush if the buffer is referenced by it.
+- * \param macrotile A macrotile flag.
+- * \param microtile A microtile flag.
+- * \param stride A stride of the buffer in bytes, for texturing.
+- *
+- * \note microtile and macrotile are not bitmasks!
+- */
+- void (*buffer_set_tiling)(struct pb_buffer *buf,
+- struct radeon_winsys_cs *rcs,
+- enum radeon_bo_layout microtile,
+- enum radeon_bo_layout macrotile,
+- unsigned bankw, unsigned bankh,
+- unsigned tile_split,
+- unsigned stencil_tile_split,
+- unsigned mtilea,
+- unsigned stride,
+- bool scanout);
+-
+- /**
+- * Get a winsys buffer from a winsys handle. The internal structure
+- * of the handle is platform-specific and only a winsys should access it.
+- *
+- * \param ws The winsys this function is called from.
+- * \param whandle A winsys handle pointer as was received from a state
+- * tracker.
+- * \param stride The returned buffer stride in bytes.
+- */
+- struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws,
+- struct winsys_handle *whandle,
+- unsigned *stride);
+-
+- /**
+- * Get a winsys buffer from a user pointer. The resulting buffer can't
+- * be exported. Both pointer and size must be page aligned.
+- *
+- * \param ws The winsys this function is called from.
+- * \param pointer User pointer to turn into a buffer object.
+- * \param Size Size in bytes for the new buffer.
+- */
+- struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws,
+- void *pointer, unsigned size);
+-
+- /**
+- * Get a winsys handle from a winsys buffer. The internal structure
+- * of the handle is platform-specific and only a winsys should access it.
+- *
+- * \param buf A winsys buffer object to get the handle from.
+- * \param whandle A winsys handle pointer.
+- * \param stride A stride of the buffer in bytes, for texturing.
+- * \return TRUE on success.
+- */
+- boolean (*buffer_get_handle)(struct pb_buffer *buf,
+- unsigned stride,
+- struct winsys_handle *whandle);
+-
+- /**
+- * Return the virtual address of a buffer.
+- *
+- * \param buf A winsys buffer object
+- * \return virtual address
+- */
+- uint64_t (*buffer_get_virtual_address)(struct radeon_winsys_cs_handle *buf);
+-
+- /**
+- * Query the initial placement of the buffer from the kernel driver.
+- */
+- enum radeon_bo_domain (*buffer_get_initial_domain)(struct radeon_winsys_cs_handle *buf);
+-
+- /**************************************************************************
+- * Command submission.
+- *
+- * Each pipe context should create its own command stream and submit
+- * commands independently of other contexts.
+- *************************************************************************/
+-
+- /**
+- * Create a command stream.
+- *
+- * \param ws The winsys this function is called from.
+- * \param ring_type The ring type (GFX, DMA, UVD)
+- * \param flush Flush callback function associated with the command stream.
+- * \param user User pointer that will be passed to the flush callback.
+- * \param trace_buf Trace buffer when tracing is enabled
+- */
+- struct radeon_winsys_cs *(*cs_create)(struct radeon_winsys *ws,
+- enum ring_type ring_type,
+- void (*flush)(void *ctx, unsigned flags,
+- struct pipe_fence_handle **fence),
+- void *flush_ctx,
+- struct radeon_winsys_cs_handle *trace_buf);
+-
+- /**
+- * Destroy a command stream.
+- *
+- * \param cs A command stream to destroy.
+- */
+- void (*cs_destroy)(struct radeon_winsys_cs *cs);
+-
+- /**
+- * Add a new buffer relocation. Every relocation must first be added
+- * before it can be written.
+- *
+- * \param cs A command stream to add buffer for validation against.
+- * \param buf A winsys buffer to validate.
+- * \param usage Whether the buffer is used for read and/or write.
+- * \param domain Bitmask of the RADEON_DOMAIN_* flags.
+- * \param priority A higher number means a greater chance of being
+- * placed in the requested domain. 15 is the maximum.
+- * \return Relocation index.
+- */
+- unsigned (*cs_add_reloc)(struct radeon_winsys_cs *cs,
+- struct radeon_winsys_cs_handle *buf,
+- enum radeon_bo_usage usage,
+- enum radeon_bo_domain domain,
+- enum radeon_bo_priority priority);
+-
+- /**
+- * Return the index of an already-added buffer.
+- *
+- * \param cs Command stream
+- * \param buf Buffer
+- * \return The buffer index, or -1 if the buffer has not been added.
+- */
+- int (*cs_get_reloc)(struct radeon_winsys_cs *cs,
+- struct radeon_winsys_cs_handle *buf);
+-
+- /**
+- * Return TRUE if there is enough memory in VRAM and GTT for the relocs
+- * added so far. If the validation fails, all the relocations which have
+- * been added since the last call of cs_validate will be removed and
+- * the CS will be flushed (provided there are still any relocations).
+- *
+- * \param cs A command stream to validate.
+- */
+- boolean (*cs_validate)(struct radeon_winsys_cs *cs);
+-
+- /**
+- * Return TRUE if there is enough memory in VRAM and GTT for the relocs
+- * added so far.
+- *
+- * \param cs A command stream to validate.
+- * \param vram VRAM memory size pending to be use
+- * \param gtt GTT memory size pending to be use
+- */
+- boolean (*cs_memory_below_limit)(struct radeon_winsys_cs *cs, uint64_t vram, uint64_t gtt);
+-
+- /**
+- * Flush a command stream.
+- *
+- * \param cs A command stream to flush.
+- * \param flags, RADEON_FLUSH_ASYNC or 0.
+- * \param fence Pointer to a fence. If non-NULL, a fence is inserted
+- * after the CS and is returned through this parameter.
+- * \param cs_trace_id A unique identifier of the cs, used for tracing.
+- */
+- void (*cs_flush)(struct radeon_winsys_cs *cs,
+- unsigned flags,
+- struct pipe_fence_handle **fence,
+- uint32_t cs_trace_id);
+-
+- /**
+- * Return TRUE if a buffer is referenced by a command stream.
+- *
+- * \param cs A command stream.
+- * \param buf A winsys buffer.
+- */
+- boolean (*cs_is_buffer_referenced)(struct radeon_winsys_cs *cs,
+- struct radeon_winsys_cs_handle *buf,
+- enum radeon_bo_usage usage);
+-
+- /**
+- * Request access to a feature for a command stream.
+- *
+- * \param cs A command stream.
+- * \param fid Feature ID, one of RADEON_FID_*
+- * \param enable Whether to enable or disable the feature.
+- */
+- boolean (*cs_request_feature)(struct radeon_winsys_cs *cs,
+- enum radeon_feature_id fid,
+- boolean enable);
+- /**
+- * Make sure all asynchronous flush of the cs have completed
+- *
+- * \param cs A command stream.
+- */
+- void (*cs_sync_flush)(struct radeon_winsys_cs *cs);
+-
+- /**
+- * Wait for the fence and return true if the fence has been signalled.
+- * The timeout of 0 will only return the status.
+- * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
+- * is signalled.
+- */
+- bool (*fence_wait)(struct radeon_winsys *ws,
+- struct pipe_fence_handle *fence,
+- uint64_t timeout);
+-
+- /**
+- * Reference counting for fences.
+- */
+- void (*fence_reference)(struct pipe_fence_handle **dst,
+- struct pipe_fence_handle *src);
+-
+- /**
+- * Initialize surface
+- *
+- * \param ws The winsys this function is called from.
+- * \param surf Surface structure ptr
+- */
+- int (*surface_init)(struct radeon_winsys *ws,
+- struct radeon_surface *surf);
+-
+- /**
+- * Find best values for a surface
+- *
+- * \param ws The winsys this function is called from.
+- * \param surf Surface structure ptr
+- */
+- int (*surface_best)(struct radeon_winsys *ws,
+- struct radeon_surface *surf);
+-
+- uint64_t (*query_value)(struct radeon_winsys *ws,
+- enum radeon_value_id value);
+-};
+-
+-
+-static INLINE void radeon_emit(struct radeon_winsys_cs *cs, uint32_t value)
+-{
+- cs->buf[cs->cdw++] = value;
+-}
+-
+-static INLINE void radeon_emit_array(struct radeon_winsys_cs *cs,
+- const uint32_t *values, unsigned count)
+-{
+- memcpy(cs->buf+cs->cdw, values, count * 4);
+- cs->cdw += count;
+-}
+-
+-#endif
+diff --git a/src/gallium/winsys/radeon/radeon_winsys.h b/src/gallium/winsys/radeon/radeon_winsys.h
+new file mode 100644
+index 0000000..a8cc60a
+--- /dev/null
++++ b/src/gallium/winsys/radeon/radeon_winsys.h
+@@ -0,0 +1,604 @@
++/*
++ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
++ * Copyright 2010 Marek Olšák <maraeo@gmail.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * on the rights to use, copy, modify, merge, publish, distribute, sub
++ * license, and/or sell copies of the Software, and to permit persons to whom
++ * the Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
++
++#ifndef RADEON_WINSYS_H
++#define RADEON_WINSYS_H
++
++/* The public winsys interface header for the radeon driver. */
++
++/* R300 features in DRM.
++ *
++ * 2.6.0:
++ * - Hyper-Z
++ * - GB_Z_PEQ_CONFIG on rv350->r4xx
++ * - R500 FG_ALPHA_VALUE
++ *
++ * 2.8.0:
++ * - R500 US_FORMAT regs
++ * - R500 ARGB2101010 colorbuffer
++ * - CMask and AA regs
++ * - R16F/RG16F
++ */
++
++#include "pipebuffer/pb_buffer.h"
++#include "radeon_surface.h"
++
++#define RADEON_MAX_CMDBUF_DWORDS (16 * 1024)
++
++#define RADEON_FLUSH_ASYNC (1 << 0)
++#define RADEON_FLUSH_KEEP_TILING_FLAGS (1 << 1) /* needs DRM 2.12.0 */
++#define RADEON_FLUSH_COMPUTE (1 << 2)
++#define RADEON_FLUSH_END_OF_FRAME (1 << 3)
++
++/* Tiling flags. */
++enum radeon_bo_layout {
++ RADEON_LAYOUT_LINEAR = 0,
++ RADEON_LAYOUT_TILED,
++ RADEON_LAYOUT_SQUARETILED,
++
++ RADEON_LAYOUT_UNKNOWN
++};
++
++enum radeon_bo_domain { /* bitfield */
++ RADEON_DOMAIN_GTT = 2,
++ RADEON_DOMAIN_VRAM = 4,
++ RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT
++};
++
++enum radeon_bo_flag { /* bitfield */
++ RADEON_FLAG_GTT_WC = (1 << 0),
++ RADEON_FLAG_CPU_ACCESS = (1 << 1),
++ RADEON_FLAG_NO_CPU_ACCESS = (1 << 2),
++};
++
++enum radeon_bo_usage { /* bitfield */
++ RADEON_USAGE_READ = 2,
++ RADEON_USAGE_WRITE = 4,
++ RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE
++};
++
++enum radeon_family {
++ CHIP_UNKNOWN = 0,
++ CHIP_R300, /* R3xx-based cores. */
++ CHIP_R350,
++ CHIP_RV350,
++ CHIP_RV370,
++ CHIP_RV380,
++ CHIP_RS400,
++ CHIP_RC410,
++ CHIP_RS480,
++ CHIP_R420, /* R4xx-based cores. */
++ CHIP_R423,
++ CHIP_R430,
++ CHIP_R480,
++ CHIP_R481,
++ CHIP_RV410,
++ CHIP_RS600,
++ CHIP_RS690,
++ CHIP_RS740,
++ CHIP_RV515, /* R5xx-based cores. */
++ CHIP_R520,
++ CHIP_RV530,
++ CHIP_R580,
++ CHIP_RV560,
++ CHIP_RV570,
++ CHIP_R600,
++ CHIP_RV610,
++ CHIP_RV630,
++ CHIP_RV670,
++ CHIP_RV620,
++ CHIP_RV635,
++ CHIP_RS780,
++ CHIP_RS880,
++ CHIP_RV770,
++ CHIP_RV730,
++ CHIP_RV710,
++ CHIP_RV740,
++ CHIP_CEDAR,
++ CHIP_REDWOOD,
++ CHIP_JUNIPER,
++ CHIP_CYPRESS,
++ CHIP_HEMLOCK,
++ CHIP_PALM,
++ CHIP_SUMO,
++ CHIP_SUMO2,
++ CHIP_BARTS,
++ CHIP_TURKS,
++ CHIP_CAICOS,
++ CHIP_CAYMAN,
++ CHIP_ARUBA,
++ CHIP_TAHITI,
++ CHIP_PITCAIRN,
++ CHIP_VERDE,
++ CHIP_OLAND,
++ CHIP_HAINAN,
++ CHIP_BONAIRE,
++ CHIP_KAVERI,
++ CHIP_KABINI,
++ CHIP_HAWAII,
++ CHIP_MULLINS,
++ CHIP_LAST,
++};
++
++enum chip_class {
++ CLASS_UNKNOWN = 0,
++ R300,
++ R400,
++ R500,
++ R600,
++ R700,
++ EVERGREEN,
++ CAYMAN,
++ SI,
++ CIK,
++};
++
++enum ring_type {
++ RING_GFX = 0,
++ RING_DMA,
++ RING_UVD,
++ RING_VCE,
++ RING_LAST,
++};
++
++enum radeon_value_id {
++ RADEON_REQUESTED_VRAM_MEMORY,
++ RADEON_REQUESTED_GTT_MEMORY,
++ RADEON_BUFFER_WAIT_TIME_NS,
++ RADEON_TIMESTAMP,
++ RADEON_NUM_CS_FLUSHES,
++ RADEON_NUM_BYTES_MOVED,
++ RADEON_VRAM_USAGE,
++ RADEON_GTT_USAGE
++};
++
++enum radeon_bo_priority {
++ RADEON_PRIO_MIN,
++ RADEON_PRIO_SHADER_DATA, /* shader code, resource descriptors */
++ RADEON_PRIO_SHADER_BUFFER_RO, /* read-only */
++ RADEON_PRIO_SHADER_TEXTURE_RO, /* read-only */
++ RADEON_PRIO_SHADER_RESOURCE_RW, /* buffers, textures, streamout, GS rings, RATs; read/write */
++ RADEON_PRIO_COLOR_BUFFER,
++ RADEON_PRIO_DEPTH_BUFFER,
++ RADEON_PRIO_SHADER_TEXTURE_MSAA,
++ RADEON_PRIO_COLOR_BUFFER_MSAA,
++ RADEON_PRIO_DEPTH_BUFFER_MSAA,
++ RADEON_PRIO_COLOR_META,
++ RADEON_PRIO_DEPTH_META,
++ RADEON_PRIO_MAX /* must be <= 15 */
++};
++
++struct winsys_handle;
++struct radeon_winsys_cs_handle;
++
++struct radeon_winsys_cs {
++ unsigned cdw; /* Number of used dwords. */
++ uint32_t *buf; /* The command buffer. */
++ enum ring_type ring_type;
++};
++
++struct radeon_info {
++ uint32_t pci_id;
++ enum radeon_family family;
++ enum chip_class chip_class;
++ uint64_t gart_size;
++ uint64_t vram_size;
++ uint32_t max_sclk;
++ uint32_t max_compute_units;
++ uint32_t max_se;
++ uint32_t max_sh_per_se;
++
++ uint32_t drm_major; /* version */
++ uint32_t drm_minor;
++ uint32_t drm_patchlevel;
++
++ boolean has_uvd;
++ uint32_t vce_fw_version;
++ boolean has_userptr;
++
++ uint32_t r300_num_gb_pipes;
++ uint32_t r300_num_z_pipes;
++
++ uint32_t r600_num_backends;
++ uint32_t r600_clock_crystal_freq;
++ uint32_t r600_tiling_config;
++ uint32_t r600_num_tile_pipes;
++ uint32_t r600_max_pipes;
++ boolean r600_virtual_address;
++ boolean r600_has_dma;
++
++ uint32_t r600_backend_map;
++ boolean r600_backend_map_valid;
++
++ boolean si_tile_mode_array_valid;
++ uint32_t si_tile_mode_array[32];
++ uint32_t si_backend_enabled_mask;
++
++ boolean cik_macrotile_mode_array_valid;
++ uint32_t cik_macrotile_mode_array[16];
++};
++
++enum radeon_feature_id {
++ RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
++ RADEON_FID_R300_CMASK_ACCESS,
++};
++
++struct radeon_winsys {
++ /**
++ * The screen object this winsys was created for
++ */
++ struct pipe_screen *screen;
++
++ /**
++ * Decrement the winsys reference count.
++ *
++ * \param ws The winsys this function is called for.
++ * \return True if the winsys and screen should be destroyed.
++ */
++ bool (*unref)(struct radeon_winsys *ws);
++
++ /**
++ * Destroy this winsys.
++ *
++ * \param ws The winsys this function is called from.
++ */
++ void (*destroy)(struct radeon_winsys *ws);
++
++ /**
++ * Query an info structure from winsys.
++ *
++ * \param ws The winsys this function is called from.
++ * \param info Return structure
++ */
++ void (*query_info)(struct radeon_winsys *ws,
++ struct radeon_info *info);
++
++ /**************************************************************************
++ * Buffer management. Buffer attributes are mostly fixed over its lifetime.
++ *
++ * Remember that gallium gets to choose the interface it needs, and the
++ * window systems must then implement that interface (rather than the
++ * other way around...).
++ *************************************************************************/
++
++ /**
++ * Create a buffer object.
++ *
++ * \param ws The winsys this function is called from.
++ * \param size The size to allocate.
++ * \param alignment An alignment of the buffer in memory.
++ * \param use_reusable_pool Whether the cache buffer manager should be used.
++ * \param domain A bitmask of the RADEON_DOMAIN_* flags.
++ * \return The created buffer object.
++ */
++ struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws,
++ unsigned size,
++ unsigned alignment,
++ boolean use_reusable_pool,
++ enum radeon_bo_domain domain,
++ enum radeon_bo_flag flags);
++
++ struct radeon_winsys_cs_handle *(*buffer_get_cs_handle)(
++ struct pb_buffer *buf);
++
++ /**
++ * Map the entire data store of a buffer object into the client's address
++ * space.
++ *
++ * \param buf A winsys buffer object to map.
++ * \param cs A command stream to flush if the buffer is referenced by it.
++ * \param usage A bitmask of the PIPE_TRANSFER_* flags.
++ * \return The pointer at the beginning of the buffer.
++ */
++ void *(*buffer_map)(struct radeon_winsys_cs_handle *buf,
++ struct radeon_winsys_cs *cs,
++ enum pipe_transfer_usage usage);
++
++ /**
++ * Unmap a buffer object from the client's address space.
++ *
++ * \param buf A winsys buffer object to unmap.
++ */
++ void (*buffer_unmap)(struct radeon_winsys_cs_handle *buf);
++
++ /**
++ * Return TRUE if a buffer object is being used by the GPU.
++ *
++ * \param buf A winsys buffer object.
++ * \param usage Only check whether the buffer is busy for the given usage.
++ */
++ boolean (*buffer_is_busy)(struct pb_buffer *buf,
++ enum radeon_bo_usage usage);
++
++ /**
++ * Wait until a buffer object is no longer used by the GPU. This is
++ * equivalent to placing a fence after the last command using the buffer
++ * and synchronizing to that fence.
++ *
++ * \param buf A winsys buffer object to wait for.
++ * \param usage Only wait until the buffer is idle for the given usage;
++ * it may still be busy for other usages.
++ */
++ void (*buffer_wait)(struct pb_buffer *buf, enum radeon_bo_usage usage);
++
++ /**
++ * Return tiling flags describing a memory layout of a buffer object.
++ *
++ * \param buf A winsys buffer object to get the flags from.
++ * \param microtile A pointer to the return value of the microtile flag.
++ * \param macrotile A pointer to the return value of the macrotile flag.
++ *
++ * \note microtile and macrotile are not bitmasks!
++ */
++ void (*buffer_get_tiling)(struct pb_buffer *buf,
++ enum radeon_bo_layout *microtile,
++ enum radeon_bo_layout *macrotile,
++ unsigned *bankw, unsigned *bankh,
++ unsigned *tile_split,
++ unsigned *stencil_tile_split,
++ unsigned *mtilea,
++ bool *scanout);
++
++ /**
++ * Set tiling flags describing a memory layout of a buffer object.
++ *
++ * \param buf A winsys buffer object to set the flags for.
++ * \param cs A command stream to flush if the buffer is referenced by it.
++ * \param macrotile A macrotile flag.
++ * \param microtile A microtile flag.
++ * \param stride A stride of the buffer in bytes, for texturing.
++ *
++ * \note microtile and macrotile are not bitmasks!
++ */
++ void (*buffer_set_tiling)(struct pb_buffer *buf,
++ struct radeon_winsys_cs *rcs,
++ enum radeon_bo_layout microtile,
++ enum radeon_bo_layout macrotile,
++ unsigned bankw, unsigned bankh,
++ unsigned tile_split,
++ unsigned stencil_tile_split,
++ unsigned mtilea,
++ unsigned stride,
++ bool scanout);
++
++ /**
++ * Get a winsys buffer from a winsys handle. The internal structure
++ * of the handle is platform-specific and only a winsys should access it.
++ *
++ * \param ws The winsys this function is called from.
++ * \param whandle A winsys handle pointer as was received from a state
++ * tracker.
++ * \param stride The returned buffer stride in bytes.
++ */
++ struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws,
++ struct winsys_handle *whandle,
++ unsigned *stride);
++
++ /**
++ * Get a winsys buffer from a user pointer. The resulting buffer can't
++ * be exported. Both pointer and size must be page aligned.
++ *
++ * \param ws The winsys this function is called from.
++ * \param pointer User pointer to turn into a buffer object.
++ * \param size Size in bytes for the new buffer.
++ */
++ struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws,
++ void *pointer, unsigned size);
++
++ /**
++ * Get a winsys handle from a winsys buffer. The internal structure
++ * of the handle is platform-specific and only a winsys should access it.
++ *
++ * \param buf A winsys buffer object to get the handle from.
++ * \param whandle A winsys handle pointer.
++ * \param stride A stride of the buffer in bytes, for texturing.
++ * \return TRUE on success.
++ */
++ boolean (*buffer_get_handle)(struct pb_buffer *buf,
++ unsigned stride,
++ struct winsys_handle *whandle);
++
++ /**
++ * Return the virtual address of a buffer.
++ *
++ * \param buf A winsys buffer object
++ * \return virtual address
++ */
++ uint64_t (*buffer_get_virtual_address)(struct radeon_winsys_cs_handle *buf);
++
++ /**
++ * Query the initial placement of the buffer from the kernel driver.
++ */
++ enum radeon_bo_domain (*buffer_get_initial_domain)(struct radeon_winsys_cs_handle *buf);
++
++ /**************************************************************************
++ * Command submission.
++ *
++ * Each pipe context should create its own command stream and submit
++ * commands independently of other contexts.
++ *************************************************************************/
++
++ /**
++ * Create a command stream.
++ *
++ * \param ws The winsys this function is called from.
++ * \param ring_type The ring type (GFX, DMA, UVD)
++ * \param flush Flush callback function associated with the command stream.
++ * \param flush_ctx User pointer that will be passed to the flush callback.
++ * \param trace_buf Trace buffer when tracing is enabled
++ */
++ struct radeon_winsys_cs *(*cs_create)(struct radeon_winsys *ws,
++ enum ring_type ring_type,
++ void (*flush)(void *ctx, unsigned flags,
++ struct pipe_fence_handle **fence),
++ void *flush_ctx,
++ struct radeon_winsys_cs_handle *trace_buf);
++
++ /**
++ * Destroy a command stream.
++ *
++ * \param cs A command stream to destroy.
++ */
++ void (*cs_destroy)(struct radeon_winsys_cs *cs);
++
++ /**
++ * Add a new buffer relocation. Every buffer must be added as a relocation
++ * before commands that use it are written.
++ *
++ * \param cs A command stream to add buffer for validation against.
++ * \param buf A winsys buffer to validate.
++ * \param usage Whether the buffer is used for read and/or write.
++ * \param domain Bitmask of the RADEON_DOMAIN_* flags.
++ * \param priority A higher number means a greater chance of being
++ * placed in the requested domain. 15 is the maximum.
++ * \return Relocation index.
++ */
++ unsigned (*cs_add_reloc)(struct radeon_winsys_cs *cs,
++ struct radeon_winsys_cs_handle *buf,
++ enum radeon_bo_usage usage,
++ enum radeon_bo_domain domain,
++ enum radeon_bo_priority priority);
++
++ /**
++ * Return the index of an already-added buffer.
++ *
++ * \param cs Command stream
++ * \param buf Buffer
++ * \return The buffer index, or -1 if the buffer has not been added.
++ */
++ int (*cs_get_reloc)(struct radeon_winsys_cs *cs,
++ struct radeon_winsys_cs_handle *buf);
++
++ /**
++ * Return TRUE if there is enough memory in VRAM and GTT for the relocs
++ * added so far. If the validation fails, all the relocations which have
++ * been added since the last call of cs_validate will be removed and
++ * the CS will be flushed (provided there are still any relocations).
++ *
++ * \param cs A command stream to validate.
++ */
++ boolean (*cs_validate)(struct radeon_winsys_cs *cs);
++
++ /**
++ * Return TRUE if there is enough memory in VRAM and GTT for the relocs
++ * added so far.
++ *
++ * \param cs A command stream to validate.
++ * \param vram VRAM memory size pending to be used
++ * \param gtt GTT memory size pending to be used
++ */
++ boolean (*cs_memory_below_limit)(struct radeon_winsys_cs *cs, uint64_t vram, uint64_t gtt);
++
++ /**
++ * Flush a command stream.
++ *
++ * \param cs A command stream to flush.
++ * \param flags RADEON_FLUSH_ASYNC or 0.
++ * \param fence Pointer to a fence. If non-NULL, a fence is inserted
++ * after the CS and is returned through this parameter.
++ * \param cs_trace_id A unique identifier of the cs, used for tracing.
++ */
++ void (*cs_flush)(struct radeon_winsys_cs *cs,
++ unsigned flags,
++ struct pipe_fence_handle **fence,
++ uint32_t cs_trace_id);
++
++ /**
++ * Return TRUE if a buffer is referenced by a command stream.
++ *
++ * \param cs A command stream.
++ * \param buf A winsys buffer.
++ */
++ boolean (*cs_is_buffer_referenced)(struct radeon_winsys_cs *cs,
++ struct radeon_winsys_cs_handle *buf,
++ enum radeon_bo_usage usage);
++
++ /**
++ * Request access to a feature for a command stream.
++ *
++ * \param cs A command stream.
++ * \param fid Feature ID, one of RADEON_FID_*
++ * \param enable Whether to enable or disable the feature.
++ */
++ boolean (*cs_request_feature)(struct radeon_winsys_cs *cs,
++ enum radeon_feature_id fid,
++ boolean enable);
++ /**
++ * Make sure all asynchronous flushes of the cs have completed.
++ *
++ * \param cs A command stream.
++ */
++ void (*cs_sync_flush)(struct radeon_winsys_cs *cs);
++
++ /**
++ * Wait for the fence and return true if the fence has been signalled.
++ * A timeout of 0 only returns the current status without waiting.
++ * A timeout of PIPE_TIMEOUT_INFINITE waits until the fence is signalled.
++ */
++ bool (*fence_wait)(struct radeon_winsys *ws,
++ struct pipe_fence_handle *fence,
++ uint64_t timeout);
++
++ /**
++ * Reference counting for fences.
++ */
++ void (*fence_reference)(struct pipe_fence_handle **dst,
++ struct pipe_fence_handle *src);
++
++ /**
++ * Initialize surface
++ *
++ * \param ws The winsys this function is called from.
++ * \param surf Surface structure ptr
++ */
++ int (*surface_init)(struct radeon_winsys *ws,
++ struct radeon_surface *surf);
++
++ /**
++ * Find best values for a surface
++ *
++ * \param ws The winsys this function is called from.
++ * \param surf Surface structure ptr
++ */
++ int (*surface_best)(struct radeon_winsys *ws,
++ struct radeon_surface *surf);
++
++ uint64_t (*query_value)(struct radeon_winsys *ws,
++ enum radeon_value_id value);
++};
++
++
++static INLINE void radeon_emit(struct radeon_winsys_cs *cs, uint32_t value)
++{
++ cs->buf[cs->cdw++] = value;
++}
++
++static INLINE void radeon_emit_array(struct radeon_winsys_cs *cs,
++ const uint32_t *values, unsigned count)
++{
++ memcpy(cs->buf+cs->cdw, values, count * 4);
++ cs->cdw += count;
++}
++
++#endif
+--
+1.9.1
+
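To make the interface declared in radeon_winsys.h above easier to follow, here is a minimal, hypothetical driver-side sketch of the command-submission flow: create a buffer, register it as a relocation, emit a couple of dwords with the inline helpers, and flush. It only uses entry points shown in this patch; RING_GFX, the callback and function names, and the packet words are illustrative assumptions, and error handling is omitted.

#include "radeon_winsys.h"

/* Hypothetical flush callback; the winsys invokes it when the CS must be
 * submitted early (e.g. when cs_validate removes relocations). */
static void example_flush(void *ctx, unsigned flags,
                          struct pipe_fence_handle **fence)
{
}

static void example_submit(struct radeon_winsys *ws)
{
   /* Allocate a 4 KiB buffer in VRAM through the reusable pool. */
   struct pb_buffer *buf =
      ws->buffer_create(ws, 4096, 4096, TRUE, RADEON_DOMAIN_VRAM, 0);
   struct radeon_winsys_cs_handle *handle = ws->buffer_get_cs_handle(buf);

   /* One command stream per pipe context, here on the graphics ring
    * (RING_GFX is assumed to be a value of enum ring_type). */
   struct radeon_winsys_cs *cs =
      ws->cs_create(ws, RING_GFX, example_flush, NULL, NULL);

   /* The buffer must be added as a relocation before it is referenced;
    * the priority (<= 15) biases placement in the requested domain. */
   ws->cs_add_reloc(cs, handle, RADEON_USAGE_READWRITE,
                    RADEON_DOMAIN_VRAM, RADEON_PRIO_COLOR_BUFFER);

   /* Write packet dwords with the inline helpers defined above
    * (the values here are placeholders, not a real packet). */
   radeon_emit(cs, 0);
   radeon_emit(cs, 0);

   /* Submit asynchronously; no fence and no trace id requested. */
   ws->cs_flush(cs, RADEON_FLUSH_ASYNC, NULL, 0);
   ws->cs_destroy(cs);
   pb_reference(&buf, NULL);   /* drop the buffer reference (pipebuffer helper) */
}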
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0007-winsys-radeon-add-a-private-interface-for-radeon_sur.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0007-winsys-radeon-add-a-private-interface-for-radeon_sur.patch
new file mode 100644
index 00000000..094c76c0
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0007-winsys-radeon-add-a-private-interface-for-radeon_sur.patch
@@ -0,0 +1,659 @@
+From 96bcd3e235a5d326f455944e8393e254925b58bc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 22:53:04 +0200
+Subject: [PATCH 07/29] winsys/radeon: add a private interface for
+ radeon_surface
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ src/gallium/drivers/r600/evergreen_state.c | 6 +-
+ src/gallium/drivers/r600/r600_uvd.c | 2 +-
+ src/gallium/drivers/radeon/r600_pipe_common.h | 2 +-
+ src/gallium/drivers/radeon/r600_texture.c | 12 +-
+ src/gallium/drivers/radeon/radeon_uvd.c | 6 +-
+ src/gallium/drivers/radeon/radeon_uvd.h | 4 +-
+ src/gallium/drivers/radeon/radeon_vce.c | 2 +-
+ src/gallium/drivers/radeon/radeon_vce.h | 6 +-
+ src/gallium/drivers/radeon/radeon_video.c | 2 +-
+ src/gallium/drivers/radeon/radeon_video.h | 2 +-
+ src/gallium/drivers/radeonsi/si_state.c | 4 +-
+ src/gallium/drivers/radeonsi/si_uvd.c | 4 +-
+ src/gallium/winsys/radeon/drm/Makefile.sources | 1 +
+ src/gallium/winsys/radeon/drm/radeon_drm_surface.c | 180 +++++++++++++++++++++
+ src/gallium/winsys/radeon/drm/radeon_drm_winsys.c | 20 +--
+ src/gallium/winsys/radeon/drm/radeon_drm_winsys.h | 1 +
+ src/gallium/winsys/radeon/radeon_winsys.h | 79 ++++++++-
+ 17 files changed, 286 insertions(+), 47 deletions(-)
+ create mode 100644 src/gallium/winsys/radeon/drm/radeon_drm_surface.c
+
+diff --git a/src/gallium/drivers/r600/evergreen_state.c b/src/gallium/drivers/r600/evergreen_state.c
+index edd886b..8951ab0 100644
+--- a/src/gallium/drivers/r600/evergreen_state.c
++++ b/src/gallium/drivers/r600/evergreen_state.c
+@@ -664,7 +664,7 @@ evergreen_create_sampler_view_custom(struct pipe_context *ctx,
+ unsigned height, depth, width;
+ unsigned macro_aspect, tile_split, bankh, bankw, nbanks, fmask_bankh;
+ enum pipe_format pipe_format = state->format;
+- struct radeon_surface_level *surflevel;
++ struct radeon_surf_level *surflevel;
+ unsigned base_level, first_level, last_level;
+ uint64_t va;
+
+@@ -918,7 +918,7 @@ static void evergreen_emit_scissor_state(struct r600_context *rctx, struct r600_
+ /**
+ * This function initializes the CB* register values for RATs. It is meant
+ * to be used for 1D aligned buffers that do not have an associated
+- * radeon_surface.
++ * radeon_surf.
+ */
+ void evergreen_init_color_surface_rat(struct r600_context *rctx,
+ struct r600_surface *surf)
+@@ -1163,7 +1163,7 @@ static void evergreen_init_depth_surface(struct r600_context *rctx,
+ struct r600_screen *rscreen = rctx->screen;
+ struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
+ unsigned level = surf->base.u.tex.level;
+- struct radeon_surface_level *levelinfo = &rtex->surface.level[level];
++ struct radeon_surf_level *levelinfo = &rtex->surface.level[level];
+ uint64_t offset;
+ unsigned format, array_mode;
+ unsigned macro_aspect, tile_split, bankh, bankw, nbanks;
+diff --git a/src/gallium/drivers/r600/r600_uvd.c b/src/gallium/drivers/r600/r600_uvd.c
+index ee5288f..357e901 100644
+--- a/src/gallium/drivers/r600/r600_uvd.c
++++ b/src/gallium/drivers/r600/r600_uvd.c
+@@ -57,7 +57,7 @@ struct pipe_video_buffer *r600_video_buffer_create(struct pipe_context *pipe,
+ {
+ struct r600_context *ctx = (struct r600_context *)pipe;
+ struct r600_texture *resources[VL_NUM_COMPONENTS] = {};
+- struct radeon_surface* surfaces[VL_NUM_COMPONENTS] = {};
++ struct radeon_surf* surfaces[VL_NUM_COMPONENTS] = {};
+ struct pb_buffer **pbs[VL_NUM_COMPONENTS] = {};
+ const enum pipe_format *resource_formats;
+ struct pipe_video_buffer template;
+diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
+index b7df001..bdb4541 100644
+--- a/src/gallium/drivers/radeon/r600_pipe_common.h
++++ b/src/gallium/drivers/radeon/r600_pipe_common.h
+@@ -195,7 +195,7 @@ struct r600_texture {
+ unsigned dirty_level_mask; /* each bit says if that mipmap is compressed */
+ struct r600_texture *flushed_depth_texture;
+ boolean is_flushing_texture;
+- struct radeon_surface surface;
++ struct radeon_surf surface;
+
+ /* Colorbuffer compression and fast clear. */
+ struct r600_fmask_info fmask;
+diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
+index ab8ce7b..dc510c9 100644
+--- a/src/gallium/drivers/radeon/r600_texture.c
++++ b/src/gallium/drivers/radeon/r600_texture.c
+@@ -119,7 +119,7 @@ static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned leve
+ }
+
+ static int r600_init_surface(struct r600_common_screen *rscreen,
+- struct radeon_surface *surface,
++ struct radeon_surf *surface,
+ const struct pipe_resource *ptex,
+ unsigned array_mode,
+ bool is_flushed_depth)
+@@ -234,7 +234,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
+ {
+ struct r600_texture *rtex = (struct r600_texture*)ptex;
+ struct r600_resource *resource = &rtex->resource;
+- struct radeon_surface *surface = &rtex->surface;
++ struct radeon_surf *surface = &rtex->surface;
+ struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+
+ rscreen->ws->buffer_set_tiling(resource->buf,
+@@ -280,7 +280,7 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
+ struct r600_fmask_info *out)
+ {
+ /* FMASK is allocated like an ordinary texture. */
+- struct radeon_surface fmask = rtex->surface;
++ struct radeon_surf fmask = rtex->surface;
+
+ memset(out, 0, sizeof(*out));
+
+@@ -570,7 +570,7 @@ r600_texture_create_object(struct pipe_screen *screen,
+ const struct pipe_resource *base,
+ unsigned pitch_in_bytes_override,
+ struct pb_buffer *buf,
+- struct radeon_surface *surface)
++ struct radeon_surf *surface)
+ {
+ struct r600_texture *rtex;
+ struct r600_resource *resource;
+@@ -764,7 +764,7 @@ struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
+ {
+ struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+- struct radeon_surface surface = {0};
++ struct radeon_surf surface = {0};
+ int r;
+
+ r = r600_init_surface(rscreen, &surface, templ,
+@@ -790,7 +790,7 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen
+ unsigned stride = 0;
+ unsigned array_mode;
+ enum radeon_bo_layout micro, macro;
+- struct radeon_surface surface;
++ struct radeon_surf surface;
+ bool scanout;
+ int r;
+
+diff --git a/src/gallium/drivers/radeon/radeon_uvd.c b/src/gallium/drivers/radeon/radeon_uvd.c
+index 4d4b54b..be58d0b 100644
+--- a/src/gallium/drivers/radeon/radeon_uvd.c
++++ b/src/gallium/drivers/radeon/radeon_uvd.c
+@@ -870,7 +870,7 @@ error:
+ }
+
+ /* calculate top/bottom offset */
+-static unsigned texture_offset(struct radeon_surface *surface, unsigned layer)
++static unsigned texture_offset(struct radeon_surf *surface, unsigned layer)
+ {
+ return surface->level[0].offset +
+ layer * surface->level[0].slice_size;
+@@ -905,8 +905,8 @@ static unsigned bank_wh(unsigned bankwh)
+ /**
+ * fill decoding target field from the luma and chroma surfaces
+ */
+-void ruvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surface *luma,
+- struct radeon_surface *chroma)
++void ruvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surf *luma,
++ struct radeon_surf *chroma)
+ {
+ msg->body.decode.dt_pitch = luma->level[0].pitch_bytes;
+ switch (luma->level[0].mode) {
+diff --git a/src/gallium/drivers/radeon/radeon_uvd.h b/src/gallium/drivers/radeon/radeon_uvd.h
+index 41a6fb4..7442865 100644
+--- a/src/gallium/drivers/radeon/radeon_uvd.h
++++ b/src/gallium/drivers/radeon/radeon_uvd.h
+@@ -353,6 +353,6 @@ struct pipe_video_codec *ruvd_create_decoder(struct pipe_context *context,
+ ruvd_set_dtb set_dtb);
+
+ /* fill decoding target field from the luma and chroma surfaces */
+-void ruvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surface *luma,
+- struct radeon_surface *chroma);
++void ruvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surf *luma,
++ struct radeon_surf *chroma);
+ #endif
+diff --git a/src/gallium/drivers/radeon/radeon_vce.c b/src/gallium/drivers/radeon/radeon_vce.c
+index 5f710e6..e220f40 100644
+--- a/src/gallium/drivers/radeon/radeon_vce.c
++++ b/src/gallium/drivers/radeon/radeon_vce.c
+@@ -337,7 +337,7 @@ struct pipe_video_codec *rvce_create_encoder(struct pipe_context *context,
+ struct r600_common_screen *rscreen = (struct r600_common_screen *)context->screen;
+ struct rvce_encoder *enc;
+ struct pipe_video_buffer *tmp_buf, templat = {};
+- struct radeon_surface *tmp_surf;
++ struct radeon_surf *tmp_surf;
+ unsigned cpb_size;
+
+ if (!rscreen->info.vce_fw_version) {
+diff --git a/src/gallium/drivers/radeon/radeon_vce.h b/src/gallium/drivers/radeon/radeon_vce.h
+index 7f0cd1f..5c6317a 100644
+--- a/src/gallium/drivers/radeon/radeon_vce.h
++++ b/src/gallium/drivers/radeon/radeon_vce.h
+@@ -50,7 +50,7 @@ struct r600_common_screen;
+ /* driver dependent callback */
+ typedef void (*rvce_get_buffer)(struct pipe_resource *resource,
+ struct radeon_winsys_cs_handle **handle,
+- struct radeon_surface **surface);
++ struct radeon_surf **surface);
+
+ /* Coded picture buffer slot */
+ struct rvce_cpb_slot {
+@@ -88,8 +88,8 @@ struct rvce_encoder {
+ rvce_get_buffer get_buffer;
+
+ struct radeon_winsys_cs_handle* handle;
+- struct radeon_surface* luma;
+- struct radeon_surface* chroma;
++ struct radeon_surf* luma;
++ struct radeon_surf* chroma;
+
+ struct radeon_winsys_cs_handle* bs_handle;
+ unsigned bs_size;
+diff --git a/src/gallium/drivers/radeon/radeon_video.c b/src/gallium/drivers/radeon/radeon_video.c
+index 6ec10c1..826e076 100644
+--- a/src/gallium/drivers/radeon/radeon_video.c
++++ b/src/gallium/drivers/radeon/radeon_video.c
+@@ -132,7 +132,7 @@ void rvid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer)
+ */
+ void rvid_join_surfaces(struct radeon_winsys* ws, unsigned bind,
+ struct pb_buffer** buffers[VL_NUM_COMPONENTS],
+- struct radeon_surface *surfaces[VL_NUM_COMPONENTS])
++ struct radeon_surf *surfaces[VL_NUM_COMPONENTS])
+ {
+ unsigned best_tiling, best_wh, off;
+ unsigned size, alignment;
+diff --git a/src/gallium/drivers/radeon/radeon_video.h b/src/gallium/drivers/radeon/radeon_video.h
+index 6d0ff28..c9ee67f 100644
+--- a/src/gallium/drivers/radeon/radeon_video.h
++++ b/src/gallium/drivers/radeon/radeon_video.h
+@@ -68,7 +68,7 @@ void rvid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer)
+ sum up their sizes and replace the backend buffers with a single bo */
+ void rvid_join_surfaces(struct radeon_winsys* ws, unsigned bind,
+ struct pb_buffer** buffers[VL_NUM_COMPONENTS],
+- struct radeon_surface *surfaces[VL_NUM_COMPONENTS]);
++ struct radeon_surf *surfaces[VL_NUM_COMPONENTS]);
+
+ /* returns supported codecs and other parameters */
+ int rvid_get_video_param(struct pipe_screen *screen,
+diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
+index dc19d29..7f0fdd5 100644
+--- a/src/gallium/drivers/radeonsi/si_state.c
++++ b/src/gallium/drivers/radeonsi/si_state.c
+@@ -1852,7 +1852,7 @@ static void si_init_depth_surface(struct si_context *sctx,
+ struct si_screen *sscreen = sctx->screen;
+ struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
+ unsigned level = surf->base.u.tex.level;
+- struct radeon_surface_level *levelinfo = &rtex->surface.level[level];
++ struct radeon_surf_level *levelinfo = &rtex->surface.level[level];
+ unsigned format, tile_mode_index, array_mode;
+ unsigned macro_aspect, tile_split, stile_split, bankh, bankw, nbanks, pipe_config;
+ uint32_t z_info, s_info, db_depth_info;
+@@ -2258,7 +2258,7 @@ static struct pipe_sampler_view *si_create_sampler_view(struct pipe_context *ctx
+ unsigned char state_swizzle[4], swizzle[4];
+ unsigned height, depth, width;
+ enum pipe_format pipe_format = state->format;
+- struct radeon_surface_level *surflevel;
++ struct radeon_surf_level *surflevel;
+ int first_non_void;
+ uint64_t va;
+
+diff --git a/src/gallium/drivers/radeonsi/si_uvd.c b/src/gallium/drivers/radeonsi/si_uvd.c
+index 0ba3b12..2f10f9b 100644
+--- a/src/gallium/drivers/radeonsi/si_uvd.c
++++ b/src/gallium/drivers/radeonsi/si_uvd.c
+@@ -44,7 +44,7 @@ struct pipe_video_buffer *si_video_buffer_create(struct pipe_context *pipe,
+ {
+ struct si_context *ctx = (struct si_context *)pipe;
+ struct r600_texture *resources[VL_NUM_COMPONENTS] = {};
+- struct radeon_surface *surfaces[VL_NUM_COMPONENTS] = {};
++ struct radeon_surf *surfaces[VL_NUM_COMPONENTS] = {};
+ struct pb_buffer **pbs[VL_NUM_COMPONENTS] = {};
+ const enum pipe_format *resource_formats;
+ struct pipe_video_buffer template;
+@@ -136,7 +136,7 @@ static struct radeon_winsys_cs_handle* si_uvd_set_dtb(struct ruvd_msg *msg, stru
+ /* get the radeon resources for VCE */
+ static void si_vce_get_buffer(struct pipe_resource *resource,
+ struct radeon_winsys_cs_handle **handle,
+- struct radeon_surface **surface)
++ struct radeon_surf **surface)
+ {
+ struct r600_texture *res = (struct r600_texture *)resource;
+
+diff --git a/src/gallium/winsys/radeon/drm/Makefile.sources b/src/gallium/winsys/radeon/drm/Makefile.sources
+index ced788b..13a71c4 100644
+--- a/src/gallium/winsys/radeon/drm/Makefile.sources
++++ b/src/gallium/winsys/radeon/drm/Makefile.sources
+@@ -5,6 +5,7 @@ C_SOURCES := \
+ radeon_drm_cs_dump.c \
+ radeon_drm_cs.h \
+ radeon_drm_public.h \
++ radeon_drm_surface.c \
+ radeon_drm_winsys.c \
+ radeon_drm_winsys.h \
+ ../radeon_winsys.h
+diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_surface.c b/src/gallium/winsys/radeon/drm/radeon_drm_surface.c
+new file mode 100644
+index 0000000..29d3467
+--- /dev/null
++++ b/src/gallium/winsys/radeon/drm/radeon_drm_surface.c
+@@ -0,0 +1,180 @@
++/*
++ * Copyright © 2014 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * Authors:
++ * Marek Olšák <maraeo@gmail.com>
++ */
++
++#include "radeon_drm_winsys.h"
++
++#include <radeon_surface.h>
++
++static void surf_level_winsys_to_drm(struct radeon_surface_level *level_drm,
++ const struct radeon_surf_level *level_ws)
++{
++ level_drm->offset = level_ws->offset;
++ level_drm->slice_size = level_ws->slice_size;
++ level_drm->npix_x = level_ws->npix_x;
++ level_drm->npix_y = level_ws->npix_y;
++ level_drm->npix_z = level_ws->npix_z;
++ level_drm->nblk_x = level_ws->nblk_x;
++ level_drm->nblk_y = level_ws->nblk_y;
++ level_drm->nblk_z = level_ws->nblk_z;
++ level_drm->pitch_bytes = level_ws->pitch_bytes;
++ level_drm->mode = level_ws->mode;
++}
++
++static void surf_level_drm_to_winsys(struct radeon_surf_level *level_ws,
++ const struct radeon_surface_level *level_drm)
++{
++ level_ws->offset = level_drm->offset;
++ level_ws->slice_size = level_drm->slice_size;
++ level_ws->npix_x = level_drm->npix_x;
++ level_ws->npix_y = level_drm->npix_y;
++ level_ws->npix_z = level_drm->npix_z;
++ level_ws->nblk_x = level_drm->nblk_x;
++ level_ws->nblk_y = level_drm->nblk_y;
++ level_ws->nblk_z = level_drm->nblk_z;
++ level_ws->pitch_bytes = level_drm->pitch_bytes;
++ level_ws->mode = level_drm->mode;
++}
++
++static void surf_winsys_to_drm(struct radeon_surface *surf_drm,
++ const struct radeon_surf *surf_ws)
++{
++ int i;
++
++ memset(surf_drm, 0, sizeof(*surf_drm));
++
++ surf_drm->npix_x = surf_ws->npix_x;
++ surf_drm->npix_y = surf_ws->npix_y;
++ surf_drm->npix_z = surf_ws->npix_z;
++ surf_drm->blk_w = surf_ws->blk_w;
++ surf_drm->blk_h = surf_ws->blk_h;
++ surf_drm->blk_d = surf_ws->blk_d;
++ surf_drm->array_size = surf_ws->array_size;
++ surf_drm->last_level = surf_ws->last_level;
++ surf_drm->bpe = surf_ws->bpe;
++ surf_drm->nsamples = surf_ws->nsamples;
++ surf_drm->flags = surf_ws->flags;
++
++ surf_drm->bo_size = surf_ws->bo_size;
++ surf_drm->bo_alignment = surf_ws->bo_alignment;
++
++ surf_drm->bankw = surf_ws->bankw;
++ surf_drm->bankh = surf_ws->bankh;
++ surf_drm->mtilea = surf_ws->mtilea;
++ surf_drm->tile_split = surf_ws->tile_split;
++ surf_drm->stencil_tile_split = surf_ws->stencil_tile_split;
++ surf_drm->stencil_offset = surf_ws->stencil_offset;
++
++ for (i = 0; i < RADEON_SURF_MAX_LEVEL; i++) {
++ surf_level_winsys_to_drm(&surf_drm->level[i], &surf_ws->level[i]);
++ surf_level_winsys_to_drm(&surf_drm->stencil_level[i],
++ &surf_ws->stencil_level[i]);
++
++ surf_drm->tiling_index[i] = surf_ws->tiling_index[i];
++ surf_drm->stencil_tiling_index[i] = surf_ws->stencil_tiling_index[i];
++ }
++}
++
++static void surf_drm_to_winsys(struct radeon_surf *surf_ws,
++ const struct radeon_surface *surf_drm)
++{
++ int i;
++
++ memset(surf_ws, 0, sizeof(*surf_ws));
++
++ surf_ws->npix_x = surf_drm->npix_x;
++ surf_ws->npix_y = surf_drm->npix_y;
++ surf_ws->npix_z = surf_drm->npix_z;
++ surf_ws->blk_w = surf_drm->blk_w;
++ surf_ws->blk_h = surf_drm->blk_h;
++ surf_ws->blk_d = surf_drm->blk_d;
++ surf_ws->array_size = surf_drm->array_size;
++ surf_ws->last_level = surf_drm->last_level;
++ surf_ws->bpe = surf_drm->bpe;
++ surf_ws->nsamples = surf_drm->nsamples;
++ surf_ws->flags = surf_drm->flags;
++
++ surf_ws->bo_size = surf_drm->bo_size;
++ surf_ws->bo_alignment = surf_drm->bo_alignment;
++
++ surf_ws->bankw = surf_drm->bankw;
++ surf_ws->bankh = surf_drm->bankh;
++ surf_ws->mtilea = surf_drm->mtilea;
++ surf_ws->tile_split = surf_drm->tile_split;
++ surf_ws->stencil_tile_split = surf_drm->stencil_tile_split;
++ surf_ws->stencil_offset = surf_drm->stencil_offset;
++
++ for (i = 0; i < RADEON_SURF_MAX_LEVEL; i++) {
++ surf_level_drm_to_winsys(&surf_ws->level[i], &surf_drm->level[i]);
++ surf_level_drm_to_winsys(&surf_ws->stencil_level[i],
++ &surf_drm->stencil_level[i]);
++
++ surf_ws->tiling_index[i] = surf_drm->tiling_index[i];
++ surf_ws->stencil_tiling_index[i] = surf_drm->stencil_tiling_index[i];
++ }
++}
++
++static int radeon_winsys_surface_init(struct radeon_winsys *rws,
++ struct radeon_surf *surf_ws)
++{
++ struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
++ struct radeon_surface surf_drm;
++ int r;
++
++ surf_winsys_to_drm(&surf_drm, surf_ws);
++
++ r = radeon_surface_init(ws->surf_man, &surf_drm);
++ if (r)
++ return r;
++
++ surf_drm_to_winsys(surf_ws, &surf_drm);
++ return 0;
++}
++
++static int radeon_winsys_surface_best(struct radeon_winsys *rws,
++ struct radeon_surf *surf_ws)
++{
++ struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
++ struct radeon_surface surf_drm;
++ int r;
++
++ surf_winsys_to_drm(&surf_drm, surf_ws);
++
++ r = radeon_surface_best(ws->surf_man, &surf_drm);
++ if (r)
++ return r;
++
++ surf_drm_to_winsys(surf_ws, &surf_drm);
++ return 0;
++}
++
++void radeon_surface_init_functions(struct radeon_drm_winsys *ws)
++{
++ ws->base.surface_init = radeon_winsys_surface_init;
++ ws->base.surface_best = radeon_winsys_surface_best;
++}
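Because the winsys copy of the structure (radeon_surf) and the libdrm copy (radeon_surface) are now distinct types, the two static converters above must copy every field in both directions. A hedged sketch of a debug-only round-trip check, assuming it lives in this same file so the static helpers are visible, might look like this:

#include <assert.h>

/* Debug-only illustration: converting winsys -> drm -> winsys should be
 * lossless for every field the converters know about. If a new field is
 * added to radeon_surf and radeon_surface, both surf_winsys_to_drm() and
 * surf_drm_to_winsys() need to be extended, or checks like these fail. */
static void check_surf_round_trip(const struct radeon_surf *surf_ws)
{
   struct radeon_surface surf_drm;
   struct radeon_surf back;

   surf_winsys_to_drm(&surf_drm, surf_ws);
   surf_drm_to_winsys(&back, &surf_drm);

   assert(back.flags == surf_ws->flags);
   assert(back.bo_size == surf_ws->bo_size);
   assert(back.bo_alignment == surf_ws->bo_alignment);
   assert(back.level[0].pitch_bytes == surf_ws->level[0].pitch_bytes);
}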
+diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+index 2b12f4d..12767bf 100644
+--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
++++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+@@ -44,6 +44,7 @@
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <unistd.h>
++#include <radeon_surface.h>
+
+ #ifndef RADEON_INFO_ACTIVE_CU_COUNT
+ #define RADEON_INFO_ACTIVE_CU_COUNT 0x20
+@@ -507,22 +508,6 @@ static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
+ return FALSE;
+ }
+
+-static int radeon_drm_winsys_surface_init(struct radeon_winsys *rws,
+- struct radeon_surface *surf)
+-{
+- struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
+-
+- return radeon_surface_init(ws->surf_man, surf);
+-}
+-
+-static int radeon_drm_winsys_surface_best(struct radeon_winsys *rws,
+- struct radeon_surface *surf)
+-{
+- struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
+-
+- return radeon_surface_best(ws->surf_man, surf);
+-}
+-
+ static uint64_t radeon_query_value(struct radeon_winsys *rws,
+ enum radeon_value_id value)
+ {
+@@ -706,12 +691,11 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
+ ws->base.destroy = radeon_winsys_destroy;
+ ws->base.query_info = radeon_query_info;
+ ws->base.cs_request_feature = radeon_cs_request_feature;
+- ws->base.surface_init = radeon_drm_winsys_surface_init;
+- ws->base.surface_best = radeon_drm_winsys_surface_best;
+ ws->base.query_value = radeon_query_value;
+
+ radeon_bomgr_init_functions(ws);
+ radeon_drm_cs_init_functions(ws);
++ radeon_surface_init_functions(ws);
+
+ pipe_mutex_init(ws->hyperz_owner_mutex);
+ pipe_mutex_init(ws->cmask_owner_mutex);
+diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
+index 70657be..3200f0d 100644
+--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
++++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
+@@ -103,5 +103,6 @@ radeon_drm_winsys(struct radeon_winsys *base)
+ }
+
+ void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs);
++void radeon_surface_init_functions(struct radeon_drm_winsys *ws);
+
+ #endif
+diff --git a/src/gallium/winsys/radeon/radeon_winsys.h b/src/gallium/winsys/radeon/radeon_winsys.h
+index a8cc60a..7fb7ac9 100644
+--- a/src/gallium/winsys/radeon/radeon_winsys.h
++++ b/src/gallium/winsys/radeon/radeon_winsys.h
+@@ -41,7 +41,6 @@
+ */
+
+ #include "pipebuffer/pb_buffer.h"
+-#include "radeon_surface.h"
+
+ #define RADEON_MAX_CMDBUF_DWORDS (16 * 1024)
+
+@@ -243,6 +242,80 @@ enum radeon_feature_id {
+ RADEON_FID_R300_CMASK_ACCESS,
+ };
+
++#define RADEON_SURF_MAX_LEVEL 32
++
++#define RADEON_SURF_TYPE_MASK 0xFF
++#define RADEON_SURF_TYPE_SHIFT 0
++#define RADEON_SURF_TYPE_1D 0
++#define RADEON_SURF_TYPE_2D 1
++#define RADEON_SURF_TYPE_3D 2
++#define RADEON_SURF_TYPE_CUBEMAP 3
++#define RADEON_SURF_TYPE_1D_ARRAY 4
++#define RADEON_SURF_TYPE_2D_ARRAY 5
++#define RADEON_SURF_MODE_MASK 0xFF
++#define RADEON_SURF_MODE_SHIFT 8
++#define RADEON_SURF_MODE_LINEAR 0
++#define RADEON_SURF_MODE_LINEAR_ALIGNED 1
++#define RADEON_SURF_MODE_1D 2
++#define RADEON_SURF_MODE_2D 3
++#define RADEON_SURF_SCANOUT (1 << 16)
++#define RADEON_SURF_ZBUFFER (1 << 17)
++#define RADEON_SURF_SBUFFER (1 << 18)
++#define RADEON_SURF_Z_OR_SBUFFER (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)
++#define RADEON_SURF_HAS_SBUFFER_MIPTREE (1 << 19)
++#define RADEON_SURF_HAS_TILE_MODE_INDEX (1 << 20)
++#define RADEON_SURF_FMASK (1 << 21)
++
++#define RADEON_SURF_GET(v, field) (((v) >> RADEON_SURF_ ## field ## _SHIFT) & RADEON_SURF_ ## field ## _MASK)
++#define RADEON_SURF_SET(v, field) (((v) & RADEON_SURF_ ## field ## _MASK) << RADEON_SURF_ ## field ## _SHIFT)
++#define RADEON_SURF_CLR(v, field) ((v) & ~(RADEON_SURF_ ## field ## _MASK << RADEON_SURF_ ## field ## _SHIFT))
++
++struct radeon_surf_level {
++ uint64_t offset;
++ uint64_t slice_size;
++ uint32_t npix_x;
++ uint32_t npix_y;
++ uint32_t npix_z;
++ uint32_t nblk_x;
++ uint32_t nblk_y;
++ uint32_t nblk_z;
++ uint32_t pitch_bytes;
++ uint32_t mode;
++};
++
++struct radeon_surf {
++ /* These are inputs to the calculator. */
++ uint32_t npix_x;
++ uint32_t npix_y;
++ uint32_t npix_z;
++ uint32_t blk_w;
++ uint32_t blk_h;
++ uint32_t blk_d;
++ uint32_t array_size;
++ uint32_t last_level;
++ uint32_t bpe;
++ uint32_t nsamples;
++ uint32_t flags;
++
++ /* These are return values. Some of them can be set by the caller, but
++ * they will be treated as hints (e.g. bankw, bankh) and might be
++ * changed by the calculator.
++ */
++ uint64_t bo_size;
++ uint64_t bo_alignment;
++ /* This applies to EG and later. */
++ uint32_t bankw;
++ uint32_t bankh;
++ uint32_t mtilea;
++ uint32_t tile_split;
++ uint32_t stencil_tile_split;
++ uint64_t stencil_offset;
++ struct radeon_surf_level level[RADEON_SURF_MAX_LEVEL];
++ struct radeon_surf_level stencil_level[RADEON_SURF_MAX_LEVEL];
++ uint32_t tiling_index[RADEON_SURF_MAX_LEVEL];
++ uint32_t stencil_tiling_index[RADEON_SURF_MAX_LEVEL];
++};
++
+ struct radeon_winsys {
+ /**
+ * The screen object this winsys was created for
+@@ -573,7 +646,7 @@ struct radeon_winsys {
+ * \param surf Surface structure ptr
+ */
+ int (*surface_init)(struct radeon_winsys *ws,
+- struct radeon_surface *surf);
++ struct radeon_surf *surf);
+
+ /**
+ * Find best values for a surface
+@@ -582,7 +655,7 @@ struct radeon_winsys {
+ * \param surf Surface structure ptr
+ */
+ int (*surface_best)(struct radeon_winsys *ws,
+- struct radeon_surface *surf);
++ struct radeon_surf *surf);
+
+ uint64_t (*query_value)(struct radeon_winsys *ws,
+ enum radeon_value_id value);
+--
+1.9.1
+
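From the driver's point of view, the new private interface is used by filling a radeon_surf, encoding the type and mode through the RADEON_SURF_SET helper defined above, and calling surface_init(); the winsys translates to libdrm's radeon_surface internally. Below is a minimal, hypothetical sketch with illustrative values for a 256x256 RGBA8 2D texture; the function name and values are assumptions, not code from this patch.

#include <string.h>
#include "radeon_winsys.h"

static int init_example_surface(struct radeon_winsys *ws,
                                struct radeon_surf *surf)
{
   memset(surf, 0, sizeof(*surf));

   /* Inputs to the calculator. */
   surf->npix_x = 256;
   surf->npix_y = 256;
   surf->npix_z = 1;
   surf->blk_w = 1;
   surf->blk_h = 1;
   surf->blk_d = 1;
   surf->array_size = 1;
   surf->last_level = 0;
   surf->bpe = 4;          /* bytes per element (RGBA8) */
   surf->nsamples = 1;
   surf->flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
                 RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

   /* The winsys fills in bo_size, bo_alignment, level[] and tiling hints. */
   return ws->surface_init(ws, surf);
}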
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch
new file mode 100644
index 00000000..88914a74
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0008-winsys-amdgpu-add-a-new-winsys-for-the-new-kernel-dr.patch
@@ -0,0 +1,2396 @@
+From c0e94dfc8abc3ec25c0a6342f9872a9e71aa7864 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 22:43:23 +0200
+Subject: [PATCH 08/29] winsys/amdgpu: add a new winsys for the new kernel
+ driver
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ configure.ac | 5 +
+ src/gallium/Makefile.am | 1 +
+ src/gallium/drivers/r300/Automake.inc | 6 +-
+ src/gallium/drivers/r600/Automake.inc | 6 +-
+ src/gallium/drivers/radeonsi/Automake.inc | 6 +-
+ src/gallium/targets/pipe-loader/Makefile.am | 12 +-
+ src/gallium/winsys/radeon/amdgpu/Android.mk | 40 ++
+ src/gallium/winsys/radeon/amdgpu/Makefile.am | 12 +
+ src/gallium/winsys/radeon/amdgpu/Makefile.sources | 8 +
+ src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c | 643 ++++++++++++++++++++++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h | 75 +++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c | 578 +++++++++++++++++++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h | 149 +++++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_public.h | 14 +
+ src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c | 491 +++++++++++++++++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h | 80 +++
+ src/gallium/winsys/radeon/drm/radeon_drm_winsys.c | 8 +
+ src/gallium/winsys/radeon/radeon_winsys.h | 4 +
+ 18 files changed, 2129 insertions(+), 9 deletions(-)
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/Android.mk
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/Makefile.am
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/Makefile.sources
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_public.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h
+
+diff --git a/configure.ac b/configure.ac
+index 095e23e..f22975f 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -68,6 +68,7 @@ AC_SUBST([OSMESA_VERSION])
+ dnl Versions for external dependencies
+ LIBDRM_REQUIRED=2.4.38
+ LIBDRM_RADEON_REQUIRED=2.4.56
++LIBDRM_AMDGPU_REQUIRED=2.4.60
+ LIBDRM_INTEL_REQUIRED=2.4.60
+ LIBDRM_NVVIEUX_REQUIRED=2.4.33
+ LIBDRM_NOUVEAU_REQUIRED="2.4.33 libdrm >= 2.4.41"
+@@ -2091,6 +2092,7 @@ if test -n "$with_gallium_drivers"; then
+ xr300)
+ HAVE_GALLIUM_R300=yes
+ PKG_CHECK_MODULES([RADEON], [libdrm_radeon >= $LIBDRM_RADEON_REQUIRED])
++ PKG_CHECK_MODULES([AMDGPU], [libdrm_amdgpu >= $LIBDRM_AMDGPU_REQUIRED])
+ gallium_require_drm "Gallium R300"
+ gallium_require_drm_loader
+ gallium_require_llvm "Gallium R300"
+@@ -2098,6 +2100,7 @@ if test -n "$with_gallium_drivers"; then
+ xr600)
+ HAVE_GALLIUM_R600=yes
+ PKG_CHECK_MODULES([RADEON], [libdrm_radeon >= $LIBDRM_RADEON_REQUIRED])
++ PKG_CHECK_MODULES([AMDGPU], [libdrm_amdgpu >= $LIBDRM_AMDGPU_REQUIRED])
+ gallium_require_drm "Gallium R600"
+ gallium_require_drm_loader
+ if test "x$enable_r600_llvm" = xyes -o "x$enable_opencl" = xyes; then
+@@ -2114,6 +2117,7 @@ if test -n "$with_gallium_drivers"; then
+ xradeonsi)
+ HAVE_GALLIUM_RADEONSI=yes
+ PKG_CHECK_MODULES([RADEON], [libdrm_radeon >= $LIBDRM_RADEON_REQUIRED])
++ PKG_CHECK_MODULES([AMDGPU], [libdrm_amdgpu >= $LIBDRM_AMDGPU_REQUIRED])
+ gallium_require_drm "radeonsi"
+ gallium_require_drm_loader
+ radeon_llvm_check "radeonsi"
+@@ -2384,6 +2388,7 @@ AC_CONFIG_FILES([Makefile
+ src/gallium/winsys/intel/drm/Makefile
+ src/gallium/winsys/nouveau/drm/Makefile
+ src/gallium/winsys/radeon/drm/Makefile
++ src/gallium/winsys/radeon/amdgpu/Makefile
+ src/gallium/winsys/svga/drm/Makefile
+ src/gallium/winsys/sw/dri/Makefile
+ src/gallium/winsys/sw/kms-dri/Makefile
+diff --git a/src/gallium/Makefile.am b/src/gallium/Makefile.am
+index ede6e21..fa526d4 100644
+--- a/src/gallium/Makefile.am
++++ b/src/gallium/Makefile.am
+@@ -63,6 +63,7 @@ endif
+ ## the radeon winsys - linked in by r300, r600 and radeonsi
+ if NEED_RADEON_DRM_WINSYS
+ SUBDIRS += winsys/radeon/drm
++SUBDIRS += winsys/radeon/amdgpu
+ endif
+
+ ## swrast/softpipe
+diff --git a/src/gallium/drivers/r300/Automake.inc b/src/gallium/drivers/r300/Automake.inc
+index 9334973..cfcd61c 100644
+--- a/src/gallium/drivers/r300/Automake.inc
++++ b/src/gallium/drivers/r300/Automake.inc
+@@ -5,9 +5,11 @@ TARGET_CPPFLAGS += -DGALLIUM_R300
+ TARGET_LIB_DEPS += \
+ $(top_builddir)/src/gallium/drivers/r300/libr300.la \
+ $(RADEON_LIBS) \
+- $(INTEL_LIBS)
++ $(LIBDRM_LIBS) \
++ $(AMDGPU_LIBS)
+
+ TARGET_RADEON_WINSYS = \
+- $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la
++ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la
+
+ endif
+diff --git a/src/gallium/drivers/r600/Automake.inc b/src/gallium/drivers/r600/Automake.inc
+index 914eea3..2bb34b0 100644
+--- a/src/gallium/drivers/r600/Automake.inc
++++ b/src/gallium/drivers/r600/Automake.inc
+@@ -5,10 +5,12 @@ TARGET_CPPFLAGS += -DGALLIUM_R600
+ TARGET_LIB_DEPS += \
+ $(top_builddir)/src/gallium/drivers/r600/libr600.la \
+ $(RADEON_LIBS) \
+- $(LIBDRM_LIBS)
++ $(LIBDRM_LIBS) \
++ $(AMDGPU_LIBS)
+
+ TARGET_RADEON_WINSYS = \
+- $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la
++ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la
+
+ TARGET_RADEON_COMMON = \
+ $(top_builddir)/src/gallium/drivers/radeon/libradeon.la
+diff --git a/src/gallium/drivers/radeonsi/Automake.inc b/src/gallium/drivers/radeonsi/Automake.inc
+index 8686fff..200a254 100644
+--- a/src/gallium/drivers/radeonsi/Automake.inc
++++ b/src/gallium/drivers/radeonsi/Automake.inc
+@@ -5,10 +5,12 @@ TARGET_CPPFLAGS += -DGALLIUM_RADEONSI
+ TARGET_LIB_DEPS += \
+ $(top_builddir)/src/gallium/drivers/radeonsi/libradeonsi.la \
+ $(RADEON_LIBS) \
+- $(LIBDRM_LIBS)
++ $(LIBDRM_LIBS) \
++ $(AMDGPU_LIBS)
+
+ TARGET_RADEON_WINSYS = \
+- $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la
++ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la
+
+ TARGET_RADEON_COMMON = \
+ $(top_builddir)/src/gallium/drivers/radeon/libradeon.la
+diff --git a/src/gallium/targets/pipe-loader/Makefile.am b/src/gallium/targets/pipe-loader/Makefile.am
+index 967cdb7..3527090 100644
+--- a/src/gallium/targets/pipe-loader/Makefile.am
++++ b/src/gallium/targets/pipe-loader/Makefile.am
+@@ -124,9 +124,11 @@ nodist_EXTRA_pipe_r300_la_SOURCES = dummy.cpp
+ pipe_r300_la_LIBADD = \
+ $(PIPE_LIBS) \
+ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la \
+ $(top_builddir)/src/gallium/drivers/r300/libr300.la \
+ $(LIBDRM_LIBS) \
+- $(RADEON_LIBS)
++ $(RADEON_LIBS) \
++ $(AMDGPU_LIBS)
+
+ endif
+
+@@ -138,10 +140,12 @@ nodist_EXTRA_pipe_r600_la_SOURCES = dummy.cpp
+ pipe_r600_la_LIBADD = \
+ $(PIPE_LIBS) \
+ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la \
+ $(top_builddir)/src/gallium/drivers/radeon/libradeon.la \
+ $(top_builddir)/src/gallium/drivers/r600/libr600.la \
+ $(LIBDRM_LIBS) \
+- $(RADEON_LIBS)
++ $(RADEON_LIBS) \
++ $(AMDGPU_LIBS)
+
+ endif
+
+@@ -153,10 +157,12 @@ nodist_EXTRA_pipe_radeonsi_la_SOURCES = dummy.cpp
+ pipe_radeonsi_la_LIBADD = \
+ $(PIPE_LIBS) \
+ $(top_builddir)/src/gallium/winsys/radeon/drm/libradeonwinsys.la \
++ $(top_builddir)/src/gallium/winsys/radeon/amdgpu/libamdgpuwinsys.la \
+ $(top_builddir)/src/gallium/drivers/radeon/libradeon.la \
+ $(top_builddir)/src/gallium/drivers/radeonsi/libradeonsi.la \
+ $(LIBDRM_LIBS) \
+- $(RADEON_LIBS)
++ $(RADEON_LIBS) \
++ $(AMDGPU_LIBS)
+
+ endif
+
+diff --git a/src/gallium/winsys/radeon/amdgpu/Android.mk b/src/gallium/winsys/radeon/amdgpu/Android.mk
+new file mode 100644
+index 0000000..a10312f
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/Android.mk
+@@ -0,0 +1,40 @@
++# Mesa 3-D graphics library
++#
++# Copyright (C) 2011 Chia-I Wu <olvaffe@gmail.com>
++# Copyright (C) 2011 LunarG Inc.
++#
++# Permission is hereby granted, free of charge, to any person obtaining a
++# copy of this software and associated documentation files (the "Software"),
++# to deal in the Software without restriction, including without limitation
++# the rights to use, copy, modify, merge, publish, distribute, sublicense,
++# and/or sell copies of the Software, and to permit persons to whom the
++# Software is furnished to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included
++# in all copies or substantial portions of the Software.
++#
++# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++# DEALINGS IN THE SOFTWARE.
++
++LOCAL_PATH := $(call my-dir)
++
++# get C_SOURCES
++include $(LOCAL_PATH)/Makefile.sources
++
++include $(CLEAR_VARS)
++
++LOCAL_SRC_FILES := $(C_SOURCES)
++
++LOCAL_C_INCLUDES := \
++ $(DRM_TOP) \
++ $(DRM_TOP)/include/drm
++
++LOCAL_MODULE := libmesa_winsys_amdgpu
++
++include $(GALLIUM_COMMON_MK)
++include $(BUILD_STATIC_LIBRARY)
+diff --git a/src/gallium/winsys/radeon/amdgpu/Makefile.am b/src/gallium/winsys/radeon/amdgpu/Makefile.am
+new file mode 100644
+index 0000000..80ecb75
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/Makefile.am
+@@ -0,0 +1,12 @@
++include Makefile.sources
++include $(top_srcdir)/src/gallium/Automake.inc
++
++AM_CFLAGS = \
++ $(GALLIUM_WINSYS_CFLAGS) \
++ $(AMDGPU_CFLAGS)
++
++AM_CXXFLAGS = $(AM_CFLAGS)
++
++noinst_LTLIBRARIES = libamdgpuwinsys.la
++
++libamdgpuwinsys_la_SOURCES = $(C_SOURCES)
+diff --git a/src/gallium/winsys/radeon/amdgpu/Makefile.sources b/src/gallium/winsys/radeon/amdgpu/Makefile.sources
+new file mode 100644
+index 0000000..0f55010
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/Makefile.sources
+@@ -0,0 +1,8 @@
++C_SOURCES := \
++ amdgpu_bo.c \
++ amdgpu_bo.h \
++ amdgpu_cs.c \
++ amdgpu_cs.h \
++ amdgpu_public.h \
++ amdgpu_winsys.c \
++ amdgpu_winsys.h
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c b/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c
+new file mode 100644
+index 0000000..de9548e
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.c
+@@ -0,0 +1,643 @@
++/*
++ * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++
++#include "amdgpu_cs.h"
++
++#include "os/os_time.h"
++#include "state_tracker/drm_driver.h"
++#include <amdgpu_drm.h>
++#include <xf86drm.h>
++#include <stdio.h>
++
++static const struct pb_vtbl amdgpu_winsys_bo_vtbl;
++
++static INLINE struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
++{
++ assert(bo->vtbl == &amdgpu_winsys_bo_vtbl);
++ return (struct amdgpu_winsys_bo *)bo;
++}
++
++struct amdgpu_bomgr {
++ struct pb_manager base;
++ struct amdgpu_winsys *rws;
++};
++
++static struct amdgpu_winsys *get_winsys(struct pb_manager *mgr)
++{
++ return ((struct amdgpu_bomgr*)mgr)->rws;
++}
++
++static struct amdgpu_winsys_bo *get_amdgpu_winsys_bo(struct pb_buffer *_buf)
++{
++ struct amdgpu_winsys_bo *bo = NULL;
++
++ if (_buf->vtbl == &amdgpu_winsys_bo_vtbl) {
++ bo = amdgpu_winsys_bo(_buf);
++ } else {
++ struct pb_buffer *base_buf;
++ pb_size offset;
++ pb_get_base_buffer(_buf, &base_buf, &offset);
++
++ if (base_buf->vtbl == &amdgpu_winsys_bo_vtbl)
++ bo = amdgpu_winsys_bo(base_buf);
++ }
++
++ return bo;
++}
++
++static void amdgpu_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
++ struct radeon_winsys *ws = &bo->rws->base;
++
++ while (p_atomic_read(&bo->num_active_ioctls)) {
++ sched_yield();
++ }
++
++ if (bo->fence) {
++ ws->fence_wait(ws, bo->fence, PIPE_TIMEOUT_INFINITE);
++ }
++}
++
++static boolean amdgpu_bo_is_busy(struct pb_buffer *_buf,
++ enum radeon_bo_usage usage)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
++ struct radeon_winsys *ws = &bo->rws->base;
++
++ if (p_atomic_read(&bo->num_active_ioctls)) {
++ return TRUE;
++ }
++
++ return bo->fence && !ws->fence_wait(ws, bo->fence, 0);
++}
++
++static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
++ struct radeon_winsys_cs_handle *buf)
++{
++ return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
++}
++
++static void amdgpu_bo_destroy(struct pb_buffer *_buf)
++{
++ struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
++
++ amdgpu_bo_free(bo->bo);
++ amdgpu_fence_reference(&bo->fence, NULL);
++
++ if (bo->initial_domain & RADEON_DOMAIN_VRAM)
++ bo->rws->allocated_vram -= align(bo->base.size, 4096);
++ else if (bo->initial_domain & RADEON_DOMAIN_GTT)
++ bo->rws->allocated_gtt -= align(bo->base.size, 4096);
++ FREE(bo);
++}
++
++static void *amdgpu_bo_map(struct radeon_winsys_cs_handle *buf,
++ struct radeon_winsys_cs *rcs,
++ enum pipe_transfer_usage usage)
++{
++ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
++ struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
++ int r;
++ void *cpu = NULL;
++
++ /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
++ if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
++ /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
++ if (usage & PIPE_TRANSFER_DONTBLOCK) {
++ if (!(usage & PIPE_TRANSFER_WRITE)) {
++ /* Mapping for read.
++ *
++ * Since we are mapping for read, we don't need to wait
++ * if the GPU is using the buffer for read too
++ * (neither one is changing it).
++ *
++ * Only check whether the buffer is being used for write. */
++ if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
++ RADEON_USAGE_WRITE)) {
++ cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
++ return NULL;
++ }
++
++ if (amdgpu_bo_is_busy((struct pb_buffer*)bo,
++ RADEON_USAGE_WRITE)) {
++ return NULL;
++ }
++ } else {
++ if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
++ cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
++ return NULL;
++ }
++
++ if (amdgpu_bo_is_busy((struct pb_buffer*)bo,
++ RADEON_USAGE_READWRITE)) {
++ return NULL;
++ }
++ }
++ } else {
++ uint64_t time = os_time_get_nano();
++
++ if (!(usage & PIPE_TRANSFER_WRITE)) {
++ /* Mapping for read.
++ *
++ * Since we are mapping for read, we don't need to wait
++ * if the GPU is using the buffer for read too
++ * (neither one is changing it).
++ *
++ * Only check whether the buffer is being used for write. */
++ if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
++ RADEON_USAGE_WRITE)) {
++ cs->flush_cs(cs->flush_data, 0, NULL);
++ }
++ amdgpu_bo_wait((struct pb_buffer*)bo,
++ RADEON_USAGE_WRITE);
++ } else {
++ /* Mapping for write. */
++ if (cs) {
++ if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
++ cs->flush_cs(cs->flush_data, 0, NULL);
++ } else {
++ /* Try to avoid busy-waiting in radeon_bo_wait. */
++ if (p_atomic_read(&bo->num_active_ioctls))
++ amdgpu_cs_sync_flush(rcs);
++ }
++ }
++
++ amdgpu_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
++ }
++
++ bo->rws->buffer_wait_time += os_time_get_nano() - time;
++ }
++ }
++
++ r = amdgpu_bo_cpu_map(bo->bo, &cpu);
++ return r ? NULL : cpu;
++}
++
++static void amdgpu_bo_unmap(struct radeon_winsys_cs_handle *buf)
++{
++ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
++
++ amdgpu_bo_cpu_unmap(bo->bo);
++}
++
++static void amdgpu_bo_get_base_buffer(struct pb_buffer *buf,
++ struct pb_buffer **base_buf,
++ unsigned *offset)
++{
++ *base_buf = buf;
++ *offset = 0;
++}
++
++static enum pipe_error amdgpu_bo_validate(struct pb_buffer *_buf,
++ struct pb_validate *vl,
++ unsigned flags)
++{
++ /* Always pinned */
++ return PIPE_OK;
++}
++
++static void amdgpu_bo_fence(struct pb_buffer *buf,
++ struct pipe_fence_handle *fence)
++{
++}
++
++static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
++ amdgpu_bo_destroy,
++ NULL, /* never called */
++ NULL, /* never called */
++ amdgpu_bo_validate,
++ amdgpu_bo_fence,
++ amdgpu_bo_get_base_buffer,
++};
++
++static struct pb_buffer *amdgpu_bomgr_create_bo(struct pb_manager *_mgr,
++ pb_size size,
++ const struct pb_desc *desc)
++{
++ struct amdgpu_winsys *rws = get_winsys(_mgr);
++ struct amdgpu_bo_desc *rdesc = (struct amdgpu_bo_desc*)desc;
++ struct amdgpu_bo_alloc_request request = {0};
++ struct amdgpu_bo_alloc_result result = {0};
++ struct amdgpu_winsys_bo *bo;
++ int r;
++
++ assert(rdesc->initial_domain & RADEON_DOMAIN_VRAM_GTT);
++
++ request.alloc_size = size;
++ request.phys_alignment = desc->alignment;
++
++ if (rdesc->initial_domain & RADEON_DOMAIN_VRAM) {
++ request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
++ if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
++ request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
++ }
++ if (rdesc->initial_domain & RADEON_DOMAIN_GTT) {
++ request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
++ if (rdesc->flags & RADEON_FLAG_GTT_WC)
++ request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_WC;
++ }
++
++ r = amdgpu_bo_alloc(rws->dev, &request, &result);
++ if (r) {
++ fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
++ fprintf(stderr, "amdgpu: size : %d bytes\n", size);
++ fprintf(stderr, "amdgpu: alignment : %d bytes\n", desc->alignment);
++ fprintf(stderr, "amdgpu: domains : %d\n", rdesc->initial_domain);
++ return NULL;
++ }
++
++ bo = CALLOC_STRUCT(amdgpu_winsys_bo);
++ if (!bo)
++ return NULL;
++
++ pipe_reference_init(&bo->base.reference, 1);
++ bo->base.alignment = desc->alignment;
++ bo->base.usage = desc->usage;
++ bo->base.size = size;
++ bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
++ bo->rws = rws;
++ bo->bo = result.buf_handle;
++ bo->va = result.virtual_mc_base_address;
++ bo->initial_domain = rdesc->initial_domain;
++
++ if (amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->handle)) {
++ amdgpu_bo_free(bo->bo);
++ FREE(bo);
++ return NULL;
++ }
++
++ if (rdesc->initial_domain & RADEON_DOMAIN_VRAM)
++ rws->allocated_vram += align(size, 4096);
++ else if (rdesc->initial_domain & RADEON_DOMAIN_GTT)
++ rws->allocated_gtt += align(size, 4096);
++
++ return &bo->base;
++}
++
++static void amdgpu_bomgr_flush(struct pb_manager *mgr)
++{
++ /* NOP */
++}
++
++/* This is for the cache bufmgr. */
++static boolean amdgpu_bomgr_is_buffer_busy(struct pb_manager *_mgr,
++ struct pb_buffer *_buf)
++{
++ struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
++
++ if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
++ return TRUE;
++ }
++
++ if (amdgpu_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
++ return TRUE;
++ }
++
++ return FALSE;
++}
++
++static void amdgpu_bomgr_destroy(struct pb_manager *mgr)
++{
++ FREE(mgr);
++}
++
++struct pb_manager *amdgpu_bomgr_create(struct amdgpu_winsys *rws)
++{
++ struct amdgpu_bomgr *mgr;
++
++ mgr = CALLOC_STRUCT(amdgpu_bomgr);
++ if (!mgr)
++ return NULL;
++
++ mgr->base.destroy = amdgpu_bomgr_destroy;
++ mgr->base.create_buffer = amdgpu_bomgr_create_bo;
++ mgr->base.flush = amdgpu_bomgr_flush;
++ mgr->base.is_buffer_busy = amdgpu_bomgr_is_buffer_busy;
++
++ mgr->rws = rws;
++ return &mgr->base;
++}
++
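++/* The EG tile-split field is a 3-bit code: code n stands for a split of
++ * (64 << n) bytes. eg_tile_split() decodes the code into bytes and
++ * eg_tile_split_rev() converts a byte value back, falling back to
++ * 1024 bytes / code 4 for unexpected input. */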
++static unsigned eg_tile_split(unsigned tile_split)
++{
++ switch (tile_split) {
++ case 0: tile_split = 64; break;
++ case 1: tile_split = 128; break;
++ case 2: tile_split = 256; break;
++ case 3: tile_split = 512; break;
++ default:
++ case 4: tile_split = 1024; break;
++ case 5: tile_split = 2048; break;
++ case 6: tile_split = 4096; break;
++ }
++ return tile_split;
++}
++
++static unsigned eg_tile_split_rev(unsigned eg_tile_split)
++{
++ switch (eg_tile_split) {
++ case 64: return 0;
++ case 128: return 1;
++ case 256: return 2;
++ case 512: return 3;
++ default:
++ case 1024: return 4;
++ case 2048: return 5;
++ case 4096: return 6;
++ }
++}
++
++static void amdgpu_bo_get_tiling(struct pb_buffer *_buf,
++ enum radeon_bo_layout *microtiled,
++ enum radeon_bo_layout *macrotiled,
++ unsigned *bankw, unsigned *bankh,
++ unsigned *tile_split,
++ unsigned *stencil_tile_split,
++ unsigned *mtilea,
++ bool *scanout)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
++ struct amdgpu_bo_info info = {0};
++ uint32_t tiling_flags;
++ int r;
++
++ r = amdgpu_bo_query_info(bo->bo, &info);
++ if (r)
++ return;
++
++ tiling_flags = info.metadata.tiling_info;
++
++ *microtiled = RADEON_LAYOUT_LINEAR;
++ *macrotiled = RADEON_LAYOUT_LINEAR;
++ if (tiling_flags & AMDGPU_TILING_MICRO)
++ *microtiled = RADEON_LAYOUT_TILED;
++ else if (tiling_flags & AMDGPU_TILING_MICRO_SQUARE)
++ *microtiled = RADEON_LAYOUT_SQUARETILED;
++
++ if (tiling_flags & AMDGPU_TILING_MACRO)
++ *macrotiled = RADEON_LAYOUT_TILED;
++ if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
++ *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
++ *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
++ *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
++ *stencil_tile_split = (tiling_flags >> AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK;
++ *mtilea = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
++ *tile_split = eg_tile_split(*tile_split);
++ }
++ if (scanout)
++ *scanout = !(tiling_flags & AMDGPU_TILING_R600_NO_SCANOUT);
++}
++
++static void amdgpu_bo_set_tiling(struct pb_buffer *_buf,
++ struct radeon_winsys_cs *rcs,
++ enum radeon_bo_layout microtiled,
++ enum radeon_bo_layout macrotiled,
++ unsigned bankw, unsigned bankh,
++ unsigned tile_split,
++ unsigned stencil_tile_split,
++ unsigned mtilea,
++ uint32_t pitch,
++ bool scanout)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(_buf);
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ struct amdgpu_bo_metadata metadata = {0};
++ uint32_t tiling_flags = 0;
++
++
++ /* Tiling determines how DRM treats the buffer data.
++ * We must flush CS when changing it if the buffer is referenced. */
++ if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
++ cs->flush_cs(cs->flush_data, 0, NULL);
++ }
++
++ while (p_atomic_read(&bo->num_active_ioctls)) {
++ sched_yield();
++ }
++
++ if (microtiled == RADEON_LAYOUT_TILED)
++ tiling_flags |= AMDGPU_TILING_MICRO;
++ else if (microtiled == RADEON_LAYOUT_SQUARETILED)
++ tiling_flags |= AMDGPU_TILING_MICRO_SQUARE;
++
++ if (macrotiled == RADEON_LAYOUT_TILED)
++ tiling_flags |= AMDGPU_TILING_MACRO;
++
++ tiling_flags |= (bankw & AMDGPU_TILING_EG_BANKW_MASK) <<
++ AMDGPU_TILING_EG_BANKW_SHIFT;
++ tiling_flags |= (bankh & AMDGPU_TILING_EG_BANKH_MASK) <<
++ AMDGPU_TILING_EG_BANKH_SHIFT;
++ if (tile_split) {
++ tiling_flags |= (eg_tile_split_rev(tile_split) &
++ AMDGPU_TILING_EG_TILE_SPLIT_MASK) <<
++ AMDGPU_TILING_EG_TILE_SPLIT_SHIFT;
++ }
++ tiling_flags |= (stencil_tile_split &
++ AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
++ AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
++ tiling_flags |= (mtilea & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
++ AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
++
++ if (!scanout)
++ tiling_flags |= AMDGPU_TILING_R600_NO_SCANOUT;
++
++ metadata.tiling_info = tiling_flags;
++
++ amdgpu_bo_set_metadata(bo->bo, &metadata);
++}
++
++static struct radeon_winsys_cs_handle *amdgpu_get_cs_handle(struct pb_buffer *_buf)
++{
++ /* return a direct pointer to amdgpu_winsys_bo. */
++ return (struct radeon_winsys_cs_handle*)get_amdgpu_winsys_bo(_buf);
++}
++
++static struct pb_buffer *
++amdgpu_bo_create(struct radeon_winsys *rws,
++ unsigned size,
++ unsigned alignment,
++ boolean use_reusable_pool,
++ enum radeon_bo_domain domain,
++ enum radeon_bo_flag flags)
++{
++ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
++ struct amdgpu_bo_desc desc;
++ struct pb_manager *provider;
++ struct pb_buffer *buffer;
++
++ memset(&desc, 0, sizeof(desc));
++ desc.base.alignment = alignment;
++
++ /* Only set one usage bit each for domains and flags, or the cache manager
++ * might consider different sets of domains / flags compatible
++ */
++ if (domain == RADEON_DOMAIN_VRAM_GTT)
++ desc.base.usage = 1 << 2;
++ else
++ desc.base.usage = domain >> 1;
++ assert(flags < sizeof(desc.base.usage) * 8 - 3);
++ desc.base.usage |= 1 << (flags + 3);
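++ /* For example (assuming the usual radeon_winsys.h values, where
++ * RADEON_DOMAIN_GTT == 2 and RADEON_DOMAIN_VRAM == 4): GTT maps to usage
++ * bit 0, VRAM to bit 1, VRAM_GTT to bit 2, and a flags value of N sets
++ * bit N + 3, so each domain/flags combination yields a distinct usage
++ * pattern for the cache manager. */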
++
++ desc.initial_domain = domain;
++ desc.flags = flags;
++
++ /* Assign a buffer manager. */
++ if (use_reusable_pool)
++ provider = ws->cman;
++ else
++ provider = ws->kman;
++
++ buffer = provider->create_buffer(provider, size, &desc.base);
++ if (!buffer)
++ return NULL;
++
++ return (struct pb_buffer*)buffer;
++}
++
++static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
++ struct winsys_handle *whandle,
++ unsigned *stride)
++{
++ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
++ struct amdgpu_winsys_bo *bo;
++ enum amdgpu_bo_handle_type type;
++ struct amdgpu_bo_import_result result = {0};
++ struct amdgpu_bo_info info = {0};
++ enum radeon_bo_domain initial = 0;
++ int r;
++
++ switch (whandle->type) {
++ case DRM_API_HANDLE_TYPE_SHARED:
++ type = amdgpu_bo_handle_type_gem_flink_name;
++ break;
++ case DRM_API_HANDLE_TYPE_FD:
++ type = amdgpu_bo_handle_type_dma_buf_fd;
++ break;
++ default:
++ return NULL;
++ }
++
++ r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
++ if (r)
++ return NULL;
++
++ /* Get initial domains. */
++ r = amdgpu_bo_query_info(result.buf_handle, &info);
++ if (r) {
++ amdgpu_bo_free(result.buf_handle);
++ return NULL;
++ }
++
++ if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
++ initial |= RADEON_DOMAIN_VRAM;
++ if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
++ initial |= RADEON_DOMAIN_GTT;
++
++ /* Initialize the structure. */
++ bo = CALLOC_STRUCT(amdgpu_winsys_bo);
++ if (!bo) {
++ amdgpu_bo_free(result.buf_handle);
++ return NULL;
++ }
++
++ pipe_reference_init(&bo->base.reference, 1);
++ bo->base.alignment = info.phys_alignment;
++ bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
++ bo->bo = result.buf_handle;
++ bo->base.size = result.alloc_size;
++ bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
++ bo->rws = ws;
++ bo->va = result.virtual_mc_base_address;
++ bo->initial_domain = initial;
++
++ if (amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->handle)) {
++ amdgpu_bo_free(bo->bo);
++ FREE(bo);
++ return NULL;
++ }
++
++ if (stride)
++ *stride = whandle->stride;
++
++ if (bo->initial_domain & RADEON_DOMAIN_VRAM)
++ ws->allocated_vram += align(bo->base.size, 4096);
++ else if (bo->initial_domain & RADEON_DOMAIN_GTT)
++ ws->allocated_gtt += align(bo->base.size, 4096);
++
++ return &bo->base;
++}
++
++static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
++ unsigned stride,
++ struct winsys_handle *whandle)
++{
++ struct amdgpu_winsys_bo *bo = get_amdgpu_winsys_bo(buffer);
++ enum amdgpu_bo_handle_type type;
++ int r;
++
++ switch (whandle->type) {
++ case DRM_API_HANDLE_TYPE_SHARED:
++ type = amdgpu_bo_handle_type_gem_flink_name;
++ break;
++ case DRM_API_HANDLE_TYPE_FD:
++ type = amdgpu_bo_handle_type_dma_buf_fd;
++ break;
++ case DRM_API_HANDLE_TYPE_KMS:
++ type = amdgpu_bo_handle_type_kms;
++ break;
++ default:
++ return FALSE;
++ }
++
++ r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
++ if (r)
++ return FALSE;
++
++ whandle->stride = stride;
++ return TRUE;
++}
++
++static uint64_t amdgpu_bo_get_va(struct radeon_winsys_cs_handle *buf)
++{
++ return ((struct amdgpu_winsys_bo*)buf)->va;
++}
++
++void amdgpu_bomgr_init_functions(struct amdgpu_winsys *ws)
++{
++ ws->base.buffer_get_cs_handle = amdgpu_get_cs_handle;
++ ws->base.buffer_set_tiling = amdgpu_bo_set_tiling;
++ ws->base.buffer_get_tiling = amdgpu_bo_get_tiling;
++ ws->base.buffer_map = amdgpu_bo_map;
++ ws->base.buffer_unmap = amdgpu_bo_unmap;
++ ws->base.buffer_wait = amdgpu_bo_wait;
++ ws->base.buffer_is_busy = amdgpu_bo_is_busy;
++ ws->base.buffer_create = amdgpu_bo_create;
++ ws->base.buffer_from_handle = amdgpu_bo_from_handle;
++ ws->base.buffer_get_handle = amdgpu_bo_get_handle;
++ ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
++ ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
++}
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h b/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h
+new file mode 100644
+index 0000000..ccf98b5
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_bo.h
+@@ -0,0 +1,75 @@
++/*
++ * Copyright © 2008 Jérôme Glisse
++ * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++/*
++ * Authors:
++ * Jérôme Glisse <glisse@freedesktop.org>
++ * Marek Olšák <maraeo@gmail.com>
++ */
++#ifndef AMDGPU_DRM_BO_H
++#define AMDGPU_DRM_BO_H
++
++#include "amdgpu_winsys.h"
++#include "pipebuffer/pb_bufmgr.h"
++
++struct amdgpu_bo_desc {
++ struct pb_desc base;
++
++ enum radeon_bo_domain initial_domain;
++ unsigned flags;
++};
++
++struct amdgpu_winsys_bo {
++ struct pb_buffer base;
++
++ struct amdgpu_winsys *rws;
++
++ amdgpu_bo_handle bo;
++ uint32_t handle;
++ uint64_t va;
++ enum radeon_bo_domain initial_domain;
++
++ /* how many command streams reference this bo? */
++ int num_cs_references;
++
++ /* how many command streams that are being emitted in a separate
++ * thread reference this bo? */
++ int num_active_ioctls;
++
++ struct pipe_fence_handle *fence; /* for buffer_wait & buffer_is_busy */
++};
++
++struct pb_manager *amdgpu_bomgr_create(struct amdgpu_winsys *rws);
++void amdgpu_bomgr_init_functions(struct amdgpu_winsys *ws);
++
++static INLINE
++void amdgpu_winsys_bo_reference(struct amdgpu_winsys_bo **dst,
++ struct amdgpu_winsys_bo *src)
++{
++ pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
++}
++
++#endif
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c b/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c
+new file mode 100644
+index 0000000..aee7ff3
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.c
+@@ -0,0 +1,578 @@
++/*
++ * Copyright © 2008 Jérôme Glisse
++ * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++/*
++ * Authors:
++ * Marek Olšák <maraeo@gmail.com>
++ */
++
++#include "amdgpu_cs.h"
++#include "os/os_time.h"
++#include <stdio.h>
++#include <amdgpu_drm.h>
++
++
++/* FENCES */
++
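++/* Fence lifecycle: a fence is created right before its IB is handed to the
++ * submission path, with submission_in_progress set. Once the submit ioctl
++ * returns, amdgpu_fence_submitted() records the kernel fence number (or
++ * amdgpu_fence_signalled() marks the fence on failure), and
++ * amdgpu_fence_wait() first waits for the submission to finish before
++ * querying the kernel for the fence status. */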
++static struct pipe_fence_handle *
++amdgpu_fence_create(unsigned ip, uint32_t instance)
++{
++ struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
++
++ fence->reference.count = 1;
++ fence->ip_type = ip;
++ fence->ring = instance;
++ fence->submission_in_progress = true;
++ return (struct pipe_fence_handle *)fence;
++}
++
++static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
++ uint64_t fence_id)
++{
++ struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
++
++ rfence->fence = fence_id;
++ rfence->submission_in_progress = false;
++}
++
++static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
++{
++ struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
++
++ rfence->signalled = true;
++}
++
++static bool amdgpu_fence_wait(struct radeon_winsys *rws,
++ struct pipe_fence_handle *fence,
++ uint64_t timeout)
++{
++ struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
++ struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
++ struct amdgpu_cs_query_fence query = {0};
++ uint32_t expired;
++ int r;
++
++ /* XXX Access to rfence->signalled is racy here. */
++ if (rfence->signalled)
++ return true;
++
++ /* The fence may not have a number assigned if its IB is being
++ * submitted in the other thread right now. Wait until the submission
++ * is done. */
++ if (rfence->submission_in_progress) {
++ if (!timeout) {
++ return FALSE;
++ } else if (timeout == PIPE_TIMEOUT_INFINITE) {
++ while (rfence->submission_in_progress)
++ sched_yield();
++ } else {
++ int64_t start_time = os_time_get_nano();
++ int64_t elapsed_time = 0;
++
++ while (rfence->submission_in_progress) {
++ elapsed_time = os_time_get_nano() - start_time;
++ if (elapsed_time >= timeout) {
++ return FALSE;
++ }
++ sched_yield();
++ }
++ timeout -= elapsed_time;
++ }
++ }
++
++ /* Now use the libdrm query. */
++ query.timeout_ns = timeout;
++ query.fence = rfence->fence;
++ query.context = ws->ctx;
++ query.ip_type = rfence->ip_type;
++ query.ip_instance = 0;
++ query.ring = rfence->ring;
++
++ r = amdgpu_cs_query_fence_status(ws->dev, &query, &expired);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
++ return FALSE;
++ }
++
++ rfence->signalled = expired != 0;
++ return rfence->signalled;
++}
++
++/* COMMAND SUBMISSION */
++
++static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
++{
++ struct amdgpu_cs_context *cur_cs = cs->csc;
++ struct amdgpu_winsys *ws = cs->ws;
++ struct amdgpu_cs_ib_alloc_result ib;
++ int r;
++
++ r = amdgpu_cs_alloc_ib(ws->dev, ws->ctx, amdgpu_cs_ib_size_64K, &ib);
++ if (r)
++ return false;
++
++ cs->base.buf = ib.cpu;
++ cs->base.cdw = 0;
++
++ cur_cs->ib.ib_handle = ib.handle;
++ return true;
++}
++
++static boolean amdgpu_init_cs_context(struct amdgpu_cs_context *csc)
++{
++ int i;
++
++ csc->request.number_of_ibs = 1;
++ csc->request.ibs = &csc->ib;
++
++ csc->max_num_buffers = 512;
++ csc->buffers = (struct amdgpu_cs_buffer*)
++ CALLOC(1, csc->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
++ if (!csc->buffers) {
++ return FALSE;
++ }
++
++ csc->handles = CALLOC(1, csc->max_num_buffers * sizeof(amdgpu_bo_handle));
++ if (!csc->handles) {
++ FREE(csc->buffers);
++ return FALSE;
++ }
++
++ csc->flags = CALLOC(1, csc->max_num_buffers);
++ if (!csc->flags) {
++ FREE(csc->handles);
++ FREE(csc->buffers);
++ return FALSE;
++ }
++
++ for (i = 0; i < Elements(csc->buffer_indices_hashlist); i++) {
++ csc->buffer_indices_hashlist[i] = -1;
++ }
++ return TRUE;
++}
++
++static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *csc)
++{
++ unsigned i;
++
++ for (i = 0; i < csc->num_buffers; i++) {
++ p_atomic_dec(&csc->buffers[i].bo->num_cs_references);
++ amdgpu_winsys_bo_reference(&csc->buffers[i].bo, NULL);
++ csc->handles[i] = NULL;
++ csc->flags[i] = 0;
++ }
++
++ csc->num_buffers = 0;
++ csc->used_gart = 0;
++ csc->used_vram = 0;
++ amdgpu_fence_reference(&csc->fence, NULL);
++
++ for (i = 0; i < Elements(csc->buffer_indices_hashlist); i++) {
++ csc->buffer_indices_hashlist[i] = -1;
++ }
++}
++
++static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *csc)
++{
++ amdgpu_cs_context_cleanup(csc);
++ FREE(csc->flags);
++ FREE(csc->buffers);
++ FREE(csc->handles);
++}
++
++
++static struct radeon_winsys_cs *
++amdgpu_cs_create(struct radeon_winsys *rws,
++ enum ring_type ring_type,
++ void (*flush)(void *ctx, unsigned flags,
++ struct pipe_fence_handle **fence),
++ void *flush_ctx,
++ struct radeon_winsys_cs_handle *trace_buf)
++{
++ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
++ struct amdgpu_cs *cs;
++
++ cs = CALLOC_STRUCT(amdgpu_cs);
++ if (!cs) {
++ return NULL;
++ }
++
++ pipe_semaphore_init(&cs->flush_completed, 1);
++
++ cs->ws = ws;
++ cs->flush_cs = flush;
++ cs->flush_data = flush_ctx;
++
++ if (!amdgpu_init_cs_context(&cs->csc1)) {
++ FREE(cs);
++ return NULL;
++ }
++ if (!amdgpu_init_cs_context(&cs->csc2)) {
++ amdgpu_destroy_cs_context(&cs->csc1);
++ FREE(cs);
++ return NULL;
++ }
++
++ /* Set the first command buffer as current. */
++ cs->csc = &cs->csc1;
++ cs->cst = &cs->csc2;
++ cs->base.ring_type = ring_type;
++
++ if (!amdgpu_get_new_ib(cs)) {
++ amdgpu_destroy_cs_context(&cs->csc2);
++ amdgpu_destroy_cs_context(&cs->csc1);
++ FREE(cs);
++ return NULL;
++ }
++
++ p_atomic_inc(&ws->num_cs);
++ return &cs->base;
++}
++
++#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
++
++int amdgpu_get_reloc(struct amdgpu_cs_context *csc, struct amdgpu_winsys_bo *bo)
++{
++ unsigned hash = bo->handle & (Elements(csc->buffer_indices_hashlist)-1);
++ int i = csc->buffer_indices_hashlist[hash];
++
++ /* either not found (i == -1) or found at index i */
++ if (i == -1 || csc->buffers[i].bo == bo)
++ return i;
++
++ /* Hash collision, look for the BO in the list of relocs linearly. */
++ for (i = csc->num_buffers - 1; i >= 0; i--) {
++ if (csc->buffers[i].bo == bo) {
++ /* Put this reloc in the hash list.
++ * This will prevent additional hash collisions if there are
++ * several consecutive get_reloc calls for the same buffer.
++ *
++ * Example: Assuming buffers A,B,C collide in the hash list,
++ * the following sequence of relocs:
++ * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
++ * will collide here: ^ and here: ^,
++ * meaning that we should get very few collisions in the end. */
++ csc->buffer_indices_hashlist[hash] = i;
++ return i;
++ }
++ }
++ return -1;
++}
++
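++/* Add a relocation for a buffer to the current CS context. If the buffer is
++ * already listed, its usage, domains and priority are merged into the
++ * existing entry; otherwise the reloc arrays are grown as needed (in steps
++ * of 10) and a new entry is appended and recorded in the hash list.
++ * *added_domains reports which domains are new for this buffer. */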
++static unsigned amdgpu_add_reloc(struct amdgpu_cs *cs,
++ struct amdgpu_winsys_bo *bo,
++ enum radeon_bo_usage usage,
++ enum radeon_bo_domain domains,
++ unsigned priority,
++ enum radeon_bo_domain *added_domains)
++{
++ struct amdgpu_cs_context *csc = cs->csc;
++ struct amdgpu_cs_buffer *reloc;
++ unsigned hash = bo->handle & (Elements(csc->buffer_indices_hashlist)-1);
++ int i = -1;
++
++ priority = MIN2(priority, 15);
++ *added_domains = 0;
++
++ i = amdgpu_get_reloc(csc, bo);
++
++ if (i >= 0) {
++ reloc = &csc->buffers[i];
++ reloc->usage |= usage;
++ *added_domains = domains & ~reloc->domains;
++ reloc->domains |= domains;
++ csc->flags[i] = MAX2(csc->flags[i], priority);
++ return i;
++ }
++
++ /* New relocation, check if the backing array is large enough. */
++ if (csc->num_buffers >= csc->max_num_buffers) {
++ uint32_t size;
++ csc->max_num_buffers += 10;
++
++ size = csc->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
++ csc->buffers = realloc(csc->buffers, size);
++
++ size = csc->max_num_buffers * sizeof(amdgpu_bo_handle);
++ csc->handles = realloc(csc->handles, size);
++
++ csc->flags = realloc(csc->flags, csc->max_num_buffers);
++ }
++
++ /* Initialize the new relocation. */
++ csc->buffers[csc->num_buffers].bo = NULL;
++ amdgpu_winsys_bo_reference(&csc->buffers[csc->num_buffers].bo, bo);
++ csc->handles[csc->num_buffers] = bo->bo;
++ csc->flags[csc->num_buffers] = priority;
++ p_atomic_inc(&bo->num_cs_references);
++ reloc = &csc->buffers[csc->num_buffers];
++ reloc->bo = bo;
++ reloc->usage = usage;
++ reloc->domains = domains;
++
++ csc->buffer_indices_hashlist[hash] = csc->num_buffers;
++
++ *added_domains = domains;
++ return csc->num_buffers++;
++}
++
++static unsigned amdgpu_cs_add_reloc(struct radeon_winsys_cs *rcs,
++ struct radeon_winsys_cs_handle *buf,
++ enum radeon_bo_usage usage,
++ enum radeon_bo_domain domains,
++ enum radeon_bo_priority priority)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
++ enum radeon_bo_domain added_domains;
++ unsigned index = amdgpu_add_reloc(cs, bo, usage, domains, priority, &added_domains);
++
++ if (added_domains & RADEON_DOMAIN_GTT)
++ cs->csc->used_gart += bo->base.size;
++ if (added_domains & RADEON_DOMAIN_VRAM)
++ cs->csc->used_vram += bo->base.size;
++
++ return index;
++}
++
++static int amdgpu_cs_get_reloc(struct radeon_winsys_cs *rcs,
++ struct radeon_winsys_cs_handle *buf)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++
++ return amdgpu_get_reloc(cs->csc, (struct amdgpu_winsys_bo*)buf);
++}
++
++static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
++{
++ return TRUE;
++}
++
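++/* Conservative heuristic used by the pipe driver: the additional VRAM and
++ * GTT requests are considered to fit as long as the running totals stay
++ * below roughly 70% of the respective heap sizes. */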
++static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ boolean status =
++ (cs->csc->used_gart + gtt) < cs->ws->info.gart_size * 0.7 &&
++ (cs->csc->used_vram + vram) < cs->ws->info.vram_size * 0.7;
++
++ return status;
++}
++
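++/* Submit one command stream to the kernel. On success the fence receives the
++ * returned sequence number and is attached to every buffer in the reloc
++ * list; on failure the fence is marked signalled so waiters do not block.
++ * The per-buffer num_active_ioctls counters are dropped and the context is
++ * cleaned up in both cases. */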
++void amdgpu_cs_emit_ioctl_oneshot(struct amdgpu_cs *cs, struct amdgpu_cs_context *csc)
++{
++ struct amdgpu_winsys *ws = cs->ws;
++ int i, r;
++ uint64_t fence;
++
++ r = amdgpu_cs_submit(ws->dev, ws->ctx, 0, &csc->request, 1, &fence);
++ if (r) {
++ fprintf(stderr, "amdgpu: The CS has been rejected, "
++ "see dmesg for more information.\n");
++
++ amdgpu_fence_signalled(csc->fence);
++ } else {
++ /* Success. */
++ amdgpu_fence_submitted(csc->fence, fence);
++
++ for (i = 0; i < csc->num_buffers; i++) {
++ amdgpu_fence_reference(&csc->buffers[i].bo->fence, csc->fence);
++ }
++ }
++
++ /* Cleanup. */
++ for (i = 0; i < csc->num_buffers; i++) {
++ p_atomic_dec(&csc->buffers[i].bo->num_active_ioctls);
++ }
++ amdgpu_cs_context_cleanup(csc);
++}
++
++/*
++ * Make sure all previous submissions of this CS have completed.
++ */
++void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++
++ /* Wait for any pending ioctl to complete. */
++ if (cs->ws->thread) {
++ pipe_semaphore_wait(&cs->flush_completed);
++ pipe_semaphore_signal(&cs->flush_completed);
++ }
++}
++
++DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
++
++static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
++ unsigned flags,
++ struct pipe_fence_handle **fence,
++ uint32_t cs_trace_id)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ struct amdgpu_cs_context *tmp;
++
++ switch (cs->base.ring_type) {
++ case RING_DMA:
++ /* pad DMA ring to 8 DWs */
++ if (cs->ws->info.chip_class <= SI) {
++ while (rcs->cdw & 7)
++ OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
++ } else {
++ while (rcs->cdw & 7)
++ OUT_CS(&cs->base, 0x00000000); /* NOP packet */
++ }
++ break;
++ case RING_GFX:
++ /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
++ * r6xx requires at least 4 dw alignment to avoid a hw bug.
++ */
++ if (cs->ws->info.chip_class <= SI) {
++ while (rcs->cdw & 7)
++ OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
++ } else {
++ while (rcs->cdw & 7)
++ OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
++ }
++ break;
++ case RING_UVD:
++ while (rcs->cdw & 15)
++ OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
++ break;
++ default:
++ break;
++ }
++
++ if (rcs->cdw > RADEON_MAX_CMDBUF_DWORDS) {
++ fprintf(stderr, "amdgpu: command stream overflowed\n");
++ }
++
++ amdgpu_cs_sync_flush(rcs);
++
++ /* Swap command streams. */
++ tmp = cs->csc;
++ cs->csc = cs->cst;
++ cs->cst = tmp;
++
++ /* If the CS is neither empty nor overflowed, submit it (in a separate thread when available). */
++ if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS && !debug_get_option_noop()) {
++ unsigned i, num_buffers = cs->cst->num_buffers;
++
++ cs->cst->ib.size = cs->base.cdw;
++ cs->cst->request.number_of_resources = cs->cst->num_buffers;
++ cs->cst->request.resources = cs->cst->handles;
++ cs->cst->request.resource_flags = cs->cst->flags;
++
++ for (i = 0; i < num_buffers; i++) {
++ /* Update the number of active asynchronous CS ioctls for the buffer. */
++ p_atomic_inc(&cs->cst->buffers[i].bo->num_active_ioctls);
++ }
++
++ switch (cs->base.ring_type) {
++ case RING_DMA:
++ cs->cst->request.ip_type = AMDGPU_HW_IP_DMA;
++ break;
++
++ case RING_UVD:
++ cs->cst->request.ip_type = AMDGPU_HW_IP_UVD;
++ break;
++
++ case RING_VCE:
++ cs->cst->request.ip_type = AMDGPU_HW_IP_VCE;
++ break;
++
++ default:
++ case RING_GFX:
++ if (flags & RADEON_FLUSH_COMPUTE) {
++ cs->cst->request.ip_type = AMDGPU_HW_IP_COMPUTE;
++ } else {
++ cs->cst->request.ip_type = AMDGPU_HW_IP_GFX;
++ }
++ break;
++ }
++
++ amdgpu_fence_reference(&cs->cst->fence, NULL);
++ cs->cst->fence = amdgpu_fence_create(cs->cst->request.ip_type,
++ cs->cst->request.ring);
++
++ if (fence)
++ amdgpu_fence_reference(fence, cs->cst->fence);
++
++ if (cs->ws->thread) {
++ pipe_semaphore_wait(&cs->flush_completed);
++ amdgpu_ws_queue_cs(cs->ws, cs);
++ if (!(flags & RADEON_FLUSH_ASYNC))
++ amdgpu_cs_sync_flush(rcs);
++ } else {
++ amdgpu_cs_emit_ioctl_oneshot(cs, cs->cst);
++ }
++ } else {
++ amdgpu_cs_context_cleanup(cs->cst);
++ }
++
++ amdgpu_get_new_ib(cs);
++
++ cs->ws->num_cs_flushes++;
++}
++
++static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++
++ amdgpu_cs_sync_flush(rcs);
++ pipe_semaphore_destroy(&cs->flush_completed);
++ amdgpu_cs_context_cleanup(&cs->csc1);
++ amdgpu_cs_context_cleanup(&cs->csc2);
++ p_atomic_dec(&cs->ws->num_cs);
++ amdgpu_cs_free_ib(cs->ws->dev, cs->ws->ctx,
++ cs->csc->ib.ib_handle);
++ amdgpu_destroy_cs_context(&cs->csc1);
++ amdgpu_destroy_cs_context(&cs->csc2);
++ FREE(cs);
++}
++
++static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
++ struct radeon_winsys_cs_handle *_buf,
++ enum radeon_bo_usage usage)
++{
++ struct amdgpu_cs *cs = amdgpu_cs(rcs);
++ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
++
++ return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
++}
++
++void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
++{
++ ws->base.cs_create = amdgpu_cs_create;
++ ws->base.cs_destroy = amdgpu_cs_destroy;
++ ws->base.cs_add_reloc = amdgpu_cs_add_reloc;
++ ws->base.cs_get_reloc = amdgpu_cs_get_reloc;
++ ws->base.cs_validate = amdgpu_cs_validate;
++ ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
++ ws->base.cs_flush = amdgpu_cs_flush;
++ ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
++ ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
++ ws->base.fence_wait = amdgpu_fence_wait;
++ ws->base.fence_reference = amdgpu_fence_reference;
++}
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h b/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h
+new file mode 100644
+index 0000000..36a9aad
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_cs.h
+@@ -0,0 +1,149 @@
++/*
++ * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++
++#ifndef AMDGPU_DRM_CS_H
++#define AMDGPU_DRM_CS_H
++
++#include "amdgpu_bo.h"
++#include "util/u_memory.h"
++
++struct amdgpu_cs_buffer {
++ struct amdgpu_winsys_bo *bo;
++ enum radeon_bo_usage usage;
++ enum radeon_bo_domain domains;
++};
++
++struct amdgpu_cs_context {
++ struct amdgpu_cs_request request;
++ struct amdgpu_cs_ib_info ib;
++
++ /* Relocs. */
++ unsigned max_num_buffers;
++ unsigned num_buffers;
++ amdgpu_bo_handle *handles;
++ uint8_t *flags;
++ struct amdgpu_cs_buffer *buffers;
++
++ int buffer_indices_hashlist[512];
++
++ unsigned used_vram;
++ unsigned used_gart;
++
++ struct pipe_fence_handle *fence;
++};
++
++struct amdgpu_cs {
++ struct radeon_winsys_cs base;
++
++ /* We flip between these two CS. While one is being consumed
++ * by the kernel in another thread, the other one is being filled
++ * by the pipe driver. */
++ struct amdgpu_cs_context csc1;
++ struct amdgpu_cs_context csc2;
++ /* The currently-used CS. */
++ struct amdgpu_cs_context *csc;
++ /* The CS currently owned by the other thread. */
++ struct amdgpu_cs_context *cst;
++
++ /* The winsys. */
++ struct amdgpu_winsys *ws;
++
++ /* Flush CS. */
++ void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
++ void *flush_data;
++
++ pipe_semaphore flush_completed;
++};
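++
++/* A sketch of how the two contexts are used by amdgpu_cs_flush(): wait for
++ * the previous submission with amdgpu_cs_sync_flush(), swap csc and cst,
++ * hand cst to the submission thread via amdgpu_ws_queue_cs() (or submit it
++ * directly when no thread is running), then start a fresh IB in csc so the
++ * pipe driver can keep recording while the kernel consumes the other one. */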
++
++struct amdgpu_fence {
++ struct pipe_reference reference;
++
++ uint64_t fence; /* fence ID */
++ unsigned ip_type; /* which hw ip block the fence belongs to */
++ uint32_t ring; /* ring index of the hw ip block */
++
++ /* Set while the fence value is still unknown because its IB is being
++ * submitted in the other thread. */
++ bool submission_in_progress;
++ bool signalled;
++};
++
++static INLINE void amdgpu_fence_reference(struct pipe_fence_handle **dst,
++ struct pipe_fence_handle *src)
++{
++ struct amdgpu_fence **rdst = (struct amdgpu_fence **)dst;
++ struct amdgpu_fence *rsrc = (struct amdgpu_fence *)src;
++
++ if (pipe_reference(&(*rdst)->reference, &rsrc->reference))
++ FREE(*rdst);
++ *rdst = rsrc;
++}
++
++int amdgpu_get_reloc(struct amdgpu_cs_context *csc, struct amdgpu_winsys_bo *bo);
++
++static INLINE struct amdgpu_cs *
++amdgpu_cs(struct radeon_winsys_cs *base)
++{
++ return (struct amdgpu_cs*)base;
++}
++
++static INLINE boolean
++amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
++ struct amdgpu_winsys_bo *bo)
++{
++ int num_refs = bo->num_cs_references;
++ return num_refs == bo->rws->num_cs ||
++ (num_refs && amdgpu_get_reloc(cs->csc, bo) != -1);
++}
++
++static INLINE boolean
++amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
++ struct amdgpu_winsys_bo *bo,
++ enum radeon_bo_usage usage)
++{
++ int index;
++
++ if (!bo->num_cs_references)
++ return FALSE;
++
++ index = amdgpu_get_reloc(cs->csc, bo);
++ if (index == -1)
++ return FALSE;
++
++ return (cs->csc->buffers[index].usage & usage) != 0;
++}
++
++static INLINE boolean
++amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
++{
++ return bo->num_cs_references != 0;
++}
++
++void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs);
++void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
++void amdgpu_cs_emit_ioctl_oneshot(struct amdgpu_cs *cs, struct amdgpu_cs_context *csc);
++
++#endif
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_public.h b/src/gallium/winsys/radeon/amdgpu/amdgpu_public.h
+new file mode 100644
+index 0000000..4a7aa8e
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_public.h
+@@ -0,0 +1,14 @@
++#ifndef AMDGPU_DRM_PUBLIC_H
++#define AMDGPU_DRM_PUBLIC_H
++
++#include "pipe/p_defines.h"
++
++struct radeon_winsys;
++struct pipe_screen;
++
++typedef struct pipe_screen *(*radeon_screen_create_t)(struct radeon_winsys *);
++
++struct radeon_winsys *
++amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create);
++
++#endif
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c b/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c
+new file mode 100644
+index 0000000..0f3367a
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c
+@@ -0,0 +1,491 @@
++/*
++ * Copyright © 2009 Corbin Simpson
++ * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++/*
++ * Authors:
++ * Corbin Simpson <MostAwesomeDude@gmail.com>
++ * Joakim Sindholt <opensource@zhasha.com>
++ * Marek Olšák <maraeo@gmail.com>
++ */
++
++#include "amdgpu_cs.h"
++#include "amdgpu_public.h"
++
++#include "util/u_hash_table.h"
++#include <amdgpu_drm.h>
++#include <xf86drm.h>
++#include <stdio.h>
++#include <sys/stat.h>
++
++#define CIK_TILE_MODE_COLOR_2D 14
++
++#define CIK__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
++#define CIK__PIPE_CONFIG__ADDR_SURF_P2 0
++#define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
++#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
++#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
++#define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
++#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
++#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16 16
++#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
++
++static struct util_hash_table *fd_tab = NULL;
++pipe_static_mutex(fd_tab_mutex);
++
++static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
++{
++ unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];
++
++ switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
++ case CIK__PIPE_CONFIG__ADDR_SURF_P2:
++ default:
++ return 2;
++ case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
++ return 4;
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
++ return 8;
++ case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
++ case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
++ return 16;
++ }
++}
++
++/* Convert the Sea Islands register values GB_ADDR_CFG and MC_ADDR_CFG
++ * into the GB_TILING_CONFIG register, which is only present on R600-R700. */
++static unsigned r600_get_gb_tiling_config(struct amdgpu_gpu_info *info)
++{
++ unsigned num_pipes = info->gb_addr_cfg & 0x7;
++ unsigned num_banks = info->mc_arb_ramcfg & 0x3;
++ unsigned pipe_interleave_bytes = (info->gb_addr_cfg >> 4) & 0x7;
++ unsigned row_size = (info->gb_addr_cfg >> 28) & 0x3;
++
++ return num_pipes | (num_banks << 4) |
++ (pipe_interleave_bytes << 8) |
++ (row_size << 12);
++}
++
++/* Helper function to do the ioctls needed for setup and init. */
++static boolean do_winsys_init(struct amdgpu_winsys *ws)
++{
++ struct amdgpu_heap_info vram, gtt;
++ struct drm_amdgpu_info_hw_ip dma, uvd, vce;
++ uint32_t vce_version, vce_feature;
++ int r;
++
++ ws->num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
++
++ r = amdgpu_device_initialize(ws->fd, &ws->info.drm_major,
++ &ws->info.drm_minor, &ws->dev);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
++ return FALSE;
++ }
++
++ /* Query hardware and driver information. */
++ r = amdgpu_query_gpu_info(ws->dev, &ws->amdinfo);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_DMA, 0, &dma);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_UVD, 0, &uvd);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_VCE, 0, &vce);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_VCE, 0, 0,
++ &vce_version, &vce_feature);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
++ goto fail;
++ }
++
++ r = amdgpu_cs_ctx_create(ws->dev, &ws->ctx);
++ if (r) {
++ fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed.\n");
++ goto fail;
++ }
++
++ /* Set chip identification. */
++ ws->info.pci_id = ws->amdinfo.asic_id; /* TODO: is this correct? */
++
++ switch (ws->info.pci_id) {
++#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; break;
++#include "pci_ids/radeonsi_pci_ids.h"
++#undef CHIPSET
++
++ default:
++ fprintf(stderr, "amdgpu: Invalid PCI ID.\n");
++ goto fail;
++ }
++
++ if (ws->info.family >= CHIP_TONGA)
++ ws->info.chip_class = VI;
++ else if (ws->info.family >= CHIP_BONAIRE)
++ ws->info.chip_class = CIK;
++ else {
++ fprintf(stderr, "amdgpu: Unknown family.\n");
++ goto fail;
++ }
++
++ /* LLVM 3.6 is required for VI. */
++ if (ws->info.chip_class >= VI && HAVE_LLVM < 0x0306) {
++ fprintf(stderr, "amdgpu: LLVM 3.6 is required, got LLVM %i.%i.\n",
++ HAVE_LLVM >> 8, HAVE_LLVM & 255);
++ goto fail;
++ }
++
++ /* Set hardware information. */
++ ws->info.gart_size = gtt.heap_size;
++ ws->info.vram_size = vram.heap_size;
++ /* convert the shader clock from kHz to MHz */
++ ws->info.max_sclk = ws->amdinfo.max_engine_clk / 1000;
++ ws->info.max_compute_units = 1; /* TODO */
++ ws->info.max_se = ws->amdinfo.num_shader_engines;
++ ws->info.max_sh_per_se = ws->amdinfo.num_shader_arrays_per_engine;
++ ws->info.has_uvd = uvd.available_rings != 0;
++ ws->info.vce_fw_version =
++ vce.available_rings ? vce_version : 0;
++ ws->info.r600_num_backends = ws->amdinfo.rb_pipes;
++ ws->info.r600_clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
++ ws->info.r600_tiling_config = r600_get_gb_tiling_config(&ws->amdinfo);
++ ws->info.r600_num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
++ ws->info.r600_max_pipes = ws->amdinfo.max_quad_shader_pipes; /* TODO: is this correct? */
++ ws->info.r600_virtual_address = TRUE;
++ ws->info.r600_has_dma = dma.available_rings != 0;
++
++ memcpy(ws->info.si_tile_mode_array, ws->amdinfo.gb_tile_mode,
++ sizeof(ws->amdinfo.gb_tile_mode));
++ ws->info.si_tile_mode_array_valid = TRUE;
++ ws->info.si_backend_enabled_mask = ws->amdinfo.enabled_rb_pipes_mask;
++
++ memcpy(ws->info.cik_macrotile_mode_array, ws->amdinfo.gb_macro_tile_mode,
++ sizeof(ws->amdinfo.gb_macro_tile_mode));
++ ws->info.cik_macrotile_mode_array_valid = TRUE;
++
++ return TRUE;
++
++fail:
++ if (ws->ctx) {
++ amdgpu_cs_ctx_free(ws->dev, ws->ctx);
++ }
++ amdgpu_device_deinitialize(ws->dev);
++ ws->dev = NULL;
++ return FALSE;
++}
++
++static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
++{
++ struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
++
++ if (ws->thread) {
++ ws->kill_thread = 1;
++ pipe_semaphore_signal(&ws->cs_queued);
++ pipe_thread_wait(ws->thread);
++ }
++ pipe_semaphore_destroy(&ws->cs_queued);
++ pipe_mutex_destroy(ws->cs_stack_lock);
++
++ ws->cman->destroy(ws->cman);
++ ws->kman->destroy(ws->kman);
++
++ amdgpu_cs_ctx_free(ws->dev, ws->ctx);
++ amdgpu_device_deinitialize(ws->dev);
++ FREE(rws);
++}
++
++static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
++ struct radeon_info *info)
++{
++ *info = ((struct amdgpu_winsys *)rws)->info;
++}
++
++static boolean amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
++ enum radeon_feature_id fid,
++ boolean enable)
++{
++ return FALSE;
++}
++
++static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
++ enum radeon_value_id value)
++{
++ struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
++ struct amdgpu_heap_info heap;
++ uint64_t retval = 0;
++
++ switch (value) {
++ case RADEON_REQUESTED_VRAM_MEMORY:
++ return ws->allocated_vram;
++ case RADEON_REQUESTED_GTT_MEMORY:
++ return ws->allocated_gtt;
++ case RADEON_BUFFER_WAIT_TIME_NS:
++ return ws->buffer_wait_time;
++ case RADEON_TIMESTAMP:
++ amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
++ return retval;
++ case RADEON_NUM_CS_FLUSHES:
++ return ws->num_cs_flushes;
++ case RADEON_NUM_BYTES_MOVED:
++ amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
++ return retval;
++ case RADEON_VRAM_USAGE:
++ amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
++ return heap.heap_usage;
++ case RADEON_GTT_USAGE:
++ amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
++ return heap.heap_usage;
++ }
++ return 0;
++}
++
++static unsigned hash_fd(void *key)
++{
++ int fd = pointer_to_intptr(key);
++ struct stat stat;
++ fstat(fd, &stat);
++
++ return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
++}
++
++static int compare_fd(void *key1, void *key2)
++{
++ int fd1 = pointer_to_intptr(key1);
++ int fd2 = pointer_to_intptr(key2);
++ struct stat stat1, stat2;
++ fstat(fd1, &stat1);
++ fstat(fd2, &stat2);
++
++ return stat1.st_dev != stat2.st_dev ||
++ stat1.st_ino != stat2.st_ino ||
++ stat1.st_rdev != stat2.st_rdev;
++}
++
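++/* Queue a flushed CS for the submission thread. The queue holds at most
++ * RING_LAST entries; when it is full, the lock is dropped and the enqueue
++ * is retried until a slot frees up, after which the thread is woken through
++ * cs_queued. */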
++void amdgpu_ws_queue_cs(struct amdgpu_winsys *ws, struct amdgpu_cs *cs)
++{
++retry:
++ pipe_mutex_lock(ws->cs_stack_lock);
++ if (ws->num_enqueued_cs >= RING_LAST) {
++ /* no room left for a flush */
++ pipe_mutex_unlock(ws->cs_stack_lock);
++ goto retry;
++ }
++ ws->cs_stack[ws->num_enqueued_cs++] = cs;
++ pipe_mutex_unlock(ws->cs_stack_lock);
++ pipe_semaphore_signal(&ws->cs_queued);
++}
++
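++/* Submission thread main loop: wait on cs_queued, pop the oldest queued CS,
++ * submit it with amdgpu_cs_emit_ioctl_oneshot() and signal its
++ * flush_completed semaphore; on shutdown, any CSes still queued are
++ * signalled so no caller stays blocked in amdgpu_cs_sync_flush(). */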
++static PIPE_THREAD_ROUTINE(amdgpu_cs_emit_ioctl, param)
++{
++ struct amdgpu_winsys *ws = (struct amdgpu_winsys *)param;
++ struct amdgpu_cs *cs;
++ unsigned i;
++
++ while (1) {
++ pipe_semaphore_wait(&ws->cs_queued);
++ if (ws->kill_thread)
++ break;
++
++ pipe_mutex_lock(ws->cs_stack_lock);
++ cs = ws->cs_stack[0];
++ for (i = 1; i < ws->num_enqueued_cs; i++)
++ ws->cs_stack[i - 1] = ws->cs_stack[i];
++ ws->cs_stack[--ws->num_enqueued_cs] = NULL;
++ pipe_mutex_unlock(ws->cs_stack_lock);
++
++ if (cs) {
++ amdgpu_cs_emit_ioctl_oneshot(cs, cs->cst);
++ pipe_semaphore_signal(&cs->flush_completed);
++ }
++ }
++ pipe_mutex_lock(ws->cs_stack_lock);
++ for (i = 0; i < ws->num_enqueued_cs; i++) {
++ pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
++ ws->cs_stack[i] = NULL;
++ }
++ ws->num_enqueued_cs = 0;
++ pipe_mutex_unlock(ws->cs_stack_lock);
++ return 0;
++}
++
++DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
++static PIPE_THREAD_ROUTINE(amdgpu_cs_emit_ioctl, param);
++
++static bool amdgpu_winsys_unref(struct radeon_winsys *ws)
++{
++ struct amdgpu_winsys *rws = (struct amdgpu_winsys*)ws;
++ bool destroy;
++
++ /* When the reference counter drops to zero, remove the fd from the table.
++ * This must happen while the mutex is locked, so that
++ * amdgpu_winsys_create in another thread doesn't get the winsys
++ * from the table when the counter drops to 0. */
++ pipe_mutex_lock(fd_tab_mutex);
++
++ destroy = pipe_reference(&rws->reference, NULL);
++ if (destroy && fd_tab)
++ util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));
++
++ pipe_mutex_unlock(fd_tab_mutex);
++ return destroy;
++}
++
++struct radeon_winsys *
++ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
++{
++ struct amdgpu_winsys *ws;
++ drmVersionPtr version = drmGetVersion(fd);
++
++ /* The DRM driver version of amdgpu is 3.x.x. */
++ if (version->version_major != 3) {
++ drmFreeVersion(version);
++ return NULL;
++ }
++ drmFreeVersion(version);
++
++ /* Look up the winsys from the fd table. */
++ pipe_mutex_lock(fd_tab_mutex);
++ if (!fd_tab) {
++ fd_tab = util_hash_table_create(hash_fd, compare_fd);
++ }
++
++ ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
++ if (ws) {
++ pipe_reference(NULL, &ws->reference);
++ pipe_mutex_unlock(fd_tab_mutex);
++ return &ws->base;
++ }
++
++ ws = CALLOC_STRUCT(amdgpu_winsys);
++ if (!ws) {
++ pipe_mutex_unlock(fd_tab_mutex);
++ return NULL;
++ }
++
++ ws->fd = fd;
++
++ if (!do_winsys_init(ws))
++ goto fail;
++
++ /* Create managers. */
++ ws->kman = amdgpu_bomgr_create(ws);
++ if (!ws->kman)
++ goto fail;
++ ws->cman = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
++ (ws->info.vram_size + ws->info.gart_size) / 8);
++ if (!ws->cman)
++ goto fail;
++
++ /* init reference */
++ pipe_reference_init(&ws->reference, 1);
++
++ /* Set functions. */
++ ws->base.unref = amdgpu_winsys_unref;
++ ws->base.destroy = amdgpu_winsys_destroy;
++ ws->base.query_info = amdgpu_winsys_query_info;
++ ws->base.cs_request_feature = amdgpu_cs_request_feature;
++ ws->base.query_value = amdgpu_query_value;
++
++ amdgpu_bomgr_init_functions(ws);
++ amdgpu_cs_init_functions(ws);
++
++ pipe_mutex_init(ws->cs_stack_lock);
++
++ ws->num_enqueued_cs = 0;
++ pipe_semaphore_init(&ws->cs_queued, 0);
++ if (ws->num_cpus > 1 && debug_get_option_thread())
++ ws->thread = pipe_thread_create(amdgpu_cs_emit_ioctl, ws);
++
++ /* Create the screen at the end. The winsys must be initialized
++ * completely.
++ *
++ * Alternatively, we could create the screen based on "ws->gen"
++ * and link all drivers into one binary blob. */
++ ws->base.screen = screen_create(&ws->base);
++ if (!ws->base.screen) {
++ amdgpu_winsys_destroy(&ws->base);
++ pipe_mutex_unlock(fd_tab_mutex);
++ return NULL;
++ }
++
++ util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);
++
++ /* We must unlock the mutex once the winsys is fully initialized, so that
++ * other threads attempting to create the winsys from the same fd will
++ * get a fully initialized winsys and not just half-way initialized. */
++ pipe_mutex_unlock(fd_tab_mutex);
++
++ return &ws->base;
++
++fail:
++ pipe_mutex_unlock(fd_tab_mutex);
++ if (ws->cman)
++ ws->cman->destroy(ws->cman);
++ if (ws->kman)
++ ws->kman->destroy(ws->kman);
++ FREE(ws);
++ return NULL;
++}
+diff --git a/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h b/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h
+new file mode 100644
+index 0000000..fc27f1c
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h
+@@ -0,0 +1,80 @@
++/*
++ * Copyright © 2009 Corbin Simpson
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++/*
++ * Authors:
++ * Corbin Simpson <MostAwesomeDude@gmail.com>
++ */
++#ifndef AMDGPU_DRM_WINSYS_H
++#define AMDGPU_DRM_WINSYS_H
++
++#include "../radeon_winsys.h"
++#include "os/os_thread.h"
++#include <amdgpu.h>
++
++struct amdgpu_cs;
++
++struct amdgpu_winsys {
++ struct radeon_winsys base;
++ struct pipe_reference reference;
++
++ int fd; /* DRM file descriptor */
++ amdgpu_device_handle dev;
++ /* This only affects the order in which IBs are executed. */
++ amdgpu_context_handle ctx;
++
++ int num_cs; /* The number of command streams created. */
++ uint64_t allocated_vram;
++ uint64_t allocated_gtt;
++ uint64_t buffer_wait_time; /* time spent in buffer_wait in ns */
++ uint64_t num_cs_flushes;
++
++ struct radeon_info info;
++
++ struct pb_manager *kman;
++ struct pb_manager *cman;
++
++ uint32_t num_cpus; /* Number of CPUs. */
++
++ /* rings submission thread */
++ pipe_mutex cs_stack_lock;
++ pipe_semaphore cs_queued;
++ pipe_thread thread;
++ int kill_thread;
++ int num_enqueued_cs;
++ struct amdgpu_cs *cs_stack[RING_LAST];
++
++ struct amdgpu_gpu_info amdinfo;
++};
++
++static INLINE struct amdgpu_winsys *
++amdgpu_winsys(struct radeon_winsys *base)
++{
++ return (struct amdgpu_winsys*)base;
++}
++
++void amdgpu_ws_queue_cs(struct amdgpu_winsys *ws, struct amdgpu_cs *cs);
++
++#endif
+diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+index 12767bf..a312f03 100644
+--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
++++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+@@ -34,6 +34,7 @@
+ #include "radeon_drm_bo.h"
+ #include "radeon_drm_cs.h"
+ #include "radeon_drm_public.h"
++#include "../amdgpu/amdgpu_public.h"
+
+ #include "pipebuffer/pb_bufmgr.h"
+ #include "util/u_memory.h"
+@@ -643,6 +644,13 @@ PUBLIC struct radeon_winsys *
+ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
+ {
+ struct radeon_drm_winsys *ws;
++ struct radeon_winsys *amdgpu;
++
++ /* First, try amdgpu. */
++ amdgpu = amdgpu_winsys_create(fd, screen_create);
++ if (amdgpu) {
++ return amdgpu;
++ }
+
+ pipe_mutex_lock(fd_tab_mutex);
+ if (!fd_tab) {
+diff --git a/src/gallium/winsys/radeon/radeon_winsys.h b/src/gallium/winsys/radeon/radeon_winsys.h
+index 7fb7ac9..a3cb273 100644
+--- a/src/gallium/winsys/radeon/radeon_winsys.h
++++ b/src/gallium/winsys/radeon/radeon_winsys.h
+@@ -136,6 +136,9 @@ enum radeon_family {
+ CHIP_KABINI,
+ CHIP_HAWAII,
+ CHIP_MULLINS,
++ CHIP_TONGA,
++ CHIP_ICELAND,
++ CHIP_CARRIZO,
+ CHIP_LAST,
+ };
+
+@@ -150,6 +153,7 @@ enum chip_class {
+ CAYMAN,
+ SI,
+ CIK,
++ VI,
+ };
+
+ enum ring_type {
+--
+1.9.1
+
diff --git a/meta-amdfalconx86/recipes-graphics/mesa/mesa/0009-winsys-amdgpu-add-addrlib-texture-addressing-and-ali.patch b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0009-winsys-amdgpu-add-addrlib-texture-addressing-and-ali.patch
new file mode 100644
index 00000000..0ca8a819
--- /dev/null
+++ b/meta-amdfalconx86/recipes-graphics/mesa/mesa/0009-winsys-amdgpu-add-addrlib-texture-addressing-and-ali.patch
@@ -0,0 +1,22649 @@
+From dbeaed6cf049a0be97631ab74afa1f4ab9a800bf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
+Date: Thu, 16 Apr 2015 19:41:33 +0200
+Subject: [PATCH 09/29] winsys/amdgpu: add addrlib - texture addressing and
+ alignment calculator
+
+Addrlib is an internal AMD project used by the Catalyst driver; the
+open-source driver now uses it as well.
+
+Signed-off-by: Arindam Nath <arindam.nath@amd.com>
+---
+ src/gallium/winsys/radeon/amdgpu/Makefile.am | 7 +-
+ src/gallium/winsys/radeon/amdgpu/Makefile.sources | 23 +
+ .../winsys/radeon/amdgpu/addrlib/addrinterface.cpp | 1008 +++++
+ .../winsys/radeon/amdgpu/addrlib/addrinterface.h | 2166 +++++++++
+ .../winsys/radeon/amdgpu/addrlib/addrtypes.h | 590 +++
+ .../winsys/radeon/amdgpu/addrlib/core/addrcommon.h | 558 +++
+ .../radeon/amdgpu/addrlib/core/addrelemlib.cpp | 1678 +++++++
+ .../radeon/amdgpu/addrlib/core/addrelemlib.h | 270 ++
+ .../winsys/radeon/amdgpu/addrlib/core/addrlib.cpp | 4028 +++++++++++++++++
+ .../winsys/radeon/amdgpu/addrlib/core/addrlib.h | 695 +++
+ .../radeon/amdgpu/addrlib/core/addrobject.cpp | 246 ++
+ .../winsys/radeon/amdgpu/addrlib/core/addrobject.h | 89 +
+ .../amdgpu/addrlib/inc/chip/r800/si_gb_reg.h | 155 +
+ .../radeon/amdgpu/addrlib/inc/lnx_common_defs.h | 129 +
+ .../addrlib/r800/chip/si_ci_vi_merged_enum.h | 40 +
+ .../radeon/amdgpu/addrlib/r800/ciaddrlib.cpp | 1777 ++++++++
+ .../winsys/radeon/amdgpu/addrlib/r800/ciaddrlib.h | 197 +
+ .../radeon/amdgpu/addrlib/r800/egbaddrlib.cpp | 4578 ++++++++++++++++++++
+ .../winsys/radeon/amdgpu/addrlib/r800/egbaddrlib.h | 411 ++
+ .../radeon/amdgpu/addrlib/r800/siaddrlib.cpp | 2818 ++++++++++++
+ .../winsys/radeon/amdgpu/addrlib/r800/siaddrlib.h | 262 ++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_id.h | 157 +
+ src/gallium/winsys/radeon/amdgpu/amdgpu_surface.c | 436 ++
+ src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.c | 50 +
+ src/gallium/winsys/radeon/amdgpu/amdgpu_winsys.h | 6 +
+ 25 files changed, 22373 insertions(+), 1 deletion(-)
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/addrinterface.cpp
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/addrinterface.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/addrtypes.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/core/addrcommon.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/core/addrelemlib.cpp
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/core/addrelemlib.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/core/addrlib.cpp
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/core/addrlib.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/core/addrobject.cpp
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/core/addrobject.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/inc/chip/r800/si_gb_reg.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/inc/lnx_common_defs.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/r800/chip/si_ci_vi_merged_enum.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/r800/ciaddrlib.cpp
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/r800/ciaddrlib.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/r800/egbaddrlib.cpp
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/r800/egbaddrlib.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/r800/siaddrlib.cpp
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/addrlib/r800/siaddrlib.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_id.h
+ create mode 100644 src/gallium/winsys/radeon/amdgpu/amdgpu_surface.c
+
+diff --git a/src/gallium/winsys/radeon/amdgpu/Makefile.am b/src/gallium/winsys/radeon/amdgpu/Makefile.am
+index 80ecb75..a719913 100644
+--- a/src/gallium/winsys/radeon/amdgpu/Makefile.am
++++ b/src/gallium/winsys/radeon/amdgpu/Makefile.am
+@@ -3,7 +3,12 @@ include $(top_srcdir)/src/gallium/Automake.inc
+
+ AM_CFLAGS = \
+ $(GALLIUM_WINSYS_CFLAGS) \
+- $(AMDGPU_CFLAGS)
++ $(AMDGPU_CFLAGS) \
++ -I$(srcdir)/addrlib \
++ -I$(srcdir)/addrlib/core \
++ -I$(srcdir)/addrlib/inc/chip/r800 \
++ -I$(srcdir)/addrlib/r800/chip \
++ -DBRAHMA_BUILD=1
+
+ AM_CXXFLAGS = $(AM_CFLAGS)
+
+diff --git a/src/gallium/winsys/radeon/amdgpu/Makefile.sources b/src/gallium/winsys/radeon/amdgpu/Makefile.sources
+index 0f55010..6b33841 100644
+--- a/src/gallium/winsys/radeon/amdgpu/Makefile.sources
++++ b/src/gallium/winsys/radeon/amdgpu/Makefile.sources
+@@ -1,8 +1,31 @@
+ C_SOURCES := \
++ addrlib/addrinterface.cpp \
++ addrlib/addrinterface.h \
++ addrlib/addrtypes.h \
++ addrlib/core/addrcommon.h \
++ addrlib/core/addrelemlib.cpp \
++ addrlib/core/addrelemlib.h \
++ addrlib/core/addrlib.cpp \
++ addrlib/core/addrlib.h \
++ addrlib/core/addrobject.cpp \
++ addrlib/core/addrobject.h \
++ addrlib/inc/chip/r800/si_gb_reg.h \
++ addrlib/inc/lnx_common_defs.h \
++ addrlib/r800/chip/si_ci_merged_enum.h \
++ addrlib/r800/chip/si_ci_vi_merged_enum.h \
++ addrlib/r800/chip/si_enum.h \
++ addrlib/r800/ciaddrlib.cpp \
++ addrlib/r800/ciaddrlib.h \
++ addrlib/r800/egbaddrlib.cpp \
++ addrlib/r800/egbaddrlib.h \
++ addrlib/r800/siaddrlib.cpp \
++ addrlib/r800/siaddrlib.h \
+ amdgpu_bo.c \
+ amdgpu_bo.h \
+ amdgpu_cs.c \
+ amdgpu_cs.h \
++ amdgpu_id.h \
+ amdgpu_public.h \
++ amdgpu_surface.c \
+ amdgpu_winsys.c \
+ amdgpu_winsys.h
+diff --git a/src/gallium/winsys/radeon/amdgpu/addrlib/addrinterface.cpp b/src/gallium/winsys/radeon/amdgpu/addrlib/addrinterface.cpp
+new file mode 100644
+index 0000000..6556927
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/addrlib/addrinterface.cpp
+@@ -0,0 +1,1008 @@
++/*
++ * Copyright © 2014 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++
++/**
++***************************************************************************************************
++* @file addrinterface.cpp
++* @brief Contains the addrlib interface functions
++***************************************************************************************************
++*/
++#include "addrinterface.h"
++#include "addrlib.h"
++
++#include "addrcommon.h"
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// Create/Destroy/Config functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* AddrCreate
++*
++* @brief
++* Create address lib object
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrCreate(
++ const ADDR_CREATE_INPUT* pAddrCreateIn, ///< [in] information for creating address lib object
++ ADDR_CREATE_OUTPUT* pAddrCreateOut) ///< [out] address lib handle
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ returnCode = AddrLib::Create(pAddrCreateIn, pAddrCreateOut);
++
++ return returnCode;
++}
++
++
++
++/**
++***************************************************************************************************
++* AddrDestroy
++*
++* @brief
++* Destroy address lib object
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrDestroy(
++ ADDR_HANDLE hLib) ///< [in] address lib handle
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (hLib)
++ {
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++ pLib->Destroy();
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// Surface functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* AddrComputeSurfaceInfo
++*
++* @brief
++* Calculate surface width/height/depth/alignments and suitable tiling mode
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeSurfaceInfo(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_SURFACE_INFO_INPUT* pIn, ///< [in] surface information
++ ADDR_COMPUTE_SURFACE_INFO_OUTPUT* pOut) ///< [out] surface parameters and alignments
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeSurfaceInfo(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++
++
++/**
++***************************************************************************************************
++* AddrComputeSurfaceAddrFromCoord
++*
++* @brief
++* Compute surface address according to coordinates
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeSurfaceAddrFromCoord(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_SURFACE_ADDRFROMCOORD_INPUT* pIn, ///< [in] surface info and coordinates
++ ADDR_COMPUTE_SURFACE_ADDRFROMCOORD_OUTPUT* pOut) ///< [out] surface address
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeSurfaceAddrFromCoord(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeSurfaceCoordFromAddr
++*
++* @brief
++* Compute coordinates according to surface address
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeSurfaceCoordFromAddr(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_SURFACE_COORDFROMADDR_INPUT* pIn, ///< [in] surface info and address
++ ADDR_COMPUTE_SURFACE_COORDFROMADDR_OUTPUT* pOut) ///< [out] coordinates
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeSurfaceCoordFromAddr(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// HTile functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* AddrComputeHtileInfo
++*
++* @brief
++* Compute Htile pitch, height, base alignment and size in bytes
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeHtileInfo(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_HTILE_INFO_INPUT* pIn, ///< [in] Htile information
++ ADDR_COMPUTE_HTILE_INFO_OUTPUT* pOut) ///< [out] Htile pitch, height and size in bytes
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeHtileInfo(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeHtileAddrFromCoord
++*
++* @brief
++* Compute Htile address according to coordinates (of depth buffer)
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeHtileAddrFromCoord(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_HTILE_ADDRFROMCOORD_INPUT* pIn, ///< [in] Htile info and coordinates
++ ADDR_COMPUTE_HTILE_ADDRFROMCOORD_OUTPUT* pOut) ///< [out] Htile address
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeHtileAddrFromCoord(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeHtileCoordFromAddr
++*
++* @brief
++* Compute coordinates within depth buffer (1st pixel of a micro tile) according to
++* Htile address
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeHtileCoordFromAddr(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_HTILE_COORDFROMADDR_INPUT* pIn, ///< [in] Htile info and address
++ ADDR_COMPUTE_HTILE_COORDFROMADDR_OUTPUT* pOut) ///< [out] Htile coordinates
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeHtileCoordFromAddr(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// C-mask functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* AddrComputeCmaskInfo
++*
++* @brief
++* Compute Cmask pitch, height, base alignment and size in bytes from color buffer
++* info
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeCmaskInfo(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_CMASK_INFO_INPUT* pIn, ///< [in] Cmask pitch and height
++ ADDR_COMPUTE_CMASK_INFO_OUTPUT* pOut) ///< [out] Cmask pitch, height and size in bytes
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeCmaskInfo(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeCmaskAddrFromCoord
++*
++* @brief
++* Compute Cmask address according to coordinates (of MSAA color buffer)
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeCmaskAddrFromCoord(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_CMASK_ADDRFROMCOORD_INPUT* pIn, ///< [in] Cmask info and coordinates
++ ADDR_COMPUTE_CMASK_ADDRFROMCOORD_OUTPUT* pOut) ///< [out] Cmask address
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeCmaskAddrFromCoord(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeCmaskCoordFromAddr
++*
++* @brief
++* Compute coordinates within color buffer (1st pixel of a micro tile) according to
++* Cmask address
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeCmaskCoordFromAddr(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_CMASK_COORDFROMADDR_INPUT* pIn, ///< [in] Cmask info and address
++ ADDR_COMPUTE_CMASK_COORDFROMADDR_OUTPUT* pOut) ///< [out] Cmask coordinates
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeCmaskCoordFromAddr(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// F-mask functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* AddrComputeFmaskInfo
++*
++* @brief
++* Compute Fmask pitch/height/depth/alignments and size in bytes
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeFmaskInfo(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_FMASK_INFO_INPUT* pIn, ///< [in] Fmask information
++ ADDR_COMPUTE_FMASK_INFO_OUTPUT* pOut) ///< [out] Fmask pitch and height
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeFmaskInfo(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeFmaskAddrFromCoord
++*
++* @brief
++* Compute Fmask address according to coordinates (x,y,slice,sample,plane)
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeFmaskAddrFromCoord(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_FMASK_ADDRFROMCOORD_INPUT* pIn, ///< [in] Fmask info and coordinates
++ ADDR_COMPUTE_FMASK_ADDRFROMCOORD_OUTPUT* pOut) ///< [out] Fmask address
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeFmaskAddrFromCoord(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeFmaskCoordFromAddr
++*
++* @brief
++* Compute coordinates (x,y,slice,sample,plane) according to Fmask address
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeFmaskCoordFromAddr(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_COMPUTE_FMASK_COORDFROMADDR_INPUT* pIn, ///< [in] Fmask info and address
++ ADDR_COMPUTE_FMASK_COORDFROMADDR_OUTPUT* pOut) ///< [out] Fmask coordinates
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeFmaskCoordFromAddr(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// DCC key functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* AddrComputeDccInfo
++*
++* @brief
++* Compute DCC key size, base alignment based on color surface size, tile info or tile index
++*
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeDccInfo(
++ ADDR_HANDLE hLib, ///< [in] handle of addrlib
++ const ADDR_COMPUTE_DCCINFO_INPUT* pIn, ///< [in] input
++ ADDR_COMPUTE_DCCINFO_OUTPUT* pOut) ///< [out] output
++{
++ ADDR_E_RETURNCODE returnCode;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeDccInfo(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++
++
++///////////////////////////////////////////////////////////////////////////////
++// Below functions are element related or helper functions
++///////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* AddrGetVersion
++*
++* @brief
++* Get AddrLib version number. Client may check this return value against ADDRLIB_VERSION
++* defined in addrinterface.h to see if there is a mismatch.
++***************************************************************************************************
++*/
++UINT_32 ADDR_API AddrGetVersion(ADDR_HANDLE hLib)
++{
++ UINT_32 version = 0;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_ASSERT(pLib != NULL);
++
++ if (pLib)
++ {
++ version = pLib->GetVersion();
++ }
++
++ return version;
++}
++
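++// Illustrative usage sketch, not part of this patch: per the comment above, a client
++// compiled against this header might verify that the library it loaded matches, e.g.:
++//
++//     if (AddrGetVersion(hLib) != ADDRLIB_VERSION)
++//     {
++//         // header/library version mismatch - warn or bail out
++//     }
++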
++/**
++***************************************************************************************************
++* AddrUseTileIndex
++*
++* @brief
++* Return TRUE if tileIndex is enabled in this address library
++***************************************************************************************************
++*/
++BOOL_32 ADDR_API AddrUseTileIndex(ADDR_HANDLE hLib)
++{
++ BOOL_32 useTileIndex = FALSE;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_ASSERT(pLib != NULL);
++
++ if (pLib)
++ {
++ useTileIndex = pLib->UseTileIndex(0);
++ }
++
++ return useTileIndex;
++}
++
++/**
++***************************************************************************************************
++* AddrUseCombinedSwizzle
++*
++* @brief
++* Return TRUE if combined swizzle is enabled in this address library
++***************************************************************************************************
++*/
++BOOL_32 ADDR_API AddrUseCombinedSwizzle(ADDR_HANDLE hLib)
++{
++ BOOL_32 useCombinedSwizzle = FALSE;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_ASSERT(pLib != NULL);
++
++ if (pLib)
++ {
++ useCombinedSwizzle = pLib->UseCombinedSwizzle();
++ }
++
++ return useCombinedSwizzle;
++}
++
++/**
++***************************************************************************************************
++* AddrExtractBankPipeSwizzle
++*
++* @brief
++* Extract Bank and Pipe swizzle from base256b
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrExtractBankPipeSwizzle(
++ ADDR_HANDLE hLib, ///< [in] addrlib handle
++ const ADDR_EXTRACT_BANKPIPE_SWIZZLE_INPUT* pIn, ///< [in] input structure
++ ADDR_EXTRACT_BANKPIPE_SWIZZLE_OUTPUT* pOut) ///< [out] output structure
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ExtractBankPipeSwizzle(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrCombineBankPipeSwizzle
++*
++* @brief
++* Combine Bank and Pipe swizzle
++* @return
++* ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrCombineBankPipeSwizzle(
++ ADDR_HANDLE hLib,
++ const ADDR_COMBINE_BANKPIPE_SWIZZLE_INPUT* pIn,
++ ADDR_COMBINE_BANKPIPE_SWIZZLE_OUTPUT* pOut)
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->CombineBankPipeSwizzle(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeSliceSwizzle
++*
++* @brief
++* Compute a swizzle for slice from a base swizzle
++* @return
++* ADDR_OK if no error
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeSliceSwizzle(
++ ADDR_HANDLE hLib,
++ const ADDR_COMPUTE_SLICESWIZZLE_INPUT* pIn,
++ ADDR_COMPUTE_SLICESWIZZLE_OUTPUT* pOut)
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeSliceTileSwizzle(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputeBaseSwizzle
++*
++* @brief
++* Return a Combined Bank and Pipe swizzle for a surface, based on surface type/index
++* @return
++* ADDR_OK if no error
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputeBaseSwizzle(
++ ADDR_HANDLE hLib,
++ const ADDR_COMPUTE_BASE_SWIZZLE_INPUT* pIn,
++ ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT* pOut)
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputeBaseSwizzle(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* ElemFlt32ToDepthPixel
++*
++* @brief
++* Convert a FLT_32 value to a depth/stencil pixel value
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++*
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API ElemFlt32ToDepthPixel(
++ ADDR_HANDLE hLib, ///< [in] addrlib handle
++ const ELEM_FLT32TODEPTHPIXEL_INPUT* pIn, ///< [in] per-component value
++ ELEM_FLT32TODEPTHPIXEL_OUTPUT* pOut) ///< [out] final pixel value
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ if (pLib != NULL)
++ {
++ pLib->Flt32ToDepthPixel(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* ElemFlt32ToColorPixel
++*
++* @brief
++* Convert a FLT_32 value to a red/green/blue/alpha pixel value
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++*
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API ElemFlt32ToColorPixel(
++ ADDR_HANDLE hLib, ///< [in] addrlib handle
++ const ELEM_FLT32TOCOLORPIXEL_INPUT* pIn, ///< [in] format, surface number and swap value
++ ELEM_FLT32TOCOLORPIXEL_OUTPUT* pOut) ///< [out] final pixel value
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ if (pLib != NULL)
++ {
++ pLib->Flt32ToColorPixel(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* ElemGetExportNorm
++*
++* @brief
++* Helper function to check whether a format can be EXPORT_NORM,
++* which is a value of the register field CB_COLOR_INFO.SURFACE_FORMAT.
++* FP16 can be reported as EXPORT_NORM for rv770 in the r600
++* family.
++*
++***************************************************************************************************
++*/
++BOOL_32 ADDR_API ElemGetExportNorm(
++ ADDR_HANDLE hLib, ///< [in] addrlib handle
++ const ELEM_GETEXPORTNORM_INPUT* pIn) ///< [in] input structure
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++ BOOL_32 enabled = FALSE;
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ enabled = pLib->GetExportNorm(pIn);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ ADDR_ASSERT(returnCode == ADDR_OK);
++
++ return enabled;
++}
++
++/**
++***************************************************************************************************
++* AddrConvertTileInfoToHW
++*
++* @brief
++* Convert tile info from real value to hardware register value
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrConvertTileInfoToHW(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_CONVERT_TILEINFOTOHW_INPUT* pIn, ///< [in] tile info with real value
++ ADDR_CONVERT_TILEINFOTOHW_OUTPUT* pOut) ///< [out] tile info with HW register value
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ConvertTileInfoToHW(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrConvertTileIndex
++*
++* @brief
++* Convert tile index to tile mode/type/info
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrConvertTileIndex(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_CONVERT_TILEINDEX_INPUT* pIn, ///< [in] input - tile index
++ ADDR_CONVERT_TILEINDEX_OUTPUT* pOut) ///< [out] tile mode/type/info
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ConvertTileIndex(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrConvertTileIndex1
++*
++* @brief
++* Convert tile index to tile mode/type/info
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrConvertTileIndex1(
++ ADDR_HANDLE hLib, ///< [in] address lib handle
++ const ADDR_CONVERT_TILEINDEX1_INPUT* pIn, ///< [in] input - tile index
++ ADDR_CONVERT_TILEINDEX_OUTPUT* pOut) ///< [out] tile mode/type/info
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ConvertTileIndex1(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrGetTileIndex
++*
++* @brief
++* Get tile index from tile mode/type/info
++*
++* @return
++* ADDR_OK if successful, otherwise an error code of ADDR_E_RETURNCODE
++*
++* @note
++* Only meaningful for SI (and above)
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrGetTileIndex(
++ ADDR_HANDLE hLib,
++ const ADDR_GET_TILEINDEX_INPUT* pIn,
++ ADDR_GET_TILEINDEX_OUTPUT* pOut)
++{
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->GetTileIndex(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
++/**
++***************************************************************************************************
++* AddrComputePrtInfo
++*
++* @brief
++* Interface function for ComputePrtInfo
++*
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrComputePrtInfo(
++ ADDR_HANDLE hLib,
++ const ADDR_PRT_INFO_INPUT* pIn,
++ ADDR_PRT_INFO_OUTPUT* pOut)
++{
++ ADDR_E_RETURNCODE returnCode = ADDR_OK;
++
++ AddrLib* pLib = AddrLib::GetAddrLib(hLib);
++
++ if (pLib != NULL)
++ {
++ returnCode = pLib->ComputePrtInfo(pIn, pOut);
++ }
++ else
++ {
++ returnCode = ADDR_ERROR;
++ }
++
++ return returnCode;
++}
++
+diff --git a/src/gallium/winsys/radeon/amdgpu/addrlib/addrinterface.h b/src/gallium/winsys/radeon/amdgpu/addrlib/addrinterface.h
+new file mode 100644
+index 0000000..03fbf2b
+--- /dev/null
++++ b/src/gallium/winsys/radeon/amdgpu/addrlib/addrinterface.h
+@@ -0,0 +1,2166 @@
++/*
++ * Copyright © 2014 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
++ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ */
++
++/**
++***************************************************************************************************
++* @file addrinterface.h
++* @brief Contains the addrlib interfaces declaration and parameter defines
++***************************************************************************************************
++*/
++#ifndef __ADDR_INTERFACE_H__
++#define __ADDR_INTERFACE_H__
++
++#if defined(__cplusplus)
++extern "C"
++{
++#endif
++
++#include "addrtypes.h"
++
++#define ADDRLIB_VERSION_MAJOR 5
++#define ADDRLIB_VERSION_MINOR 25
++#define ADDRLIB_VERSION ((ADDRLIB_VERSION_MAJOR << 16) | ADDRLIB_VERSION_MINOR)
++
++/// Virtually all interface functions need ADDR_HANDLE as first parameter
++typedef VOID* ADDR_HANDLE;
++
++/// Client handle used in callbacks
++typedef VOID* ADDR_CLIENT_HANDLE;
++
++/**
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* // Callback functions
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* typedef VOID* (ADDR_API* ADDR_ALLOCSYSMEM)(
++* const ADDR_ALLOCSYSMEM_INPUT* pInput);
++* typedef ADDR_E_RETURNCODE (ADDR_API* ADDR_FREESYSMEM)(
++* VOID* pVirtAddr);
++* typedef ADDR_E_RETURNCODE (ADDR_API* ADDR_DEBUGPRINT)(
++* const ADDR_DEBUGPRINT_INPUT* pInput);
++*
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* // Create/Destroy/Config functions
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* AddrCreate()
++* AddrDestroy()
++*
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* // Surface functions
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* AddrComputeSurfaceInfo()
++* AddrComputeSurfaceAddrFromCoord()
++* AddrComputeSurfaceCoordFromAddr()
++*
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* // HTile functions
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* AddrComputeHtileInfo()
++* AddrComputeHtileAddrFromCoord()
++* AddrComputeHtileCoordFromAddr()
++*
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* // C-mask functions
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* AddrComputeCmaskInfo()
++* AddrComputeCmaskAddrFromCoord()
++* AddrComputeCmaskCoordFromAddr()
++*
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* // F-mask functions
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* AddrComputeFmaskInfo()
++* AddrComputeFmaskAddrFromCoord()
++* AddrComputeFmaskCoordFromAddr()
++*
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* // Element/Utility functions
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* ElemFlt32ToDepthPixel()
++* ElemFlt32ToColorPixel()
++* AddrExtractBankPipeSwizzle()
++* AddrCombineBankPipeSwizzle()
++* AddrComputeSliceSwizzle()
++* AddrConvertTileInfoToHW()
++* AddrConvertTileIndex()
++* AddrConvertTileIndex1()
++* AddrGetTileIndex()
++* AddrComputeBaseSwizzle()
++* AddrUseTileIndex()
++* AddrUseCombinedSwizzle()
++*
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* // Dump functions
++* /////////////////////////////////////////////////////////////////////////////////////////////////
++* AddrDumpSurfaceInfo()
++* AddrDumpFmaskInfo()
++* AddrDumpCmaskInfo()
++* AddrDumpHtileInfo()
++*
++**/
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// Callback functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* @brief Alloc system memory flags.
++* @note These flags are reserved for future use; defining them now minimizes the
++* impact on clients if flags are added later.
++***************************************************************************************************
++*/
++typedef union _ADDR_ALLOCSYSMEM_FLAGS
++{
++ struct
++ {
++ UINT_32 reserved : 32; ///< Reserved for future use.
++ } fields;
++ UINT_32 value;
++
++} ADDR_ALLOCSYSMEM_FLAGS;
++
++/**
++***************************************************************************************************
++* @brief Alloc system memory input structure
++***************************************************************************************************
++*/
++typedef struct _ADDR_ALLOCSYSMEM_INPUT
++{
++ UINT_32 size; ///< Size of this structure in bytes
++
++ ADDR_ALLOCSYSMEM_FLAGS flags; ///< System memory flags.
++ UINT_32 sizeInBytes; ///< System memory allocation size in bytes.
++ ADDR_CLIENT_HANDLE hClient; ///< Client handle
++} ADDR_ALLOCSYSMEM_INPUT;
++
++/**
++***************************************************************************************************
++* ADDR_ALLOCSYSMEM
++* @brief
++* Allocate system memory callback function. Returns valid pointer on success.
++***************************************************************************************************
++*/
++typedef VOID* (ADDR_API* ADDR_ALLOCSYSMEM)(
++ const ADDR_ALLOCSYSMEM_INPUT* pInput);
++
++/**
++***************************************************************************************************
++* @brief Free system memory input structure
++***************************************************************************************************
++*/
++typedef struct _ADDR_FREESYSMEM_INPUT
++{
++ UINT_32 size; ///< Size of this structure in bytes
++
++ VOID* pVirtAddr; ///< Virtual address
++ ADDR_CLIENT_HANDLE hClient; ///< Client handle
++} ADDR_FREESYSMEM_INPUT;
++
++/**
++***************************************************************************************************
++* ADDR_FREESYSMEM
++* @brief
++* Free system memory callback function.
++* Returns ADDR_OK on success.
++***************************************************************************************************
++*/
++typedef ADDR_E_RETURNCODE (ADDR_API* ADDR_FREESYSMEM)(
++ const ADDR_FREESYSMEM_INPUT* pInput);
++
++/**
++***************************************************************************************************
++* @brief Print debug message input structure
++***************************************************************************************************
++*/
++typedef struct _ADDR_DEBUGPRINT_INPUT
++{
++ UINT_32 size; ///< Size of this structure in bytes
++
++ CHAR* pDebugString; ///< Debug print string
++ va_list ap; ///< Variable argument list
++ ADDR_CLIENT_HANDLE hClient; ///< Client handle
++} ADDR_DEBUGPRINT_INPUT;
++
++/**
++***************************************************************************************************
++* ADDR_DEBUGPRINT
++* @brief
++* Print debug message callback function.
++* Returns ADDR_OK on success.
++***************************************************************************************************
++*/
++typedef ADDR_E_RETURNCODE (ADDR_API* ADDR_DEBUGPRINT)(
++ const ADDR_DEBUGPRINT_INPUT* pInput);
++
++/**
++***************************************************************************************************
++* ADDR_CALLBACKS
++*
++* @brief
++* Address Library needs client to provide system memory alloc/free routines.
++***************************************************************************************************
++*/
++typedef struct _ADDR_CALLBACKS
++{
++ ADDR_ALLOCSYSMEM allocSysMem; ///< Routine to allocate system memory
++ ADDR_FREESYSMEM freeSysMem; ///< Routine to free system memory
++ ADDR_DEBUGPRINT debugPrint; ///< Routine to print debug message
++} ADDR_CALLBACKS;
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// Create/Destroy functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* ADDR_CREATE_FLAGS
++*
++* @brief
++* This structure is used to pass some setup in creation of AddrLib
++* @note
++***************************************************************************************************
++*/
++typedef union _ADDR_CREATE_FLAGS
++{
++ struct
++ {
++ UINT_32 noCubeMipSlicesPad : 1; ///< Turn cubemap faces padding off
++ UINT_32 fillSizeFields : 1; ///< Whether clients fill the size fields in all input
++ /// and output structures
++ UINT_32 useTileIndex : 1; ///< Make tileIndex field in input valid
++ UINT_32 useCombinedSwizzle : 1; ///< Use combined tile swizzle
++ UINT_32 checkLast2DLevel : 1; ///< Check the last 2D mip sub level
++ UINT_32 useHtileSliceAlign : 1; ///< Do htile single slice alignment
++ UINT_32 degradeBaseLevel : 1; ///< Degrade to 1D modes automatically for base level
++ UINT_32 allowLargeThickTile : 1; ///< Allow 64*thickness*bytesPerPixel > rowSize
++ UINT_32 reserved : 24; ///< Reserved bits for future use
++ };
++
++ UINT_32 value;
++} ADDR_CREATE_FLAGS;
++
++/**
++***************************************************************************************************
++* ADDR_REGISTER_VALUE
++*
++* @brief
++* Data from registers to setup AddrLib global data, used in AddrCreate
++***************************************************************************************************
++*/
++typedef struct _ADDR_REGISTER_VALUE
++{
++ UINT_32 gbAddrConfig; ///< For R8xx, use GB_ADDR_CONFIG register value.
++ /// For R6xx/R7xx, use GB_TILING_CONFIG.
++ /// But they can be treated as the same.
++ /// if this value is 0, use chip to set default value
++ UINT_32 backendDisables; ///< 1 bit per backend, starting with LSB. 1=disabled,0=enabled.
++ /// Register value of CC_RB_BACKEND_DISABLE.BACKEND_DISABLE
++
++ /// R800 registers-----------------------------------------------
++ UINT_32 noOfBanks; ///< Number of h/w ram banks - For r800: MC_ARB_RAMCFG.NOOFBANK
++ /// No enums for this value in h/w header files
++ /// 0: 4
++ /// 1: 8
++ /// 2: 16
++ UINT_32 noOfRanks; /// MC_ARB_RAMCFG.NOOFRANK
++ /// 0: 1
++ /// 1: 2
++ /// SI (R1000) registers-----------------------------------------
++ const UINT_32* pTileConfig; ///< Global tile setting tables
++ UINT_32 noOfEntries; ///< Number of entries in pTileConfig
++
++ ///< CI registers-------------------------------------------------
++ const UINT_32* pMacroTileConfig; ///< Global macro tile mode table
++ UINT_32 noOfMacroEntries; ///< Number of entries in pMacroTileConfig
++
++} ADDR_REGISTER_VALUE;
++
++/**
++***************************************************************************************************
++* ADDR_CREATE_INPUT
++*
++* @brief
++* Parameters used to create an AddrLib object. Caller must provide all fields.
++*
++***************************************************************************************************
++*/
++typedef struct _ADDR_CREATE_INPUT
++{
++ UINT_32 size; ///< Size of this structure in bytes
++
++ UINT_32 chipEngine; ///< Chip Engine
++ UINT_32 chipFamily; ///< Chip Family
++ UINT_32 chipRevision; ///< Chip Revision
++ ADDR_CALLBACKS callbacks; ///< Callbacks for sysmem alloc/free/print
++ ADDR_CREATE_FLAGS createFlags; ///< Flags to setup AddrLib
++ ADDR_REGISTER_VALUE regValue; ///< Data from registers to setup AddrLib global data
++ ADDR_CLIENT_HANDLE hClient; ///< Client handle
++ UINT_32 minPitchAlignPixels; ///< Minimum pitch alignment in pixels
++} ADDR_CREATE_INPUT;
++
++/**
++***************************************************************************************************
++* ADDR_CREATEINFO_OUTPUT
++*
++* @brief
++* Return AddrLib handle to client driver
++*
++***************************************************************************************************
++*/
++typedef struct _ADDR_CREATE_OUTPUT
++{
++ UINT_32 size; ///< Size of this structure in bytes
++
++ ADDR_HANDLE hLib; ///< Address lib handle
++} ADDR_CREATE_OUTPUT;
++
++/**
++***************************************************************************************************
++* AddrCreate
++*
++* @brief
++* Create AddrLib object, must be called before any interface calls
++*
++* @return
++* ADDR_OK if successful
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrCreate(
++ const ADDR_CREATE_INPUT* pAddrCreateIn,
++ ADDR_CREATE_OUTPUT* pAddrCreateOut);
++
++
++
++/**
++***************************************************************************************************
++* AddrDestroy
++*
++* @brief
++* Destroy AddrLib object, must be called to free internally allocated resources.
++*
++* @return
++* ADDR_OK if successful
++***************************************************************************************************
++*/
++ADDR_E_RETURNCODE ADDR_API AddrDestroy(
++ ADDR_HANDLE hLib);
++
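++/**
++***************************************************************************************************
++* Illustrative usage sketch - not part of this patch. It shows one plausible way a client
++* could provide the required callbacks and create an AddrLib object, assuming <stdlib.h>
++* for malloc/free; the chip family/revision and gbAddrConfig values are placeholders the
++* client is expected to obtain elsewhere (e.g. from the kernel driver).
++*
++*     static VOID* ADDR_API myAllocSysMem(const ADDR_ALLOCSYSMEM_INPUT* pInput)
++*     {
++*         return malloc(pInput->sizeInBytes);
++*     }
++*
++*     static ADDR_E_RETURNCODE ADDR_API myFreeSysMem(const ADDR_FREESYSMEM_INPUT* pInput)
++*     {
++*         free(pInput->pVirtAddr);
++*         return ADDR_OK;
++*     }
++*
++*     static ADDR_E_RETURNCODE ADDR_API myDebugPrint(const ADDR_DEBUGPRINT_INPUT* pInput)
++*     {
++*         return ADDR_OK; // discard debug output in this sketch
++*     }
++*
++*     static ADDR_HANDLE createAddrLib(UINT_32 chipFamily, UINT_32 chipRevision,
++*                                      UINT_32 gbAddrConfig)
++*     {
++*         ADDR_CREATE_INPUT  createIn  = {0};
++*         ADDR_CREATE_OUTPUT createOut = {0};
++*
++*         createIn.size  = sizeof(createIn);
++*         createOut.size = sizeof(createOut);
++*         createIn.createFlags.fillSizeFields = 1;
++*         createIn.callbacks.allocSysMem = myAllocSysMem;
++*         createIn.callbacks.freeSysMem  = myFreeSysMem;
++*         createIn.callbacks.debugPrint  = myDebugPrint;
++*         createIn.chipFamily   = chipFamily;   // placeholder, e.g. decoded from the PCI ID
++*         createIn.chipRevision = chipRevision; // placeholder
++*         createIn.regValue.gbAddrConfig = gbAddrConfig; // placeholder register value
++*
++*         return (AddrCreate(&createIn, &createOut) == ADDR_OK) ? createOut.hLib : NULL;
++*     }
++*
++* The returned handle is then passed to the other Addr* entry points and released
++* with AddrDestroy() when no longer needed.
++***************************************************************************************************
++*/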
++
++
++///////////////////////////////////////////////////////////////////////////////////////////////////
++// Surface functions
++///////////////////////////////////////////////////////////////////////////////////////////////////
++
++/**
++***************************************************************************************************
++* @brief
++* Bank/tiling parameters. On function input, these can be set as desired or
++* left 0 for AddrLib to calculate/default. On function output, these are the actual
++* parameters used.
++* @note
++* Valid bankWidth/bankHeight value:
++* 1,2,4,8. They are factors instead of pixels or bytes.
++*
++* The bank number remains constant across each row of the
++* macro tile as each pipe is selected, so the number of
++* tiles in the x direction with the same bank number will
++* be bank_width * num_pipes.
++***************************************************************************************************
++*/
++typedef struct _ADDR_TILEINFO
++{
++ /// Any of these parameters can be set to 0 to use the HW default.
++ UINT_32 banks; ///< Number of banks, numerical value
++ UINT_32 bankWidth; ///< Number of tiles in the X direction in the same bank
++ UINT_32 bankHeight; ///< Number of tiles in the Y direction in the same bank
++ UINT_32 macroAspectRatio; ///< Macro tile aspect ratio. 1-1:1, 2-4:1, 4-16:1, 8-64:1
++ UINT_32 tileSplitBytes; ///< Tile split size, in bytes
++ AddrPipeCfg pipeConfig; ///< Pipe Config = HW enum + 1
++} ADDR_TILEINFO;
++
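++// Worked example for the bank-number note above (illustrative, assuming a 4-pipe part):
++// with bankWidth = 2 and num_pipes = 4, the bank number stays constant for
++// bankWidth * num_pipes = 2 * 4 = 8 consecutive tiles in the X direction before
++// the next bank number is used.
++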
++// Create a define to avoid client changes. R800 was removed from the name because we plan to
++// implement SI within the R800 HWL - an AddrPipeCfg member has been added to the structure above.
++typedef ADDR_TILEINFO ADDR_R800_TILEINFO;
++
++/**
++***************************************************************************************************
++* @brief
++* Information needed by quad buffer stereo support
++***************************************************************************************************
++*/
++typedef struct _ADDR_QBSTEREOINFO
++{
++ UINT_32 eyeHeight; ///< Height (in pixel rows) to right eye
++ UINT_32 rightOffset; ///< Offset (in bytes) to right eye
++ UINT_32 rightSwizzle; ///< TileSwizzle for right eyes
++} ADDR_QBSTEREOINFO;
++
++/**
++***************************************************************************************************
++* ADDR_SURFACE_FLAGS
++*
++* @brief
++* Surface flags
++***************************************************************************************************
++*/
++typedef union _ADDR_SURFACE_FLAGS
++{
++ struct
++ {
++ UINT_32 color : 1; ///< Flag indicates this is a color buffer
++ UINT_32 depth : 1; ///< Flag indicates this is a depth/stencil buffer
++ UINT_32 stencil : 1; ///< Flag indicates this is a stencil buffer
++ UINT_32 texture : 1; ///< Flag indicates this is a texture
++ UINT_32 cube : 1; ///< Flag indicates this is a cubemap
++
++ UINT_32 volume : 1; ///< Flag indicates this is a volume texture
++ UINT_32 fmask : 1; ///< Flag indicates this is an fmask
++ UINT_32 cubeAsArray : 1; ///< Flag indicates if treat cubemap as arrays
++ UINT_32 compressZ : 1; ///< Flag indicates z buffer is compressed
++ UINT_32 overlay : 1; ///< Flag indicates this is an overlay surface
++ UINT_32 noStencil : 1; ///< Flag indicates this depth has no separate stencil
++ UINT_32 display : 1; ///< Flag indicates this should match display controller req.
++ UINT_32 opt4Space : 1; ///< Flag indicates this surface should be optimized for space
++ /// i.e. save some memory but may lose pe