diff --git a/meta-amdfalconx86/recipes-kernel/linux/files/0035-yocto-amd-drm-amdgpu-add-GCA-7-0-register-headers.patch b/meta-amdfalconx86/recipes-kernel/linux/files/0035-yocto-amd-drm-amdgpu-add-GCA-7-0-register-headers.patch
new file mode 100644
index 00000000..193fb4f2
--- /dev/null
+++ b/meta-amdfalconx86/recipes-kernel/linux/files/0035-yocto-amd-drm-amdgpu-add-GCA-7-0-register-headers.patch
@@ -0,0 +1,34087 @@
+From fa0009cd518c0c131a04b309b7b25ce2af688d90 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 20 Apr 2015 16:55:21 -0400
+Subject: drm/amdgpu: add core driver
+
+This adds the non-asic specific core driver code.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sanjay R Mehta <Sanju.Mehta@amd.com>
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 47f2ce8..1a39f24 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -121,6 +121,28 @@ config DRM_RADEON
+
+ source "drivers/gpu/drm/radeon/Kconfig"
+
++config DRM_AMDGPU
++ tristate "AMD GPU"
++ depends on DRM && PCI
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ select FW_LOADER
++ select DRM_KMS_HELPER
++ select DRM_KMS_FB_HELPER
++ select DRM_TTM
++ select POWER_SUPPLY
++ select HWMON
++ select BACKLIGHT_CLASS_DEVICE
++ select DRM_AMD_GNB_BUS
++ select INTERVAL_TREE
++ help
++ Choose this option if you have a recent AMD Radeon graphics card.
++
++ If M is selected, the module will be called amdgpu.
++
++source "drivers/gpu/drm/amd/amdgpu/Kconfig"
++
+ source "drivers/gpu/drm/nouveau/Kconfig"
+
+ config DRM_I810
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 7d4944e..ca5cf1a 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -39,5 +39,6 @@ obj-$(CONFIG_DRM_TDFX) += tdfx/
+ obj-$(CONFIG_DRM_R128) += r128/
+ obj-$(CONFIG_DRM_RADEON)+= radeon/
++obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
+ obj-$(CONFIG_DRM_MGA) += mga/
+ obj-$(CONFIG_DRM_I810) += i810/
+ obj-$(CONFIG_DRM_I915) += i915/
+diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
+new file mode 100644
+index 0000000..b30fcfa
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
+@@ -0,0 +1,17 @@
++config DRM_AMDGPU_CIK
++ bool "Enable amdgpu support for CIK parts"
++ depends on DRM_AMDGPU
++ help
++ Choose this option if you want to enable experimental support
++ for CIK asics.
++
++ CIK is already supported in radeon. CIK support in amdgpu
++ is for experimentation and testing.
++
++config DRM_AMDGPU_USERPTR
++ bool "Always enable userptr write support"
++ depends on DRM_AMDGPU
++ select MMU_NOTIFIER
++ help
++ This option selects CONFIG_MMU_NOTIFIER if it isn't already
++ selected, to enable full userptr support.
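A minimal .config fragment that enables the driver together with both options above (an illustrative sketch; the option names are taken verbatim from this Kconfig, DRM_AMDGPU is tristate and the other two are bool):

    CONFIG_DRM_AMDGPU=m
    CONFIG_DRM_AMDGPU_CIK=y
    CONFIG_DRM_AMDGPU_USERPTR=y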
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+new file mode 100644
+index 0000000..01276a5
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -0,0 +1,49 @@
++#
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg
++
++amdgpu-y := amdgpu_drv.o
++
++# add KMS driver
++amdgpu-y += amdgpu_device.o amdgpu_kms.o \
++ amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
++ atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
++ amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
++ amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
++ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
++ amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
++ atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \
++ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
++ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
++
++# add IH block
++amdgpu-y += \
++ amdgpu_irq.o \
++ amdgpu_ih.o
++
++# add SMC block
++amdgpu-y += \
++ amdgpu_dpm.o
++
++# add GFX block
++amdgpu-y += \
++ amdgpu_gfx.o
++
++# add UVD block
++amdgpu-y += \
++ amdgpu_uvd.o
++
++# add VCE block
++amdgpu-y += \
++ amdgpu_vce.o
++
++amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
++amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
++amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
++amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
++
++obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
++
++CFLAGS_amdgpu_trace_points.o := -I$(src)
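A note on the kbuild idiom used above: amdgpu-$(CONFIG_FOO) += file.o appends the object to amdgpu-y only when the symbol is y, so the ACPI line, for example, is equivalent to this long-hand sketch:

    ifeq ($(CONFIG_ACPI),y)
    amdgpu-y += amdgpu_acpi.o
    endif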
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+new file mode 100644
+index 0000000..aef4a7a
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -0,0 +1,768 @@
++/*
++ * Copyright 2012 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/slab.h>
++#include <linux/power_supply.h>
++#include <linux/vga_switcheroo.h>
++#include <acpi/video.h>
++#include <drm/drmP.h>
++#include <drm/drm_crtc_helper.h>
++#include "amdgpu.h"
++#include "amdgpu_acpi.h"
++#include "atom.h"
++
++#define ACPI_AC_CLASS "ac_adapter"
++
++extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
++
++struct atif_verify_interface {
++ u16 size; /* structure size in bytes (includes size field) */
++ u16 version; /* version */
++ u32 notification_mask; /* supported notifications mask */
++ u32 function_bits; /* supported functions bit vector */
++} __packed;
++
++struct atif_system_params {
++ u16 size; /* structure size in bytes (includes size field) */
++ u32 valid_mask; /* valid flags mask */
++ u32 flags; /* flags */
++ u8 command_code; /* notify command code */
++} __packed;
++
++struct atif_sbios_requests {
++ u16 size; /* structure size in bytes (includes size field) */
++ u32 pending; /* pending sbios requests */
++ u8 panel_exp_mode; /* panel expansion mode */
++ u8 thermal_gfx; /* thermal state: target gfx controller */
++ u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
++ u8 forced_power_gfx; /* forced power state: target gfx controller */
++ u8 forced_power_state; /* forced power state: state id */
++ u8 system_power_src; /* system power source */
++ u8 backlight_level; /* panel backlight level (0-255) */
++} __packed;
++
++#define ATIF_NOTIFY_MASK 0x3
++#define ATIF_NOTIFY_NONE 0
++#define ATIF_NOTIFY_81 1
++#define ATIF_NOTIFY_N 2
++
++struct atcs_verify_interface {
++ u16 size; /* structure size in bytes (includes size field) */
++ u16 version; /* version */
++ u32 function_bits; /* supported functions bit vector */
++} __packed;
++
++#define ATCS_VALID_FLAGS_MASK 0x3
++
++struct atcs_pref_req_input {
++ u16 size; /* structure size in bytes (includes size field) */
++ u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
++ u16 valid_flags_mask; /* valid flags mask */
++ u16 flags; /* flags */
++ u8 req_type; /* request type */
++ u8 perf_req; /* performance request */
++} __packed;
++
++struct atcs_pref_req_output {
++ u16 size; /* structure size in bytes (includes size field) */
++ u8 ret_val; /* return value */
++} __packed;
++
++/* Call the ATIF method */
++/**
++ * amdgpu_atif_call - call an ATIF method
++ *
++ * @handle: acpi handle
++ * @function: the ATIF function to execute
++ * @params: ATIF function params
++ *
++ * Executes the requested ATIF function (all asics).
++ * Returns a pointer to the acpi output buffer.
++ */
++static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
++ struct acpi_buffer *params)
++{
++ acpi_status status;
++ union acpi_object atif_arg_elements[2];
++ struct acpi_object_list atif_arg;
++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
++
++ atif_arg.count = 2;
++ atif_arg.pointer = &atif_arg_elements[0];
++
++ atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
++ atif_arg_elements[0].integer.value = function;
++
++ if (params) {
++ atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
++ atif_arg_elements[1].buffer.length = params->length;
++ atif_arg_elements[1].buffer.pointer = params->pointer;
++ } else {
++ /* We need a second fake parameter */
++ atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
++ atif_arg_elements[1].integer.value = 0;
++ }
++
++ status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
++
++ /* Fail only if calling the method fails and ATIF is supported */
++ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
++ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
++ acpi_format_exception(status));
++ kfree(buffer.pointer);
++ return NULL;
++ }
++
++ return buffer.pointer;
++}
++
++/**
++ * amdgpu_atif_parse_notification - parse supported notifications
++ *
++ * @n: supported notifications struct
++ * @mask: supported notifications mask from ATIF
++ *
++ * Use the supported notifications mask from ATIF function
++ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
++ * are supported (all asics).
++ */
++static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
++{
++ n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
++ n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
++ n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
++ n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
++ n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
++ n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
++ n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
++ n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
++ n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
++}
++
++/**
++ * amdgpu_atif_parse_functions - parse supported functions
++ *
++ * @f: supported functions struct
++ * @mask: supported functions mask from ATIF
++ *
++ * Use the supported functions mask from ATIF function
++ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
++ * are supported (all asics).
++ */
++static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mask)
++{
++ f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
++ f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
++ f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
++ f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
++ f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
++ f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
++ f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
++ f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
++ f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
++ f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
++}
++
++/**
++ * amdgpu_atif_verify_interface - verify ATIF
++ *
++ * @handle: acpi handle
++ * @atif: amdgpu atif struct
++ *
++ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
++ * to initialize ATIF and determine what features are supported
++ * (all asics).
++ * returns 0 on success, error on failure.
++ */
++static int amdgpu_atif_verify_interface(acpi_handle handle,
++ struct amdgpu_atif *atif)
++{
++ union acpi_object *info;
++ struct atif_verify_interface output;
++ size_t size;
++ int err = 0;
++
++ info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
++ if (!info)
++ return -EIO;
++
++ memset(&output, 0, sizeof(output));
++
++ size = *(u16 *) info->buffer.pointer;
++ if (size < 12) {
++ DRM_INFO("ATIF buffer is too small: %zu\n", size);
++ err = -EINVAL;
++ goto out;
++ }
++ size = min(sizeof(output), size);
++
++ memcpy(&output, info->buffer.pointer, size);
++
++ /* TODO: check version? */
++ DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
++
++ amdgpu_atif_parse_notification(&atif->notifications, output.notification_mask);
++ amdgpu_atif_parse_functions(&atif->functions, output.function_bits);
++
++out:
++ kfree(info);
++ return err;
++}
++
++/**
++ * amdgpu_atif_get_notification_params - determine notify configuration
++ *
++ * @handle: acpi handle
++ * @n: atif notification configuration struct
++ *
++ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
++ * to determine if a notifier is used and if so which one
++ * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n)
++ * where n is specified in the result if a notifier is used.
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atif_get_notification_params(acpi_handle handle,
++ struct amdgpu_atif_notification_cfg *n)
++{
++ union acpi_object *info;
++ struct atif_system_params params;
++ size_t size;
++ int err = 0;
++
++ info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
++ if (!info) {
++ err = -EIO;
++ goto out;
++ }
++
++ size = *(u16 *) info->buffer.pointer;
++ if (size < 10) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ memset(&params, 0, sizeof(params));
++ size = min(sizeof(params), size);
++ memcpy(&params, info->buffer.pointer, size);
++
++ DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
++ params.valid_mask, params.flags);
++ params.flags = params.flags & params.valid_mask;
++
++ if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
++ n->enabled = false;
++ n->command_code = 0;
++ } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
++ n->enabled = true;
++ n->command_code = 0x81;
++ } else {
++ if (size < 11) {
++ err = -EINVAL;
++ goto out;
++ }
++ n->enabled = true;
++ n->command_code = params.command_code;
++ }
++
++out:
++ DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
++ (n->enabled ? "enabled" : "disabled"),
++ n->command_code);
++ kfree(info);
++ return err;
++}
++
++/**
++ * amdgpu_atif_get_sbios_requests - get requested sbios event
++ *
++ * @handle: acpi handle
++ * @req: atif sbios request struct
++ *
++ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
++ * to determine what requests the sbios is making to the driver
++ * (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
++ struct atif_sbios_requests *req)
++{
++ union acpi_object *info;
++ size_t size;
++ int count = 0;
++
++ info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
++ if (!info)
++ return -EIO;
++
++ size = *(u16 *)info->buffer.pointer;
++ if (size < 0xd) {
++ count = -EINVAL;
++ goto out;
++ }
++ memset(req, 0, sizeof(*req));
++
++ size = min(sizeof(*req), size);
++ memcpy(req, info->buffer.pointer, size);
++ DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
++
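++ /* hweight32() returns the number of set bits, so "count" below is
++ * simply how many request bits are pending.
++ */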
++ count = hweight32(req->pending);
++
++out:
++ kfree(info);
++ return count;
++}
++
++/**
++ * amdgpu_atif_handler - handle ATIF notify requests
++ *
++ * @adev: amdgpu_device pointer
++ * @event: atif sbios request struct
++ *
++ * Checks the acpi event and if it matches an atif event,
++ * handles it.
++ * Returns NOTIFY code
++ */
++int amdgpu_atif_handler(struct amdgpu_device *adev,
++ struct acpi_bus_event *event)
++{
++ struct amdgpu_atif *atif = &adev->atif;
++ struct atif_sbios_requests req;
++ acpi_handle handle;
++ int count;
++
++ DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
++ event->device_class, event->type);
++
++ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
++ return NOTIFY_DONE;
++
++ if (!atif->notification_cfg.enabled ||
++ event->type != atif->notification_cfg.command_code)
++ /* Not our event */
++ return NOTIFY_DONE;
++
++ /* Check pending SBIOS requests */
++ handle = ACPI_HANDLE(&adev->pdev->dev);
++ count = amdgpu_atif_get_sbios_requests(handle, &req);
++
++ if (count <= 0)
++ return NOTIFY_DONE;
++
++ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
++
++ if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
++ struct amdgpu_encoder *enc = atif->encoder_for_bl;
++
++ if (enc) {
++ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
++
++ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
++ req.backlight_level);
++
++ amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
++
++#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
++ backlight_force_update(dig->bl_dev,
++ BACKLIGHT_UPDATE_HOTKEY);
++#endif
++ }
++ }
++ /* TODO: check other events */
++
++ /* We've handled the event; stop the notifier chain. The ACPI interface
++ * overloads ACPI_VIDEO_NOTIFY_PROBE; we don't want to send that to
++ * userspace if the event was generated only to signal a SBIOS
++ * request.
++ */
++ return NOTIFY_BAD;
++}
++
++/* Call the ATCS method */
++/**
++ * amdgpu_atcs_call - call an ATCS method
++ *
++ * @handle: acpi handle
++ * @function: the ATCS function to execute
++ * @params: ATCS function params
++ *
++ * Executes the requested ATCS function (all asics).
++ * Returns a pointer to the acpi output buffer.
++ */
++static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
++ struct acpi_buffer *params)
++{
++ acpi_status status;
++ union acpi_object atcs_arg_elements[2];
++ struct acpi_object_list atcs_arg;
++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
++
++ atcs_arg.count = 2;
++ atcs_arg.pointer = &atcs_arg_elements[0];
++
++ atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
++ atcs_arg_elements[0].integer.value = function;
++
++ if (params) {
++ atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
++ atcs_arg_elements[1].buffer.length = params->length;
++ atcs_arg_elements[1].buffer.pointer = params->pointer;
++ } else {
++ /* We need a second fake parameter */
++ atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
++ atcs_arg_elements[1].integer.value = 0;
++ }
++
++ status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
++
++ /* Fail only if calling the method fails and ATCS is supported */
++ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
++ DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
++ acpi_format_exception(status));
++ kfree(buffer.pointer);
++ return NULL;
++ }
++
++ return buffer.pointer;
++}
++
++/**
++ * amdgpu_atcs_parse_functions - parse supported functions
++ *
++ * @f: supported functions struct
++ * @mask: supported functions mask from ATCS
++ *
++ * Use the supported functions mask from ATCS function
++ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
++ * are supported (all asics).
++ */
++static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mask)
++{
++ f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
++ f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
++ f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
++ f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
++}
++
++/**
++ * amdgpu_atcs_verify_interface - verify ATCS
++ *
++ * @handle: acpi handle
++ * @atcs: amdgpu atcs struct
++ *
++ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
++ * to initialize ATCS and determine what features are supported
++ * (all asics).
++ * returns 0 on success, error on failure.
++ */
++static int amdgpu_atcs_verify_interface(acpi_handle handle,
++ struct amdgpu_atcs *atcs)
++{
++ union acpi_object *info;
++ struct atcs_verify_interface output;
++ size_t size;
++ int err = 0;
++
++ info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
++ if (!info)
++ return -EIO;
++
++ memset(&output, 0, sizeof(output));
++
++ size = *(u16 *) info->buffer.pointer;
++ if (size < 8) {
++ DRM_INFO("ATCS buffer is too small: %zu\n", size);
++ err = -EINVAL;
++ goto out;
++ }
++ size = min(sizeof(output), size);
++
++ memcpy(&output, info->buffer.pointer, size);
++
++ /* TODO: check version? */
++ DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
++
++ amdgpu_atcs_parse_functions(&atcs->functions, output.function_bits);
++
++out:
++ kfree(info);
++ return err;
++}
++
++/**
++ * amdgpu_acpi_is_pcie_performance_request_supported
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
++ * are supported (all asics).
++ * returns true if supported, false if not.
++ */
++bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
++{
++ struct amdgpu_atcs *atcs = &adev->atcs;
++
++ if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
++ return true;
++
++ return false;
++}
++
++/**
++ * amdgpu_acpi_pcie_notify_device_ready
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Executes the PCIE_DEVICE_READY_NOTIFICATION method
++ * (all asics).
++ * returns 0 on success, error on failure.
++ */
++int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
++{
++ acpi_handle handle;
++ union acpi_object *info;
++ struct amdgpu_atcs *atcs = &adev->atcs;
++
++ /* Get the device handle */
++ handle = ACPI_HANDLE(&adev->pdev->dev);
++ if (!handle)
++ return -EINVAL;
++
++ if (!atcs->functions.pcie_dev_rdy)
++ return -EINVAL;
++
++ info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
++ if (!info)
++ return -EIO;
++
++ kfree(info);
++
++ return 0;
++}
++
++/**
++ * amdgpu_acpi_pcie_performance_request
++ *
++ * @adev: amdgpu_device pointer
++ * @perf_req: requested perf level (pcie gen speed)
++ * @advertise: set advertise caps flag if set
++ *
++ * Executes the PCIE_PERFORMANCE_REQUEST method to
++ * change the pcie gen speed (all asics).
++ * returns 0 on success, error on failure.
++ */
++int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
++ u8 perf_req, bool advertise)
++{
++ acpi_handle handle;
++ union acpi_object *info;
++ struct amdgpu_atcs *atcs = &adev->atcs;
++ struct atcs_pref_req_input atcs_input;
++ struct atcs_pref_req_output atcs_output;
++ struct acpi_buffer params;
++ size_t size;
++ u32 retry = 3;
++
++ /* Get the device handle */
++ handle = ACPI_HANDLE(&adev->pdev->dev);
++ if (!handle)
++ return -EINVAL;
++
++ if (!atcs->functions.pcie_perf_req)
++ return -EINVAL;
++
++ atcs_input.size = sizeof(struct atcs_pref_req_input);
++ /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
++ atcs_input.client_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
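++ /* e.g. bus 0x03, device 2, function 1: devfn = (2 << 3) | 1 = 0x11,
++ * so client_id = 0x0311
++ */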
++ atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
++ atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
++ if (advertise)
++ atcs_input.flags |= ATCS_ADVERTISE_CAPS;
++ atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
++ atcs_input.perf_req = perf_req;
++
++ params.length = sizeof(struct atcs_pref_req_input);
++ params.pointer = &atcs_input;
++
++ while (retry--) {
++ info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
++ if (!info)
++ return -EIO;
++
++ memset(&atcs_output, 0, sizeof(atcs_output));
++
++ size = *(u16 *) info->buffer.pointer;
++ if (size < 3) {
++ DRM_INFO("ATCS buffer is too small: %zu\n", size);
++ kfree(info);
++ return -EINVAL;
++ }
++ size = min(sizeof(atcs_output), size);
++
++ memcpy(&atcs_output, info->buffer.pointer, size);
++
++ kfree(info);
++
++ switch (atcs_output.ret_val) {
++ case ATCS_REQUEST_REFUSED:
++ default:
++ return -EINVAL;
++ case ATCS_REQUEST_COMPLETE:
++ return 0;
++ case ATCS_REQUEST_IN_PROGRESS:
++ udelay(10);
++ break;
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_acpi_event - handle notify events
++ *
++ * @nb: notifier block
++ * @val: val
++ * @data: acpi event
++ *
++ * Calls relevant amdgpu functions in response to various
++ * acpi events.
++ * Returns NOTIFY code
++ */
++static int amdgpu_acpi_event(struct notifier_block *nb,
++ unsigned long val,
++ void *data)
++{
++ struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, acpi_nb);
++ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
++
++ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
++ if (power_supply_is_system_supplied() > 0)
++ DRM_DEBUG_DRIVER("pm: AC\n");
++ else
++ DRM_DEBUG_DRIVER("pm: DC\n");
++
++ amdgpu_pm_acpi_event_handler(adev);
++ }
++
++ /* Check for pending SBIOS requests */
++ return amdgpu_atif_handler(adev, entry);
++}
++
++/* Call all ACPI methods here */
++/**
++ * amdgpu_acpi_init - init driver acpi support
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Verifies the AMD ACPI interfaces and registers with the acpi
++ * notifier chain (all asics).
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_acpi_init(struct amdgpu_device *adev)
++{
++ acpi_handle handle;
++ struct amdgpu_atif *atif = &adev->atif;
++ struct amdgpu_atcs *atcs = &adev->atcs;
++ int ret;
++
++ /* Get the device handle */
++ handle = ACPI_HANDLE(&adev->pdev->dev);
++
++ if (!adev->bios || !handle)
++ return 0;
++
++ /* Call the ATCS method */
++ ret = amdgpu_atcs_verify_interface(handle, atcs);
++ if (ret) {
++ DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
++ }
++
++ /* Call the ATIF method */
++ ret = amdgpu_atif_verify_interface(handle, atif);
++ if (ret) {
++ DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
++ goto out;
++ }
++
++ if (atif->notifications.brightness_change) {
++ struct drm_encoder *tmp;
++
++ /* Find the encoder controlling the brightness */
++ list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
++ head) {
++ struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
++
++ if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
++ enc->enc_priv) {
++ if (adev->is_atom_bios) {
++ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
++ if (dig->bl_dev) {
++ atif->encoder_for_bl = enc;
++ break;
++ }
++ }
++ }
++ }
++ }
++
++ if (atif->functions.sbios_requests && !atif->functions.system_params) {
++ /* XXX check this workaround: if the sbios request function is
++ * present, we have to see how it's configured in the system
++ * params
++ */
++ atif->functions.system_params = true;
++ }
++
++ if (atif->functions.system_params) {
++ ret = amdgpu_atif_get_notification_params(handle,
++ &atif->notification_cfg);
++ if (ret) {
++ DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
++ ret);
++ /* Disable notification */
++ atif->notification_cfg.enabled = false;
++ }
++ }
++
++out:
++ adev->acpi_nb.notifier_call = amdgpu_acpi_event;
++ register_acpi_notifier(&adev->acpi_nb);
++
++ return ret;
++}
++
++/**
++ * amdgpu_acpi_fini - tear down driver acpi support
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Unregisters with the acpi notifier chain (all asics).
++ */
++void amdgpu_acpi_fini(struct amdgpu_device *adev)
++{
++ unregister_acpi_notifier(&adev->acpi_nb);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h
+new file mode 100644
+index 0000000..01a29c3
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h
+@@ -0,0 +1,445 @@
++/*
++ * Copyright 2012 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef AMDGPU_ACPI_H
++#define AMDGPU_ACPI_H
++
++struct amdgpu_device;
++struct acpi_bus_event;
++
++int amdgpu_atif_handler(struct amdgpu_device *adev,
++ struct acpi_bus_event *event);
++
++/* AMD hw uses four ACPI control methods:
++ * 1. ATIF
++ * ARG0: (ACPI_INTEGER) function code
++ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
++ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
++ * ATIF provides an entry point for the gfx driver to interact with the sbios.
++ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
++ * notification. Which notification is used as indicated by the ATIF Control
++ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
++ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
++ * to identify pending System BIOS requests and associated parameters. For
++ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
++ * will perform display device detection and invoke ATIF Control Method
++ * SELECT_ACTIVE_DISPLAYS.
++ *
++ * 2. ATPX
++ * ARG0: (ACPI_INTEGER) function code
++ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
++ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
++ * ATPX methods are used on PowerXpress systems to handle mux switching and
++ * discrete GPU power control.
++ *
++ * 3. ATRM
++ * ARG0: (ACPI_INTEGER) offset of vbios rom data
++ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
++ * OUTPUT: (ACPI_BUFFER) output buffer
++ * ATRM provides an interface to access the discrete GPU vbios image on
++ * PowerXpress systems with multiple GPUs.
++ *
++ * 4. ATCS
++ * ARG0: (ACPI_INTEGER) function code
++ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
++ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
++ * ATCS provides an interface to AMD chipset specific functionality.
++ *
++ */
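++/* Illustrative sketch (an assumption, not part of this interface header):
++ * ATRM is not implemented in this file, but the existing radeon driver
++ * evaluates it with two integer arguments (offset, length) and copies the
++ * returned buffer; error handling elided, and atrm_handle, offset, len and
++ * bios are assumed caller-supplied:
++ *
++ *	union acpi_object atrm_arg_elements[2], *obj;
++ *	struct acpi_object_list atrm_arg;
++ *	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
++ *
++ *	atrm_arg.count = 2;
++ *	atrm_arg.pointer = &atrm_arg_elements[0];
++ *	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
++ *	atrm_arg_elements[0].integer.value = offset;
++ *	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
++ *	atrm_arg_elements[1].integer.value = len;
++ *	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
++ *	obj = (union acpi_object *)buffer.pointer;
++ *	memcpy(bios + offset, obj->buffer.pointer, obj->buffer.length);
++ *	kfree(buffer.pointer);
++ */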
++/* ATIF */
++#define ATIF_FUNCTION_VERIFY_INTERFACE 0x0
++/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - version
++ * DWORD - supported notifications mask
++ * DWORD - supported functions bit vector
++ */
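++/* Note: the four fields above total 2 + 2 + 4 + 4 = 12 bytes, which is
++ * exactly the "size < 12" sanity check in amdgpu_atif_verify_interface().
++ */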
++/* Notifications mask */
++# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
++# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
++# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
++# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
++# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
++# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
++# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
++# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
++# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
++/* supported functions vector */
++# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
++# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
++# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
++# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
++# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
++# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
++# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
++# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
++# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
++# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
++#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
++/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * DWORD - valid flags mask
++ * DWORD - flags
++ *
++ * OR
++ *
++ * WORD - structure size in bytes (includes size field)
++ * DWORD - valid flags mask
++ * DWORD - flags
++ * BYTE - notify command code
++ *
++ * flags
++ * bits 1:0:
++ * 0 - Notify(VGA, 0x81) is not used for notification
++ * 1 - Notify(VGA, 0x81) is used for notification
++ * 2 - Notify(VGA, n) is used for notification where
++ * n (0xd0-0xd9) is specified in notify command code.
++ * bit 2:
++ * 1 - lid changes not reported through int10
++ */
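++/* Note: bits 1:0 above are what ATIF_NOTIFY_MASK extracts in
++ * amdgpu_atif_get_notification_params(); only the Notify(VGA, n) case
++ * needs the longer output form, hence the extra "size < 11" check there.
++ */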
++#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
++/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * DWORD - pending sbios requests
++ * BYTE - panel expansion mode
++ * BYTE - thermal state: target gfx controller
++ * BYTE - thermal state: state id (0: exit state, non-0: state)
++ * BYTE - forced power state: target gfx controller
++ * BYTE - forced power state: state id
++ * BYTE - system power source
++ * BYTE - panel backlight level (0-255)
++ */
++/* pending sbios requests */
++# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
++# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
++# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
++# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
++# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
++# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
++# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
++# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
++# define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
++/* panel expansion mode */
++# define ATIF_PANEL_EXPANSION_DISABLE 0
++# define ATIF_PANEL_EXPANSION_FULL 1
++# define ATIF_PANEL_EXPANSION_ASPECT 2
++/* target gfx controller */
++# define ATIF_TARGET_GFX_SINGLE 0
++# define ATIF_TARGET_GFX_PX_IGPU 1
++# define ATIF_TARGET_GFX_PX_DGPU 2
++/* system power source */
++# define ATIF_POWER_SOURCE_AC 1
++# define ATIF_POWER_SOURCE_DC 2
++# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
++# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
++#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
++/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - selected displays
++ * WORD - connected displays
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - selected displays
++ */
++# define ATIF_LCD1 (1 << 0)
++# define ATIF_CRT1 (1 << 1)
++# define ATIF_TV (1 << 2)
++# define ATIF_DFP1 (1 << 3)
++# define ATIF_CRT2 (1 << 4)
++# define ATIF_LCD2 (1 << 5)
++# define ATIF_DFP2 (1 << 7)
++# define ATIF_CV (1 << 8)
++# define ATIF_DFP3 (1 << 9)
++# define ATIF_DFP4 (1 << 10)
++# define ATIF_DFP5 (1 << 11)
++# define ATIF_DFP6 (1 << 12)
++#define ATIF_FUNCTION_GET_LID_STATE 0x4
++/* ARG0: ATIF_FUNCTION_GET_LID_STATE
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * BYTE - lid state (0: open, 1: closed)
++ *
++ * GET_LID_STATE only works at boot and resume, for general lid
++ * status, use the kernel provided status
++ */
++#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
++/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * BYTE - 0
++ * BYTE - TV standard
++ */
++# define ATIF_TV_STD_NTSC 0
++# define ATIF_TV_STD_PAL 1
++# define ATIF_TV_STD_PALM 2
++# define ATIF_TV_STD_PAL60 3
++# define ATIF_TV_STD_NTSCJ 4
++# define ATIF_TV_STD_PALCN 5
++# define ATIF_TV_STD_PALN 6
++# define ATIF_TV_STD_SCART_RGB 9
++#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
++/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * BYTE - 0
++ * BYTE - TV standard
++ * OUTPUT: none
++ */
++#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
++/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * BYTE - panel expansion mode
++ */
++#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
++/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * BYTE - panel expansion mode
++ * OUTPUT: none
++ */
++#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
++/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - gfx controller id
++ * BYTE - current temperature (degrees Celsius)
++ * OUTPUT: none
++ */
++#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
++/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
++ * ARG1: none
++ * OUTPUT:
++ * WORD - number of gfx devices
++ * WORD - device structure size in bytes (excludes device size field)
++ * DWORD - flags \
++ * WORD - bus number } repeated structure
++ * WORD - device number /
++ */
++/* flags */
++# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
++# define ATIF_XGP_PORT (1 << 1)
++# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
++# define ATIF_XGP_PORT_IN_DOCK (1 << 3)
++
++/* ATPX */
++#define ATPX_FUNCTION_VERIFY_INTERFACE 0x0
++/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - version
++ * DWORD - supported functions bit vector
++ */
++/* supported functions vector */
++# define ATPX_GET_PX_PARAMETERS_SUPPORTED (1 << 0)
++# define ATPX_POWER_CONTROL_SUPPORTED (1 << 1)
++# define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED (1 << 2)
++# define ATPX_I2C_MUX_CONTROL_SUPPORTED (1 << 3)
++# define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
++# define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED (1 << 5)
++# define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED (1 << 7)
++# define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED (1 << 8)
++#define ATPX_FUNCTION_GET_PX_PARAMETERS 0x1
++/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * DWORD - valid flags mask
++ * DWORD - flags
++ */
++/* flags */
++# define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 0)
++# define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 1)
++# define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 2)
++# define ATPX_CRT1_RGB_SIGNAL_MUXED (1 << 3)
++# define ATPX_TV_SIGNAL_MUXED (1 << 4)
++# define ATPX_DFP_SIGNAL_MUXED (1 << 5)
++# define ATPX_SEPARATE_MUX_FOR_I2C (1 << 6)
++# define ATPX_DYNAMIC_PX_SUPPORTED (1 << 7)
++# define ATPX_ACF_NOT_SUPPORTED (1 << 8)
++# define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
++# define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
++# define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
++#define ATPX_FUNCTION_POWER_CONTROL 0x2
++/* ARG0: ATPX_FUNCTION_POWER_CONTROL
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * BYTE - dGPU power state (0: power off, 1: power on)
++ * OUTPUT: none
++ */
++#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL 0x3
++/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - display mux control (0: iGPU, 1: dGPU)
++ * OUTPUT: none
++ */
++# define ATPX_INTEGRATED_GPU 0
++# define ATPX_DISCRETE_GPU 1
++#define ATPX_FUNCTION_I2C_MUX_CONTROL 0x4
++/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
++ * OUTPUT: none
++ */
++#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION 0x5
++/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - target gpu (0: iGPU, 1: dGPU)
++ * OUTPUT: none
++ */
++#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION 0x6
++/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - target gpu (0: iGPU, 1: dGPU)
++ * OUTPUT: none
++ */
++#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING 0x8
++/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
++ * ARG1: none
++ * OUTPUT:
++ * WORD - number of display connectors
++ * WORD - connector structure size in bytes (excludes connector size field)
++ * BYTE - flags \
++ * BYTE - ATIF display vector bit position } repeated
++ * BYTE - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
++ * WORD - connector ACPI id /
++ */
++/* flags */
++# define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 0)
++# define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 1)
++# define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 2)
++#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS 0x9
++/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
++ * ARG1: none
++ * OUTPUT:
++ * WORD - number of HPD/DDC ports
++ * WORD - port structure size in bytes (excludes port size field)
++ * BYTE - ATIF display vector bit position \
++ * BYTE - hpd id } repeated structure
++ * BYTE - ddc id /
++ *
++ * available on A+A systems only
++ */
++/* hpd id */
++# define ATPX_HPD_NONE 0
++# define ATPX_HPD1 1
++# define ATPX_HPD2 2
++# define ATPX_HPD3 3
++# define ATPX_HPD4 4
++# define ATPX_HPD5 5
++# define ATPX_HPD6 6
++/* ddc id */
++# define ATPX_DDC_NONE 0
++# define ATPX_DDC1 1
++# define ATPX_DDC2 2
++# define ATPX_DDC3 3
++# define ATPX_DDC4 4
++# define ATPX_DDC5 5
++# define ATPX_DDC6 6
++# define ATPX_DDC7 7
++# define ATPX_DDC8 8
++
++/* ATCS */
++#define ATCS_FUNCTION_VERIFY_INTERFACE 0x0
++/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - version
++ * DWORD - supported functions bit vector
++ */
++/* supported functions vector */
++# define ATCS_GET_EXTERNAL_STATE_SUPPORTED (1 << 0)
++# define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1)
++# define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2)
++# define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3)
++#define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1
++/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
++ * ARG1: none
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * DWORD - valid flags mask
++ * DWORD - flags (0: undocked, 1: docked)
++ */
++/* flags */
++# define ATCS_DOCKED (1 << 0)
++#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST 0x2
++/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
++ * WORD - valid flags mask
++ * WORD - flags
++ * BYTE - request type
++ * BYTE - performance request
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * BYTE - return value
++ */
++/* flags */
++# define ATCS_ADVERTISE_CAPS (1 << 0)
++# define ATCS_WAIT_FOR_COMPLETION (1 << 1)
++/* request type */
++# define ATCS_PCIE_LINK_SPEED 1
++/* performance request */
++# define ATCS_REMOVE 0
++# define ATCS_FORCE_LOW_POWER 1
++# define ATCS_PERF_LEVEL_1 2 /* PCIE Gen 1 */
++# define ATCS_PERF_LEVEL_2 3 /* PCIE Gen 2 */
++# define ATCS_PERF_LEVEL_3 4 /* PCIE Gen 3 */
++/* return value */
++# define ATCS_REQUEST_REFUSED 1
++# define ATCS_REQUEST_COMPLETE 2
++# define ATCS_REQUEST_IN_PROGRESS 3
++#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION 0x3
++/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
++ * ARG1: none
++ * OUTPUT: none
++ */
++#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH 0x4
++/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
++ * ARG1:
++ * WORD - structure size in bytes (includes size field)
++ * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
++ * BYTE - number of active lanes
++ * OUTPUT:
++ * WORD - structure size in bytes (includes size field)
++ * BYTE - number of active lanes
++ */
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+new file mode 100644
+index 0000000..857ba08
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+@@ -0,0 +1,105 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Christian König.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Christian König
++ */
++#include <linux/hdmi.h>
++#include <linux/gcd.h>
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++
++static const struct amdgpu_afmt_acr amdgpu_afmt_predefined_acr[] = {
++ /* 32kHz 44.1kHz 48kHz */
++ /* Clock N CTS N CTS N CTS */
++ { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25.20/1.001 MHz */
++ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
++ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
++ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
++ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
++ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
++ { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
++ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
++ { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
++ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
++};
++
++
++/*
++ * calculate CTS and N values if they are not found in the table
++ */
++static void amdgpu_afmt_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
++{
++ int n, cts;
++ unsigned long div, mul;
++
++ /* Safe, but overly large values */
++ n = 128 * freq;
++ cts = clock * 1000;
++
++ /* Smallest valid fraction */
++ div = gcd(n, cts);
++
++ n /= div;
++ cts /= div;
++
++ /*
++ * The optimal N is 128*freq/1000. Calculate the closest larger
++ * value that doesn't truncate any bits.
++ */
++ mul = ((128*freq/1000) + (n-1))/n;
++
++ n *= mul;
++ cts *= mul;
++
++ /* Check that we are in spec (not always possible) */
++ if (n < (128*freq/1500))
++ printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
++ if (n > (128*freq/300))
++ printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
++
++ *N = n;
++ *CTS = cts;
++
++ DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
++ *N, *CTS, freq);
++}
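++/* Worked example: for clock = 74176 and freq = 44100, n = 128 * 44100 =
++ * 5644800 and cts = 74176000; gcd = 12800 reduces these to n = 441,
++ * cts = 5795.  The optimal N is 128 * 44100 / 1000 = 5644, so
++ * mul = (5644 + 440) / 441 = 13, giving N = 5733 and CTS = 75335,
++ * matching the 44.1kHz entry for 74176 in the table above.
++ */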
++
++struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
++{
++ struct amdgpu_afmt_acr res;
++ u8 i;
++
++ /* Precalculated values for common clocks */
++ for (i = 0; i < ARRAY_SIZE(amdgpu_afmt_predefined_acr); i++) {
++ if (amdgpu_afmt_predefined_acr[i].clock == clock)
++ return amdgpu_afmt_predefined_acr[i];
++ }
++
++ /* And odd clocks get manually calculated */
++ amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
++ amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
++ amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
++
++ return res;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+new file mode 100644
+index 0000000..6a58837
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -0,0 +1,1598 @@
++/*
++ * Copyright 2007-8 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_atombios.h"
++#include "amdgpu_i2c.h"
++
++#include "atom.h"
++#include "atom-bits.h"
++#include "atombios_encoders.h"
++#include "bif/bif_4_1_d.h"
++
++static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
++ ATOM_GPIO_I2C_ASSIGMENT *gpio,
++ u8 index)
++{
++
++}
++
++static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
++{
++ struct amdgpu_i2c_bus_rec i2c;
++
++ memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
++
++ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex);
++ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex);
++ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex);
++ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex);
++ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex);
++ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex);
++ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex);
++ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex);
++ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
++ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
++ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
++ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
++ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
++ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
++ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
++ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
++
++ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
++ i2c.hw_capable = true;
++ else
++ i2c.hw_capable = false;
++
++ if (gpio->sucI2cId.ucAccess == 0xa0)
++ i2c.mm_i2c = true;
++ else
++ i2c.mm_i2c = false;
++
++ i2c.i2c_id = gpio->sucI2cId.ucAccess;
++
++ if (i2c.mask_clk_reg)
++ i2c.valid = true;
++ else
++ i2c.valid = false;
++
++ return i2c;
++}
++
++struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
++ uint8_t id)
++{
++ struct atom_context *ctx = adev->mode_info.atom_context;
++ ATOM_GPIO_I2C_ASSIGMENT *gpio;
++ struct amdgpu_i2c_bus_rec i2c;
++ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
++ struct _ATOM_GPIO_I2C_INFO *i2c_info;
++ uint16_t data_offset, size;
++ int i, num_indices;
++
++ memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
++ i2c.valid = false;
++
++ if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
++ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
++
++ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
++ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
++
++ gpio = &i2c_info->asGPIO_Info[0];
++ for (i = 0; i < num_indices; i++) {
++
++ amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
++
++ if (gpio->sucI2cId.ucAccess == id) {
++ i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
++ break;
++ }
++ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
++ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
++ }
++ }
++
++ return i2c;
++}
++
++void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
++{
++ struct atom_context *ctx = adev->mode_info.atom_context;
++ ATOM_GPIO_I2C_ASSIGMENT *gpio;
++ struct amdgpu_i2c_bus_rec i2c;
++ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
++ struct _ATOM_GPIO_I2C_INFO *i2c_info;
++ uint16_t data_offset, size;
++ int i, num_indices;
++ char stmp[32];
++
++ if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
++ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
++
++ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
++ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
++
++ gpio = &i2c_info->asGPIO_Info[0];
++ for (i = 0; i < num_indices; i++) {
++ amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
++
++ i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
++
++ if (i2c.valid) {
++ sprintf(stmp, "0x%x", i2c.i2c_id);
++ adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
++ }
++ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
++ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
++ }
++ }
++}
++
++struct amdgpu_gpio_rec
++amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
++ u8 id)
++{
++ struct atom_context *ctx = adev->mode_info.atom_context;
++ struct amdgpu_gpio_rec gpio;
++ int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
++ struct _ATOM_GPIO_PIN_LUT *gpio_info;
++ ATOM_GPIO_PIN_ASSIGNMENT *pin;
++ u16 data_offset, size;
++ int i, num_indices;
++
++ memset(&gpio, 0, sizeof(struct amdgpu_gpio_rec));
++ gpio.valid = false;
++
++ if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
++ gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
++
++ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
++ sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
++
++ pin = gpio_info->asGPIO_Pin;
++ for (i = 0; i < num_indices; i++) {
++ if (id == pin->ucGPIO_ID) {
++ gpio.id = pin->ucGPIO_ID;
++ gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex);
++ gpio.shift = pin->ucGpioPinBitShift;
++ gpio.mask = (1 << pin->ucGpioPinBitShift);
++ gpio.valid = true;
++ break;
++ }
++ pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
++ ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
++ }
++ }
++
++ return gpio;
++}
++
++static struct amdgpu_hpd
++amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
++ struct amdgpu_gpio_rec *gpio)
++{
++ struct amdgpu_hpd hpd;
++ u32 reg;
++
++ memset(&hpd, 0, sizeof(struct amdgpu_hpd));
++
++ reg = amdgpu_display_hpd_get_gpio_reg(adev);
++
++ hpd.gpio = *gpio;
++ if (gpio->reg == reg) {
++ switch(gpio->mask) {
++ case (1 << 0):
++ hpd.hpd = AMDGPU_HPD_1;
++ break;
++ case (1 << 8):
++ hpd.hpd = AMDGPU_HPD_2;
++ break;
++ case (1 << 16):
++ hpd.hpd = AMDGPU_HPD_3;
++ break;
++ case (1 << 24):
++ hpd.hpd = AMDGPU_HPD_4;
++ break;
++ case (1 << 26):
++ hpd.hpd = AMDGPU_HPD_5;
++ break;
++ case (1 << 28):
++ hpd.hpd = AMDGPU_HPD_6;
++ break;
++ default:
++ hpd.hpd = AMDGPU_HPD_NONE;
++ break;
++ }
++ } else
++ hpd.hpd = AMDGPU_HPD_NONE;
++ return hpd;
++}
++
++static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev,
++ uint32_t supported_device,
++ int *connector_type,
++ struct amdgpu_i2c_bus_rec *i2c_bus,
++ uint16_t *line_mux,
++ struct amdgpu_hpd *hpd)
++{
++ return true;
++}
++
++static const int object_connector_convert[] = {
++ DRM_MODE_CONNECTOR_Unknown,
++ DRM_MODE_CONNECTOR_DVII,
++ DRM_MODE_CONNECTOR_DVII,
++ DRM_MODE_CONNECTOR_DVID,
++ DRM_MODE_CONNECTOR_DVID,
++ DRM_MODE_CONNECTOR_VGA,
++ DRM_MODE_CONNECTOR_Composite,
++ DRM_MODE_CONNECTOR_SVIDEO,
++ DRM_MODE_CONNECTOR_Unknown,
++ DRM_MODE_CONNECTOR_Unknown,
++ DRM_MODE_CONNECTOR_9PinDIN,
++ DRM_MODE_CONNECTOR_Unknown,
++ DRM_MODE_CONNECTOR_HDMIA,
++ DRM_MODE_CONNECTOR_HDMIB,
++ DRM_MODE_CONNECTOR_LVDS,
++ DRM_MODE_CONNECTOR_9PinDIN,
++ DRM_MODE_CONNECTOR_Unknown,
++ DRM_MODE_CONNECTOR_Unknown,
++ DRM_MODE_CONNECTOR_Unknown,
++ DRM_MODE_CONNECTOR_DisplayPort,
++ DRM_MODE_CONNECTOR_eDP,
++ DRM_MODE_CONNECTOR_Unknown
++};
++
++bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
++{
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ struct atom_context *ctx = mode_info->atom_context;
++ int index = GetIndexIntoMasterTable(DATA, Object_Header);
++ u16 size, data_offset;
++ u8 frev, crev;
++ ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
++ ATOM_ENCODER_OBJECT_TABLE *enc_obj;
++ ATOM_OBJECT_TABLE *router_obj;
++ ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
++ ATOM_OBJECT_HEADER *obj_header;
++ int i, j, k, path_size, device_support;
++ int connector_type;
++ u16 conn_id, connector_object_id;
++ struct amdgpu_i2c_bus_rec ddc_bus;
++ struct amdgpu_router router;
++ struct amdgpu_gpio_rec gpio;
++ struct amdgpu_hpd hpd;
++
++ if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
++ return false;
++
++ if (crev < 2)
++ return false;
++
++ obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
++ path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
++ (ctx->bios + data_offset +
++ le16_to_cpu(obj_header->usDisplayPathTableOffset));
++ con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
++ (ctx->bios + data_offset +
++ le16_to_cpu(obj_header->usConnectorObjectTableOffset));
++ enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
++ (ctx->bios + data_offset +
++ le16_to_cpu(obj_header->usEncoderObjectTableOffset));
++ router_obj = (ATOM_OBJECT_TABLE *)
++ (ctx->bios + data_offset +
++ le16_to_cpu(obj_header->usRouterObjectTableOffset));
++ device_support = le16_to_cpu(obj_header->usDeviceSupport);
++
++ path_size = 0;
++ for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
++ uint8_t *addr = (uint8_t *) path_obj->asDispPath;
++ ATOM_DISPLAY_OBJECT_PATH *path;
++ addr += path_size;
++ path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
++ path_size += le16_to_cpu(path->usSize);
++
++ if (device_support & le16_to_cpu(path->usDeviceTag)) {
++ uint8_t con_obj_id, con_obj_num, con_obj_type;
++
++ con_obj_id =
++ (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
++ >> OBJECT_ID_SHIFT;
++ con_obj_num =
++ (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
++ >> ENUM_ID_SHIFT;
++ con_obj_type =
++ (le16_to_cpu(path->usConnObjectId) &
++ OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
++
++ connector_type =
++ object_connector_convert[con_obj_id];
++ connector_object_id = con_obj_id;
++
++ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
++ continue;
++
++ router.ddc_valid = false;
++ router.cd_valid = false;
++ for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
++ uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
++
++ grph_obj_id =
++ (le16_to_cpu(path->usGraphicObjIds[j]) &
++ OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
++ grph_obj_num =
++ (le16_to_cpu(path->usGraphicObjIds[j]) &
++ ENUM_ID_MASK) >> ENUM_ID_SHIFT;
++ grph_obj_type =
++ (le16_to_cpu(path->usGraphicObjIds[j]) &
++ OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
++
++ if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
++ for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
++ u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
++ if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
++ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
++ (ctx->bios + data_offset +
++ le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
++ ATOM_ENCODER_CAP_RECORD *cap_record;
++ u16 caps = 0;
++
++ while (record->ucRecordSize > 0 &&
++ record->ucRecordType > 0 &&
++ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
++ switch (record->ucRecordType) {
++ case ATOM_ENCODER_CAP_RECORD_TYPE:
++ cap_record = (ATOM_ENCODER_CAP_RECORD *)
++ record;
++ caps = le16_to_cpu(cap_record->usEncoderCap);
++ break;
++ }
++ record = (ATOM_COMMON_RECORD_HEADER *)
++ ((char *)record + record->ucRecordSize);
++ }
++ amdgpu_display_add_encoder(adev, encoder_obj,
++ le16_to_cpu(path->usDeviceTag),
++ caps);
++ }
++ }
++ } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
++ for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
++ u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
++ if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
++ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
++ (ctx->bios + data_offset +
++ le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
++ ATOM_I2C_RECORD *i2c_record;
++ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
++ ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
++ ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
++ ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
++ (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
++ (ctx->bios + data_offset +
++ le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
++ u8 *num_dst_objs = (u8 *)
++ ((u8 *)router_src_dst_table + 1 +
++ (router_src_dst_table->ucNumberOfSrc * 2));
++ u16 *dst_objs = (u16 *)(num_dst_objs + 1);
++ int enum_id;
++
++ router.router_id = router_obj_id;
++ for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
++ if (le16_to_cpu(path->usConnObjectId) ==
++ le16_to_cpu(dst_objs[enum_id]))
++ break;
++ }
++
++ while (record->ucRecordSize > 0 &&
++ record->ucRecordType > 0 &&
++ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
++ switch (record->ucRecordType) {
++ case ATOM_I2C_RECORD_TYPE:
++ i2c_record =
++ (ATOM_I2C_RECORD *)
++ record;
++ i2c_config =
++ (ATOM_I2C_ID_CONFIG_ACCESS *)
++ &i2c_record->sucI2cId;
++ router.i2c_info =
++ amdgpu_atombios_lookup_i2c_gpio(adev, i2c_config->ucAccess);
++ router.i2c_addr = i2c_record->ucI2CAddr >> 1;
++ break;
++ case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
++ ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
++ record;
++ router.ddc_valid = true;
++ router.ddc_mux_type = ddc_path->ucMuxType;
++ router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
++ router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
++ break;
++ case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
++ cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
++ record;
++ router.cd_valid = true;
++ router.cd_mux_type = cd_path->ucMuxType;
++ router.cd_mux_control_pin = cd_path->ucMuxControlPin;
++ router.cd_mux_state = cd_path->ucMuxState[enum_id];
++ break;
++ }
++ record = (ATOM_COMMON_RECORD_HEADER *)
++ ((char *)record + record->ucRecordSize);
++ }
++ }
++ }
++ }
++ }
++
++ /* look up gpio for ddc, hpd */
++ ddc_bus.valid = false;
++ hpd.hpd = AMDGPU_HPD_NONE;
++ if ((le16_to_cpu(path->usDeviceTag) &
++ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
++ for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
++ if (le16_to_cpu(path->usConnObjectId) ==
++     le16_to_cpu(con_obj->asObjects[j].usObjectID)) {
++ ATOM_COMMON_RECORD_HEADER *record =
++ (ATOM_COMMON_RECORD_HEADER *)
++ (ctx->bios + data_offset +
++  le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
++ ATOM_I2C_RECORD *i2c_record;
++ ATOM_HPD_INT_RECORD *hpd_record;
++ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
++
++ while (record->ucRecordSize > 0 &&
++ record->ucRecordType > 0 &&
++ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
++ switch (record->ucRecordType) {
++ case ATOM_I2C_RECORD_TYPE:
++ i2c_record =
++ (ATOM_I2C_RECORD *)
++ record;
++ i2c_config =
++ (ATOM_I2C_ID_CONFIG_ACCESS *)
++ &i2c_record->sucI2cId;
++ ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev,
++ i2c_config->ucAccess);
++ break;
++ case ATOM_HPD_INT_RECORD_TYPE:
++ hpd_record =
++ (ATOM_HPD_INT_RECORD *)
++ record;
++ gpio = amdgpu_atombios_lookup_gpio(adev,
++ hpd_record->ucHPDIntGPIOID);
++ hpd = amdgpu_atombios_get_hpd_info_from_gpio(adev, &gpio);
++ hpd.plugged_state = hpd_record->ucPlugged_PinState;
++ break;
++ }
++ record = (ATOM_COMMON_RECORD_HEADER *)
++ ((char *)record + record->ucRecordSize);
++ }
++ break;
++ }
++ }
++ }
++
++ /* needed for aux chan transactions */
++ ddc_bus.hpd = hpd.hpd;
++
++ conn_id = le16_to_cpu(path->usConnObjectId);
++
++ if (!amdgpu_atombios_apply_quirks
++ (adev, le16_to_cpu(path->usDeviceTag), &connector_type,
++ &ddc_bus, &conn_id, &hpd))
++ continue;
++
++ amdgpu_display_add_connector(adev,
++ conn_id,
++ le16_to_cpu(path->usDeviceTag),
++ connector_type, &ddc_bus,
++ connector_object_id,
++ &hpd,
++ &router);
++
++ }
++ }
++
++ amdgpu_link_encoder_connector(adev->ddev);
++
++ return true;
++}
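
The object-table walk above, like the record loops inside it, advances through variable-length AtomBIOS records by reading a common header and stepping forward by each record's own size until a zero size or an out-of-range type terminates the list. A standalone sketch of that iteration pattern, using a made-up two-byte header rather than the real ATOM_COMMON_RECORD_HEADER layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical two-byte record header; the real ATOM_COMMON_RECORD_HEADER
 * carries ucRecordType and ucRecordSize the same way. */
struct rec_hdr {
	uint8_t type;
	uint8_t size; /* total record size in bytes, header included */
};

static void walk_records(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct rec_hdr) <= len) {
		const struct rec_hdr *hdr = (const struct rec_hdr *)(buf + off);

		if (hdr->size == 0 || hdr->type == 0)
			break; /* terminator, as in the driver loops above */
		printf("record type %u, %u bytes at offset %zu\n",
		       hdr->type, hdr->size, off);
		off += hdr->size; /* advance by the record's own size */
	}
}

int main(void)
{
	/* two records (type 1, 4 bytes; type 2, 3 bytes), then a terminator */
	const uint8_t buf[] = { 1, 4, 0xaa, 0xbb, 2, 3, 0xcc, 0, 0 };

	walk_records(buf, sizeof(buf));
	return 0;
}
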
++
++union firmware_info {
++ ATOM_FIRMWARE_INFO info;
++ ATOM_FIRMWARE_INFO_V1_2 info_12;
++ ATOM_FIRMWARE_INFO_V1_3 info_13;
++ ATOM_FIRMWARE_INFO_V1_4 info_14;
++ ATOM_FIRMWARE_INFO_V2_1 info_21;
++ ATOM_FIRMWARE_INFO_V2_2 info_22;
++};
++
++int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
++{
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
++ uint8_t frev, crev;
++ uint16_t data_offset;
++ int ret = -EINVAL;
++
++ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
++ &frev, &crev, &data_offset)) {
++ int i;
++ struct amdgpu_pll *ppll = &adev->clock.ppll[0];
++ struct amdgpu_pll *spll = &adev->clock.spll;
++ struct amdgpu_pll *mpll = &adev->clock.mpll;
++ union firmware_info *firmware_info =
++ (union firmware_info *)(mode_info->atom_context->bios +
++ data_offset);
++ /* pixel clocks */
++ ppll->reference_freq =
++ le16_to_cpu(firmware_info->info.usReferenceClock);
++ ppll->reference_div = 0;
++
++ if (crev < 2)
++ ppll->pll_out_min =
++ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
++ else
++ ppll->pll_out_min =
++ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
++ ppll->pll_out_max =
++ le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
++
++ if (crev >= 4) {
++ ppll->lcd_pll_out_min =
++ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
++ if (ppll->lcd_pll_out_min == 0)
++ ppll->lcd_pll_out_min = ppll->pll_out_min;
++ ppll->lcd_pll_out_max =
++ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
++ if (ppll->lcd_pll_out_max == 0)
++ ppll->lcd_pll_out_max = ppll->pll_out_max;
++ } else {
++ ppll->lcd_pll_out_min = ppll->pll_out_min;
++ ppll->lcd_pll_out_max = ppll->pll_out_max;
++ }
++
++ if (ppll->pll_out_min == 0)
++ ppll->pll_out_min = 64800;
++
++ ppll->pll_in_min =
++ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
++ ppll->pll_in_max =
++ le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
++
++ ppll->min_post_div = 2;
++ ppll->max_post_div = 0x7f;
++ ppll->min_frac_feedback_div = 0;
++ ppll->max_frac_feedback_div = 9;
++ ppll->min_ref_div = 2;
++ ppll->max_ref_div = 0x3ff;
++ ppll->min_feedback_div = 4;
++ ppll->max_feedback_div = 0xfff;
++ ppll->best_vco = 0;
++
++ for (i = 1; i < AMDGPU_MAX_PPLL; i++)
++ adev->clock.ppll[i] = *ppll;
++
++ /* system clock */
++ spll->reference_freq =
++ le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
++ spll->reference_div = 0;
++
++ spll->pll_out_min =
++ le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
++ spll->pll_out_max =
++ le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
++
++ /* ??? */
++ if (spll->pll_out_min == 0)
++ spll->pll_out_min = 64800;
++
++ spll->pll_in_min =
++ le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
++ spll->pll_in_max =
++ le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
++
++ spll->min_post_div = 1;
++ spll->max_post_div = 1;
++ spll->min_ref_div = 2;
++ spll->max_ref_div = 0xff;
++ spll->min_feedback_div = 4;
++ spll->max_feedback_div = 0xff;
++ spll->best_vco = 0;
++
++ /* memory clock */
++ mpll->reference_freq =
++ le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
++ mpll->reference_div = 0;
++
++ mpll->pll_out_min =
++ le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
++ mpll->pll_out_max =
++ le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
++
++ /* ??? */
++ if (mpll->pll_out_min == 0)
++ mpll->pll_out_min = 64800;
++
++ mpll->pll_in_min =
++ le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
++ mpll->pll_in_max =
++ le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
++
++ adev->clock.default_sclk =
++ le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
++ adev->clock.default_mclk =
++ le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
++
++ mpll->min_post_div = 1;
++ mpll->max_post_div = 1;
++ mpll->min_ref_div = 2;
++ mpll->max_ref_div = 0xff;
++ mpll->min_feedback_div = 4;
++ mpll->max_feedback_div = 0xff;
++ mpll->best_vco = 0;
++
++ /* disp clock */
++ adev->clock.default_dispclk =
++ le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
++ if (adev->clock.default_dispclk == 0)
++ adev->clock.default_dispclk = 54000; /* 540 MHz */
++ adev->clock.dp_extclk =
++ le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
++ adev->clock.current_dispclk = adev->clock.default_dispclk;
++
++ adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
++ if (adev->clock.max_pixel_clock == 0)
++ adev->clock.max_pixel_clock = 40000;
++
++ /* not technically a clock, but... */
++ adev->mode_info.firmware_flags =
++ le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
++
++ ret = 0;
++ }
++
++ adev->pm.current_sclk = adev->clock.default_sclk;
++ adev->pm.current_mclk = adev->clock.default_mclk;
++
++ return ret;
++}
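
A unit convention worth noting: the clock values parsed above (the PLL limits, default_sclk, default_mclk, default_dispclk) are stored in units of 10 kHz, which is why 54000 carries the comment 540 MHz. A trivial standalone check of that conversion; the helper name is ours, not part of the patch:

#include <stdio.h>

/* AtomBIOS firmware-info clock fields are in units of 10 kHz. */
static unsigned long atom_clock_to_khz(unsigned long atom_units)
{
	return atom_units * 10;
}

int main(void)
{
	/* 54000 in 10 kHz units -> 540000 kHz -> 540 MHz */
	printf("%lu kHz = %.0f MHz\n", atom_clock_to_khz(54000),
	       atom_clock_to_khz(54000) / 1000.0);
	return 0;
}
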
++
++union igp_info {
++ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
++};
++
++static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
++ struct amdgpu_atom_ss *ss,
++ int id)
++{
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
++ u16 data_offset, size;
++ union igp_info *igp_info;
++ u8 frev, crev;
++ u16 percentage = 0, rate = 0;
++
++ /* get any igp specific overrides */
++ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
++ &frev, &crev, &data_offset)) {
++ igp_info = (union igp_info *)
++ (mode_info->atom_context->bios + data_offset);
++ switch (crev) {
++ case 6:
++ switch (id) {
++ case ASIC_INTERNAL_SS_ON_TMDS:
++ percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
++ rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
++ break;
++ case ASIC_INTERNAL_SS_ON_HDMI:
++ percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
++ rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
++ break;
++ case ASIC_INTERNAL_SS_ON_LVDS:
++ percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
++ rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
++ break;
++ }
++ break;
++ case 7:
++ switch (id) {
++ case ASIC_INTERNAL_SS_ON_TMDS:
++ percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
++ rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
++ break;
++ case ASIC_INTERNAL_SS_ON_HDMI:
++ percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
++ rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
++ break;
++ case ASIC_INTERNAL_SS_ON_LVDS:
++ percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
++ rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
++ break;
++ }
++ break;
++ case 8:
++ switch (id) {
++ case ASIC_INTERNAL_SS_ON_TMDS:
++ percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
++ rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
++ break;
++ case ASIC_INTERNAL_SS_ON_HDMI:
++ percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
++ rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
++ break;
++ case ASIC_INTERNAL_SS_ON_LVDS:
++ percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
++ rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
++ break;
++ }
++ break;
++ case 9:
++ switch (id) {
++ case ASIC_INTERNAL_SS_ON_TMDS:
++ percentage = le16_to_cpu(igp_info->info_9.usDVISSPercentage);
++ rate = le16_to_cpu(igp_info->info_9.usDVISSpreadRateIn10Hz);
++ break;
++ case ASIC_INTERNAL_SS_ON_HDMI:
++ percentage = le16_to_cpu(igp_info->info_9.usHDMISSPercentage);
++ rate = le16_to_cpu(igp_info->info_9.usHDMISSpreadRateIn10Hz);
++ break;
++ case ASIC_INTERNAL_SS_ON_LVDS:
++ percentage = le16_to_cpu(igp_info->info_9.usLvdsSSPercentage);
++ rate = le16_to_cpu(igp_info->info_9.usLvdsSSpreadRateIn10Hz);
++ break;
++ }
++ break;
++ default:
++ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
++ break;
++ }
++ if (percentage)
++ ss->percentage = percentage;
++ if (rate)
++ ss->rate = rate;
++ }
++}
++
++union asic_ss_info {
++ struct _ATOM_ASIC_INTERNAL_SS_INFO info;
++ struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
++ struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
++};
++
++union asic_ss_assignment {
++ struct _ATOM_ASIC_SS_ASSIGNMENT v1;
++ struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
++ struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
++};
++
++bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
++ struct amdgpu_atom_ss *ss,
++ int id, u32 clock)
++{
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
++ uint16_t data_offset, size;
++ union asic_ss_info *ss_info;
++ union asic_ss_assignment *ss_assign;
++ uint8_t frev, crev;
++ int i, num_indices;
++
++ if (id == ASIC_INTERNAL_MEMORY_SS) {
++ if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
++ return false;
++ }
++ if (id == ASIC_INTERNAL_ENGINE_SS) {
++ if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
++ return false;
++ }
++
++ memset(ss, 0, sizeof(struct amdgpu_atom_ss));
++ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
++ &frev, &crev, &data_offset)) {
++
++ ss_info =
++ (union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
++
++ switch (frev) {
++ case 1:
++ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
++ sizeof(ATOM_ASIC_SS_ASSIGNMENT);
++
++ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
++ for (i = 0; i < num_indices; i++) {
++ if ((ss_assign->v1.ucClockIndication == id) &&
++ (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
++ ss->percentage =
++ le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
++ ss->type = ss_assign->v1.ucSpreadSpectrumMode;
++ ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
++ ss->percentage_divider = 100;
++ return true;
++ }
++ ss_assign = (union asic_ss_assignment *)
++ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
++ }
++ break;
++ case 2:
++ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
++ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
++ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
++ for (i = 0; i < num_indices; i++) {
++ if ((ss_assign->v2.ucClockIndication == id) &&
++ (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
++ ss->percentage =
++ le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
++ ss->type = ss_assign->v2.ucSpreadSpectrumMode;
++ ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
++ ss->percentage_divider = 100;
++ if ((crev == 2) &&
++ ((id == ASIC_INTERNAL_ENGINE_SS) ||
++ (id == ASIC_INTERNAL_MEMORY_SS)))
++ ss->rate /= 100;
++ return true;
++ }
++ ss_assign = (union asic_ss_assignment *)
++ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
++ }
++ break;
++ case 3:
++ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
++ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
++ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
++ for (i = 0; i < num_indices; i++) {
++ if ((ss_assign->v3.ucClockIndication == id) &&
++ (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
++ ss->percentage =
++ le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
++ ss->type = ss_assign->v3.ucSpreadSpectrumMode;
++ ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
++ if (ss_assign->v3.ucSpreadSpectrumMode &
++ SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
++ ss->percentage_divider = 1000;
++ else
++ ss->percentage_divider = 100;
++ if ((id == ASIC_INTERNAL_ENGINE_SS) ||
++ (id == ASIC_INTERNAL_MEMORY_SS))
++ ss->rate /= 100;
++ if (adev->flags & AMDGPU_IS_APU)
++ amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
++ return true;
++ }
++ ss_assign = (union asic_ss_assignment *)
++ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
++ }
++ break;
++ default:
++ DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
++ break;
++ }
++
++ }
++ return false;
++}
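
As we read the divider selection above, the stored spread-spectrum percentage is fixed-point: the effective percentage is the raw table value divided by percentage_divider, which is 100 by default and 1000 when the V3 divide-by-1000 mode flag is set. A small illustration with made-up table values:

#include <stdio.h>

/* Effective spread = stored value / percentage_divider; this reading is
 * inferred from the divider selection above, and the numbers below are
 * illustrative rather than from a real table. */
static double ss_percent(unsigned int stored, unsigned int divider)
{
	return (double)stored / divider;
}

int main(void)
{
	printf("stored 250, divider 100  -> %.2f%%\n", ss_percent(250, 100));
	printf("stored 250, divider 1000 -> %.2f%%\n", ss_percent(250, 1000));
	return 0;
}
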
++
++union get_clock_dividers {
++ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
++ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
++ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
++ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
++ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
++ struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
++ struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
++};
++
++int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
++ u8 clock_type,
++ u32 clock,
++ bool strobe_mode,
++ struct atom_clock_dividers *dividers)
++{
++ union get_clock_dividers args;
++ int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
++ u8 frev, crev;
++
++ memset(&args, 0, sizeof(args));
++ memset(dividers, 0, sizeof(struct atom_clock_dividers));
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return -EINVAL;
++
++ switch (crev) {
++ case 4:
++ /* fusion */
++ args.v4.ulClock = cpu_to_le32(clock); /* 10 kHz */
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
++ dividers->real_clock = le32_to_cpu(args.v4.ulClock);
++ break;
++ case 6:
++ /* CI */
++ /* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
++ args.v6_in.ulClock.ulComputeClockFlag = clock_type;
++ args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 kHz */
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
++ dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
++ dividers->ref_div = args.v6_out.ucPllRefDiv;
++ dividers->post_div = args.v6_out.ucPllPostDiv;
++ dividers->flags = args.v6_out.ucPllCntlFlag;
++ dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
++ dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
++ u32 clock,
++ bool strobe_mode,
++ struct atom_mpll_param *mpll_param)
++{
++ COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
++ int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
++ u8 frev, crev;
++
++ memset(&args, 0, sizeof(args));
++ memset(mpll_param, 0, sizeof(struct atom_mpll_param));
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return -EINVAL;
++
++ switch (frev) {
++ case 2:
++ switch (crev) {
++ case 1:
++ /* SI */
++ args.ulClock = cpu_to_le32(clock); /* 10 kHz */
++ args.ucInputFlag = 0;
++ if (strobe_mode)
++ args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
++ mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
++ mpll_param->post_div = args.ucPostDiv;
++ mpll_param->dll_speed = args.ucDllSpeed;
++ mpll_param->bwcntl = args.ucBWCntl;
++ mpll_param->vco_mode =
++ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
++ mpll_param->yclk_sel =
++ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
++ mpll_param->qdr =
++ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
++ mpll_param->half_rate =
++ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
++ break;
++ default:
++ return -EINVAL;
++ }
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
++{
++ GET_ENGINE_CLOCK_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++ return le32_to_cpu(args.ulReturnEngineClock);
++}
++
++uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
++{
++ GET_MEMORY_CLOCK_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++ return le32_to_cpu(args.ulReturnMemoryClock);
++}
++
++void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
++ uint32_t eng_clock)
++{
++ SET_ENGINE_CLOCK_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
++
++ args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 kHz */
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
++ uint32_t mem_clock)
++{
++ SET_MEMORY_CLOCK_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
++
++ if (adev->flags & AMDGPU_IS_APU)
++ return;
++
++ args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 kHz */
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
++ u32 eng_clock, u32 mem_clock)
++{
++ SET_ENGINE_CLOCK_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
++ u32 tmp;
++
++ memset(&args, 0, sizeof(args));
++
++ tmp = eng_clock & SET_CLOCK_FREQ_MASK;
++ tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);
++
++ args.ulTargetEngineClock = cpu_to_le32(tmp);
++ if (mem_clock)
++ args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++union set_voltage {
++ struct _SET_VOLTAGE_PS_ALLOCATION alloc;
++ struct _SET_VOLTAGE_PARAMETERS v1;
++ struct _SET_VOLTAGE_PARAMETERS_V2 v2;
++ struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
++};
++
++void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
++ u16 voltage_level,
++ u8 voltage_type)
++{
++ union set_voltage args;
++ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
++ u8 frev, crev, volt_index = voltage_level;
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return;
++
++ /* 0xff01 is a flag rather than an actual voltage */
++ if (voltage_level == 0xff01)
++ return;
++
++ switch (crev) {
++ case 1:
++ args.v1.ucVoltageType = voltage_type;
++ args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
++ args.v1.ucVoltageIndex = volt_index;
++ break;
++ case 2:
++ args.v2.ucVoltageType = voltage_type;
++ args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
++ args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
++ break;
++ case 3:
++ args.v3.ucVoltageType = voltage_type;
++ args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
++ args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ return;
++ }
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
++ u16 *leakage_id)
++{
++ union set_voltage args;
++ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
++ u8 frev, crev;
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return -EINVAL;
++
++ switch (crev) {
++ case 3:
++ case 4:
++ args.v3.ucVoltageType = 0;
++ args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
++ args.v3.usVoltageLevel = 0;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
++ u16 *vddc, u16 *vddci,
++ u16 virtual_voltage_id,
++ u16 vbios_voltage_id)
++{
++ int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
++ u8 frev, crev;
++ u16 data_offset, size;
++ int i, j;
++ ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
++ u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
++
++ *vddc = 0;
++ *vddci = 0;
++
++ if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
++ &frev, &crev, &data_offset))
++ return -EINVAL;
++
++ profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
++ (adev->mode_info.atom_context->bios + data_offset);
++
++ switch (frev) {
++ case 1:
++ return -EINVAL;
++ case 2:
++ switch (crev) {
++ case 1:
++ if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
++ return -EINVAL;
++ leakage_bin = (u16 *)
++ (adev->mode_info.atom_context->bios + data_offset +
++ le16_to_cpu(profile->usLeakageBinArrayOffset));
++ vddc_id_buf = (u16 *)
++ (adev->mode_info.atom_context->bios + data_offset +
++ le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
++ vddc_buf = (u16 *)
++ (adev->mode_info.atom_context->bios + data_offset +
++ le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
++ vddci_id_buf = (u16 *)
++ (adev->mode_info.atom_context->bios + data_offset +
++ le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
++ vddci_buf = (u16 *)
++ (adev->mode_info.atom_context->bios + data_offset +
++ le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
++
++ if (profile->ucElbVDDC_Num > 0) {
++ for (i = 0; i < profile->ucElbVDDC_Num; i++) {
++ if (vddc_id_buf[i] == virtual_voltage_id) {
++ for (j = 0; j < profile->ucLeakageBinNum; j++) {
++ if (vbios_voltage_id <= leakage_bin[j]) {
++ *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
++ break;
++ }
++ }
++ break;
++ }
++ }
++ }
++ if (profile->ucElbVDDCI_Num > 0) {
++ for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
++ if (vddci_id_buf[i] == virtual_voltage_id) {
++ for (j = 0; j < profile->ucLeakageBinNum; j++) {
++ if (vbios_voltage_id <= leakage_bin[j]) {
++ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
++ break;
++ }
++ }
++ break;
++ }
++ }
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ return -EINVAL;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ return -EINVAL;
++ }
++
++ return 0;
++}
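
The ELB lookup above indexes flattened two-dimensional tables: vddc_buf holds one row per leakage bin, each ucElbVDDC_Num entries wide, hence the vddc_buf[j * num + i] arithmetic. A minimal sketch of that row-major indexing with invented numbers:

#include <stdio.h>

int main(void)
{
	enum { NUM_IDS = 3, NUM_BINS = 2 };
	/* rows are leakage bins, columns are virtual-voltage ids (made up) */
	const unsigned short vddc[NUM_BINS * NUM_IDS] = {
		900, 950, 1000, /* bin 0 */
		910, 960, 1010, /* bin 1 */
	};
	int id = 1, bin = 1;

	printf("vddc[bin %d][id %d] = %u\n", bin, id,
	       (unsigned int)vddc[bin * NUM_IDS + id]);
	return 0;
}
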
++
++union get_voltage_info {
++ struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
++ struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
++};
++
++int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
++ u16 virtual_voltage_id,
++ u16 *voltage)
++{
++ int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
++ u32 entry_id;
++ u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
++ union get_voltage_info args;
++
++ for (entry_id = 0; entry_id < count; entry_id++) {
++ if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
++ virtual_voltage_id)
++ break;
++ }
++
++ if (entry_id >= count)
++ return -EINVAL;
++
++ args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
++ args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
++ args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
++ args.in.ulSCLKFreq =
++ cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
++
++ return 0;
++}
++
++union voltage_object_info {
++ struct _ATOM_VOLTAGE_OBJECT_INFO v1;
++ struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
++ struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
++};
++
++union voltage_object {
++ struct _ATOM_VOLTAGE_OBJECT v1;
++ struct _ATOM_VOLTAGE_OBJECT_V2 v2;
++ union _ATOM_VOLTAGE_OBJECT_V3 v3;
++};
++
++
++static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
++ u8 voltage_type, u8 voltage_mode)
++{
++ u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
++ u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
++ u8 *start = (u8*)v3;
++
++ while (offset < size) {
++ ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
++ if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
++ (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
++ return vo;
++ offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
++ }
++ return NULL;
++}
++
++bool
++amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
++ u8 voltage_type, u8 voltage_mode)
++{
++ int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
++ u8 frev, crev;
++ u16 data_offset, size;
++ union voltage_object_info *voltage_info;
++
++ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
++ &frev, &crev, &data_offset)) {
++ voltage_info = (union voltage_object_info *)
++ (adev->mode_info.atom_context->bios + data_offset);
++
++ switch (frev) {
++ case 3:
++ switch (crev) {
++ case 1:
++ if (amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
++ voltage_type, voltage_mode))
++ return true;
++ break;
++ default:
++ DRM_ERROR("unknown voltage object table\n");
++ return false;
++ }
++ break;
++ default:
++ DRM_ERROR("unknown voltage object table\n");
++ return false;
++ }
++
++ }
++ return false;
++}
++
++int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
++ u8 voltage_type, u8 voltage_mode,
++ struct atom_voltage_table *voltage_table)
++{
++ int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
++ u8 frev, crev;
++ u16 data_offset, size;
++ int i;
++ union voltage_object_info *voltage_info;
++ union voltage_object *voltage_object = NULL;
++
++ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
++ &frev, &crev, &data_offset)) {
++ voltage_info = (union voltage_object_info *)
++ (adev->mode_info.atom_context->bios + data_offset);
++
++ switch (frev) {
++ case 3:
++ switch (crev) {
++ case 1:
++ voltage_object = (union voltage_object *)
++ amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
++ voltage_type, voltage_mode);
++ if (voltage_object) {
++ ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
++ &voltage_object->v3.asGpioVoltageObj;
++ VOLTAGE_LUT_ENTRY_V2 *lut;
++ if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
++ return -EINVAL;
++ lut = &gpio->asVolGpioLut[0];
++ for (i = 0; i < gpio->ucGpioEntryNum; i++) {
++ voltage_table->entries[i].value =
++ le16_to_cpu(lut->usVoltageValue);
++ voltage_table->entries[i].smio_low =
++ le32_to_cpu(lut->ulVoltageId);
++ lut = (VOLTAGE_LUT_ENTRY_V2 *)
++ ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
++ }
++ voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
++ voltage_table->count = gpio->ucGpioEntryNum;
++ voltage_table->phase_delay = gpio->ucPhaseDelay;
++ return 0;
++ }
++ break;
++ default:
++ DRM_ERROR("unknown voltage object table\n");
++ return -EINVAL;
++ }
++ break;
++ default:
++ DRM_ERROR("unknown voltage object table\n");
++ return -EINVAL;
++ }
++ }
++ return -EINVAL;
++}
++
++union vram_info {
++ struct _ATOM_VRAM_INFO_V3 v1_3;
++ struct _ATOM_VRAM_INFO_V4 v1_4;
++ struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
++};
++
++#define MEM_ID_MASK 0xff000000
++#define MEM_ID_SHIFT 24
++#define CLOCK_RANGE_MASK 0x00ffffff
++#define CLOCK_RANGE_SHIFT 0
++#define LOW_NIBBLE_MASK 0xf
++#define DATA_EQU_PREV 0
++#define DATA_FROM_TABLE 4
++
++int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
++ u8 module_index,
++ struct atom_mc_reg_table *reg_table)
++{
++ int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
++ u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
++ u32 i = 0, j;
++ u16 data_offset, size;
++ union vram_info *vram_info;
++
++ memset(reg_table, 0, sizeof(struct atom_mc_reg_table));
++
++ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
++ &frev, &crev, &data_offset)) {
++ vram_info = (union vram_info *)
++ (adev->mode_info.atom_context->bios + data_offset);
++ switch (frev) {
++ case 1:
++ DRM_ERROR("old table version %d, %d\n", frev, crev);
++ return -EINVAL;
++ case 2:
++ switch (crev) {
++ case 1:
++ if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
++ ATOM_INIT_REG_BLOCK *reg_block =
++ (ATOM_INIT_REG_BLOCK *)
++ ((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
++ ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
++ (ATOM_MEMORY_SETTING_DATA_BLOCK *)
++ ((u8 *)reg_block + (2 * sizeof(u16)) +
++ le16_to_cpu(reg_block->usRegIndexTblSize));
++ ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
++ num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
++ sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
++ if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
++ return -EINVAL;
++ while (i < num_entries) {
++ if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
++ break;
++ reg_table->mc_reg_address[i].s1 =
++ (u16)(le16_to_cpu(format->usRegIndex));
++ reg_table->mc_reg_address[i].pre_reg_data =
++ (u8)(format->ucPreRegDataLength);
++ i++;
++ format = (ATOM_INIT_REG_INDEX_FORMAT *)
++ ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
++ }
++ reg_table->last = i;
++ while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
++ (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
++ t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
++ >> MEM_ID_SHIFT);
++ if (module_index == t_mem_id) {
++ reg_table->mc_reg_table_entry[num_ranges].mclk_max =
++ (u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
++ >> CLOCK_RANGE_SHIFT);
++ for (i = 0, j = 1; i < reg_table->last; i++) {
++ if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
++ reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
++ (u32)le32_to_cpu(*((u32 *)reg_data + j));
++ j++;
++ } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
++ reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
++ reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
++ }
++ }
++ num_ranges++;
++ }
++ reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
++ ((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
++ }
++ if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
++ return -EINVAL;
++ reg_table->num_entries = num_ranges;
++ } else
++ return -EINVAL;
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ return -EINVAL;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ return -EINVAL;
++ }
++ return 0;
++ }
++ return -EINVAL;
++}
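
Each entry in the memory-clock patch table packs two fields into a single dword, split by the masks defined above: the module ID in the top byte and the maximum mclk (presumably in the same 10 kHz units as the other clock tables) in the low 24 bits. A standalone round-trip of that packing:

#include <stdint.h>
#include <stdio.h>

#define MEM_ID_MASK      0xff000000u
#define MEM_ID_SHIFT     24
#define CLOCK_RANGE_MASK 0x00ffffffu

int main(void)
{
	/* illustrative word: module 2, mclk_max 96000 (invented values) */
	uint32_t word = (2u << MEM_ID_SHIFT) | 96000u;

	printf("mem_id %u, mclk_max %u\n",
	       (unsigned int)((word & MEM_ID_MASK) >> MEM_ID_SHIFT),
	       (unsigned int)(word & CLOCK_RANGE_MASK));
	return 0;
}
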
++
++void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
++{
++ uint32_t bios_6_scratch;
++
++ bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
++
++ if (lock) {
++ bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
++ bios_6_scratch &= ~ATOM_S6_ACC_MODE;
++ } else {
++ bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
++ bios_6_scratch |= ATOM_S6_ACC_MODE;
++ }
++
++ WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
++}
++
++void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
++{
++ uint32_t bios_2_scratch, bios_6_scratch;
++
++ bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
++ bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
++
++ /* let the bios control the backlight */
++ bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
++
++ /* tell the bios not to handle mode switching */
++ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
++
++ /* clear the vbios dpms state */
++ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
++
++ WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
++ WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
++}
++
++void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
++{
++ int i;
++
++ for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
++ adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
++}
++
++void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
++{
++ int i;
++
++ for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
++ WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
++}
++
++/* Atom needs data in little-endian format, so
++ * swap as appropriate when copying data to or
++ * from atom. Note that atom operates on dw
++ * (32-bit) units.
++ */
++void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
++{
++#ifdef __BIG_ENDIAN
++ u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
++ u32 *dst32, *src32;
++ int i;
++
++ memcpy(src_tmp, src, num_bytes);
++ src32 = (u32 *)src_tmp;
++ dst32 = (u32 *)dst_tmp;
++ if (to_le) {
++ for (i = 0; i < ((num_bytes + 3) / 4); i++)
++ dst32[i] = cpu_to_le32(src32[i]);
++ memcpy(dst, dst_tmp, num_bytes);
++ } else {
++ u8 dws = num_bytes & ~3;
++ for (i = 0; i < ((num_bytes + 3) / 4); i++)
++ dst32[i] = le32_to_cpu(src32[i]);
++ memcpy(dst, dst_tmp, dws);
++ if (num_bytes % 4) {
++ for (i = 0; i < (num_bytes % 4); i++)
++ dst[dws+i] = dst_tmp[dws+i];
++ }
++ }
++#else
++ memcpy(dst, src, num_bytes);
++#endif
++}
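
A userspace sketch of the swap path above, performed unconditionally so the effect is observable on a little-endian host: each 32-bit word is byte-swapped in a bounce buffer and only num_bytes are copied out. Note how the trailing partial dword of a 6-byte payload comes back as padding, which is the practical reason atom sticks to dword units:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

static void copy_swap(uint8_t *dst, const uint8_t *src, uint8_t num_bytes)
{
	uint8_t tmp[20] = { 0 }; /* zeroed, so the tail is deterministic */
	uint32_t words[5];
	int i;

	memcpy(tmp, src, num_bytes);
	memcpy(words, tmp, sizeof(words));
	for (i = 0; i < (num_bytes + 3) / 4; i++)
		words[i] = bswap32(words[i]);
	memcpy(dst, words, num_bytes); /* tail of a partial dword rotates out */
}

int main(void)
{
	const uint8_t src[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	uint8_t dst[6];
	int i;

	copy_swap(dst, src, sizeof(src));
	for (i = 0; i < 6; i++)
		printf("%02x ", dst[i]); /* prints: 44 33 22 11 00 00 */
	printf("\n");
	return 0;
}
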
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+new file mode 100644
+index 0000000..0ebb959
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+@@ -0,0 +1,206 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_ATOMBIOS_H__
++#define __AMDGPU_ATOMBIOS_H__
++
++struct atom_clock_dividers {
++ u32 post_div;
++ union {
++ struct {
++#ifdef __BIG_ENDIAN
++ u32 reserved : 6;
++ u32 whole_fb_div : 12;
++ u32 frac_fb_div : 14;
++#else
++ u32 frac_fb_div : 14;
++ u32 whole_fb_div : 12;
++ u32 reserved : 6;
++#endif
++ };
++ u32 fb_div;
++ };
++ u32 ref_div;
++ bool enable_post_div;
++ bool enable_dithen;
++ u32 vco_mode;
++ u32 real_clock;
++ /* added for CI */
++ u32 post_divider;
++ u32 flags;
++};
++
++struct atom_mpll_param {
++ union {
++ struct {
++#ifdef __BIG_ENDIAN
++ u32 reserved : 8;
++ u32 clkfrac : 12;
++ u32 clkf : 12;
++#else
++ u32 clkf : 12;
++ u32 clkfrac : 12;
++ u32 reserved : 8;
++#endif
++ };
++ u32 fb_div;
++ };
++ u32 post_div;
++ u32 bwcntl;
++ u32 dll_speed;
++ u32 vco_mode;
++ u32 yclk_sel;
++ u32 qdr;
++ u32 half_rate;
++};
++
++#define MEM_TYPE_GDDR5 0x50
++#define MEM_TYPE_GDDR4 0x40
++#define MEM_TYPE_GDDR3 0x30
++#define MEM_TYPE_DDR2 0x20
++#define MEM_TYPE_GDDR1 0x10
++#define MEM_TYPE_DDR3 0xb0
++#define MEM_TYPE_MASK 0xf0
++
++struct atom_memory_info {
++ u8 mem_vendor;
++ u8 mem_type;
++};
++
++#define MAX_AC_TIMING_ENTRIES 16
++
++struct atom_memory_clock_range_table
++{
++ u8 num_entries;
++ u8 rsv[3];
++ u32 mclk[MAX_AC_TIMING_ENTRIES];
++};
++
++#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
++#define VBIOS_MAX_AC_TIMING_ENTRIES 20
++
++struct atom_mc_reg_entry {
++ u32 mclk_max;
++ u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
++};
++
++struct atom_mc_register_address {
++ u16 s1;
++ u8 pre_reg_data;
++};
++
++struct atom_mc_reg_table {
++ u8 last;
++ u8 num_entries;
++ struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
++ struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
++};
++
++#define MAX_VOLTAGE_ENTRIES 32
++
++struct atom_voltage_table_entry
++{
++ u16 value;
++ u32 smio_low;
++};
++
++struct atom_voltage_table
++{
++ u32 count;
++ u32 mask_low;
++ u32 phase_delay;
++ struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
++};
++
++struct amdgpu_gpio_rec
++amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
++ u8 id);
++
++struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
++ uint8_t id);
++void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
++
++bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
++
++int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
++
++bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
++ struct amdgpu_atom_ss *ss,
++ int id, u32 clock);
++
++int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
++ u8 clock_type,
++ u32 clock,
++ bool strobe_mode,
++ struct atom_clock_dividers *dividers);
++
++int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
++ u32 clock,
++ bool strobe_mode,
++ struct atom_mpll_param *mpll_param);
++
++uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
++uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
++void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
++ uint32_t eng_clock);
++void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
++ uint32_t mem_clock);
++void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
++ u16 voltage_level,
++ u8 voltage_type);
++
++void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
++ u32 eng_clock, u32 mem_clock);
++
++int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
++ u16 *leakage_id);
++
++int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
++ u16 *vddc, u16 *vddci,
++ u16 virtual_voltage_id,
++ u16 vbios_voltage_id);
++
++int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
++ u16 virtual_voltage_id,
++ u16 *voltage);
++
++bool
++amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
++ u8 voltage_type, u8 voltage_mode);
++
++int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
++ u8 voltage_type, u8 voltage_mode,
++ struct atom_voltage_table *voltage_table);
++
++int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
++ u8 module_index,
++ struct atom_mc_reg_table *reg_table);
++
++void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
++void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
++void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
++void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
++
++void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+new file mode 100644
+index 0000000..3f7aaa4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -0,0 +1,572 @@
++/*
++ * Copyright (c) 2010 Red Hat Inc.
++ * Author : Dave Airlie <airlied@redhat.com>
++ *
++ * Licensed under GPLv2
++ *
++ * ATPX support for both Intel/ATI
++ */
++#include <linux/vga_switcheroo.h>
++#include <linux/slab.h>
++#include <linux/acpi.h>
++#include <linux/pci.h>
++
++#include "amdgpu_acpi.h"
++
++struct amdgpu_atpx_functions {
++ bool px_params;
++ bool power_cntl;
++ bool disp_mux_cntl;
++ bool i2c_mux_cntl;
++ bool switch_start;
++ bool switch_end;
++ bool disp_connectors_mapping;
++ bool disp_detection_ports;
++};
++
++struct amdgpu_atpx {
++ acpi_handle handle;
++ struct amdgpu_atpx_functions functions;
++};
++
++static struct amdgpu_atpx_priv {
++ bool atpx_detected;
++ /* handles for the device and ATPX */
++ acpi_handle dhandle;
++ acpi_handle other_handle;
++ struct amdgpu_atpx atpx;
++} amdgpu_atpx_priv;
++
++struct atpx_verify_interface {
++ u16 size; /* structure size in bytes (includes size field) */
++ u16 version; /* version */
++ u32 function_bits; /* supported functions bit vector */
++} __packed;
++
++struct atpx_px_params {
++ u16 size; /* structure size in bytes (includes size field) */
++ u32 valid_flags; /* which flags are valid */
++ u32 flags; /* flags */
++} __packed;
++
++struct atpx_power_control {
++ u16 size;
++ u8 dgpu_state;
++} __packed;
++
++struct atpx_mux {
++ u16 size;
++ u16 mux;
++} __packed;
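
The size fields in these input buffers count the size field itself, which is why the callers below set 3 for power control (u16 + u8) and 4 for the mux structures (u16 + u16); __packed keeps the compiler from padding the 3-byte struct out to 4. A quick userspace check, assuming GCC/Clang attribute syntax:

#include <stdint.h>
#include <stdio.h>

struct power_control {
	uint16_t size;
	uint8_t dgpu_state;
} __attribute__((packed));

struct mux {
	uint16_t size;
	uint16_t mux;
} __attribute__((packed));

int main(void)
{
	/* 3 and 4 bytes: the values the ATPX callers put in .size */
	printf("power_control %zu bytes, mux %zu bytes\n",
	       sizeof(struct power_control), sizeof(struct mux));
	return 0;
}
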
++
++bool amdgpu_has_atpx(void) {
++ return amdgpu_atpx_priv.atpx_detected;
++}
++
++/**
++ * amdgpu_atpx_call - call an ATPX method
++ *
++ * @handle: acpi handle
++ * @function: the ATPX function to execute
++ * @params: ATPX function params
++ *
++ * Executes the requested ATPX function (all asics).
++ * Returns a pointer to the acpi output buffer.
++ */
++static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
++ struct acpi_buffer *params)
++{
++ acpi_status status;
++ union acpi_object atpx_arg_elements[2];
++ struct acpi_object_list atpx_arg;
++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
++
++ atpx_arg.count = 2;
++ atpx_arg.pointer = &atpx_arg_elements[0];
++
++ atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
++ atpx_arg_elements[0].integer.value = function;
++
++ if (params) {
++ atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
++ atpx_arg_elements[1].buffer.length = params->length;
++ atpx_arg_elements[1].buffer.pointer = params->pointer;
++ } else {
++ /* We need a second fake parameter */
++ atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
++ atpx_arg_elements[1].integer.value = 0;
++ }
++
++ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
++
++ /* Fail only if calling the method fails and ATPX is supported */
++ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
++ printk("failed to evaluate ATPX got %s\n",
++ acpi_format_exception(status));
++ kfree(buffer.pointer);
++ return NULL;
++ }
++
++ return buffer.pointer;
++}
++
++/**
++ * amdgpu_atpx_parse_functions - parse supported functions
++ *
++ * @f: supported functions struct
++ * @mask: supported functions mask from ATPX
++ *
++ * Use the supported functions mask from ATPX function
++ * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
++ * are supported (all asics).
++ */
++static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mask)
++{
++ f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
++ f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
++ f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
++ f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
++ f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
++ f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
++ f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
++ f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
++}
++
++/**
++ * amdgpu_atpx_validate_functions - validate ATPX functions
++ *
++ * @atpx: amdgpu atpx struct
++ *
++ * Validate that required functions are enabled (all asics).
++ * returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
++{
++ /* make sure required functions are enabled */
++ /* dGPU power control is required */
++ atpx->functions.power_cntl = true;
++
++ if (atpx->functions.px_params) {
++ union acpi_object *info;
++ struct atpx_px_params output;
++ size_t size;
++ u32 valid_bits;
++
++ info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
++ if (!info)
++ return -EIO;
++
++ memset(&output, 0, sizeof(output));
++
++ size = *(u16 *) info->buffer.pointer;
++ if (size < 10) {
++ printk("ATPX buffer is too small: %zu\n", size);
++ kfree(info);
++ return -EINVAL;
++ }
++ size = min(sizeof(output), size);
++
++ memcpy(&output, info->buffer.pointer, size);
++
++ valid_bits = output.flags & output.valid_flags;
++ /* if separate mux flag is set, mux controls are required */
++ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
++ atpx->functions.i2c_mux_cntl = true;
++ atpx->functions.disp_mux_cntl = true;
++ }
++ /* if any outputs are muxed, mux controls are required */
++ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
++ ATPX_TV_SIGNAL_MUXED |
++ ATPX_DFP_SIGNAL_MUXED))
++ atpx->functions.disp_mux_cntl = true;
++
++ kfree(info);
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_verify_interface - verify ATPX
++ *
++ * @atpx: amdgpu atpx struct
++ *
++ * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
++ * to initialize ATPX and determine what features are supported
++ * (all asics).
++ * returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
++{
++ union acpi_object *info;
++ struct atpx_verify_interface output;
++ size_t size;
++ int err = 0;
++
++ info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
++ if (!info)
++ return -EIO;
++
++ memset(&output, 0, sizeof(output));
++
++ size = *(u16 *) info->buffer.pointer;
++ if (size < 8) {
++ printk("ATPX buffer is too small: %zu\n", size);
++ err = -EINVAL;
++ goto out;
++ }
++ size = min(sizeof(output), size);
++
++ memcpy(&output, info->buffer.pointer, size);
++
++ /* TODO: check version? */
++ printk("ATPX version %u, functions 0x%08x\n",
++ output.version, output.function_bits);
++
++ amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits);
++
++out:
++ kfree(info);
++ return err;
++}
++
++/**
++ * amdgpu_atpx_set_discrete_state - power up/down discrete GPU
++ *
++ * @atpx: atpx info struct
++ * @state: discrete GPU state (0 = power down, 1 = power up)
++ *
++ * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
++ * power down/up the discrete GPU (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
++{
++ struct acpi_buffer params;
++ union acpi_object *info;
++ struct atpx_power_control input;
++
++ if (atpx->functions.power_cntl) {
++ input.size = 3;
++ input.dgpu_state = state;
++ params.length = input.size;
++ params.pointer = &input;
++ info = amdgpu_atpx_call(atpx->handle,
++ ATPX_FUNCTION_POWER_CONTROL,
++ &params);
++ if (!info)
++ return -EIO;
++ kfree(info);
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_switch_disp_mux - switch display mux
++ *
++ * @atpx: atpx info struct
++ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
++ *
++ * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
++ * switch the display mux between the discrete GPU and integrated GPU
++ * (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_switch_disp_mux(struct amdgpu_atpx *atpx, u16 mux_id)
++{
++ struct acpi_buffer params;
++ union acpi_object *info;
++ struct atpx_mux input;
++
++ if (atpx->functions.disp_mux_cntl) {
++ input.size = 4;
++ input.mux = mux_id;
++ params.length = input.size;
++ params.pointer = &input;
++ info = amdgpu_atpx_call(atpx->handle,
++ ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
++ &params);
++ if (!info)
++ return -EIO;
++ kfree(info);
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_switch_i2c_mux - switch i2c/hpd mux
++ *
++ * @atpx: atpx info struct
++ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
++ *
++ * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
++ * switch the i2c/hpd mux between the discrete GPU and integrated GPU
++ * (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_switch_i2c_mux(struct amdgpu_atpx *atpx, u16 mux_id)
++{
++ struct acpi_buffer params;
++ union acpi_object *info;
++ struct atpx_mux input;
++
++ if (atpx->functions.i2c_mux_cntl) {
++ input.size = 4;
++ input.mux = mux_id;
++ params.length = input.size;
++ params.pointer = &input;
++ info = amdgpu_atpx_call(atpx->handle,
++ ATPX_FUNCTION_I2C_MUX_CONTROL,
++ &params);
++ if (!info)
++ return -EIO;
++ kfree(info);
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_switch_start - notify the sbios of a GPU switch
++ *
++ * @atpx: atpx info struct
++ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
++ *
++ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
++ * function to notify the sbios that a switch between the discrete GPU and
++ * integrated GPU has begun (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_switch_start(struct amdgpu_atpx *atpx, u16 mux_id)
++{
++ struct acpi_buffer params;
++ union acpi_object *info;
++ struct atpx_mux input;
++
++ if (atpx->functions.switch_start) {
++ input.size = 4;
++ input.mux = mux_id;
++ params.length = input.size;
++ params.pointer = &input;
++ info = amdgpu_atpx_call(atpx->handle,
++ ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
++ &params);
++ if (!info)
++ return -EIO;
++ kfree(info);
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_switch_end - notify the sbios of a GPU switch
++ *
++ * @atpx: atpx info struct
++ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
++ *
++ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
++ * function to notify the sbios that a switch between the discrete GPU and
++ * integrated GPU has ended (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_switch_end(struct amdgpu_atpx *atpx, u16 mux_id)
++{
++ struct acpi_buffer params;
++ union acpi_object *info;
++ struct atpx_mux input;
++
++ if (atpx->functions.switch_end) {
++ input.size = 4;
++ input.mux = mux_id;
++ params.length = input.size;
++ params.pointer = &input;
++ info = amdgpu_atpx_call(atpx->handle,
++ ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
++ &params);
++ if (!info)
++ return -EIO;
++ kfree(info);
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_switchto - switch to the requested GPU
++ *
++ * @id: GPU to switch to
++ *
++ * Execute the necessary ATPX functions to switch between the discrete GPU and
++ * integrated GPU (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_switchto(enum vga_switcheroo_client_id id)
++{
++ u16 gpu_id;
++
++ if (id == VGA_SWITCHEROO_IGD)
++ gpu_id = ATPX_INTEGRATED_GPU;
++ else
++ gpu_id = ATPX_DISCRETE_GPU;
++
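++	/* bracket the actual mux flips with the start/end notifications
++	 * the sbios expects
++	 */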
++ amdgpu_atpx_switch_start(&amdgpu_atpx_priv.atpx, gpu_id);
++ amdgpu_atpx_switch_disp_mux(&amdgpu_atpx_priv.atpx, gpu_id);
++ amdgpu_atpx_switch_i2c_mux(&amdgpu_atpx_priv.atpx, gpu_id);
++ amdgpu_atpx_switch_end(&amdgpu_atpx_priv.atpx, gpu_id);
++
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_power_state - power down/up the requested GPU
++ *
++ * @id: GPU to power down/up
++ * @state: requested power state (0 = off, 1 = on)
++ *
++ * Execute the necessary ATPX function to power down/up the discrete GPU
++ * (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id,
++ enum vga_switcheroo_state state)
++{
++	/* on the W500, ACPI can't change the Intel GPU state */
++ if (id == VGA_SWITCHEROO_IGD)
++ return 0;
++
++ amdgpu_atpx_set_discrete_state(&amdgpu_atpx_priv.atpx, state);
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_pci_probe_handle - look up the ATPX handle
++ *
++ * @pdev: pci device
++ *
++ * Look up the ATPX handles (all asics).
++ * Returns true if the handles are found, false if not.
++ */
++static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
++{
++ acpi_handle dhandle, atpx_handle;
++ acpi_status status;
++
++ dhandle = ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ return false;
++
++ status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
++ if (ACPI_FAILURE(status)) {
++ amdgpu_atpx_priv.other_handle = dhandle;
++ return false;
++ }
++ amdgpu_atpx_priv.dhandle = dhandle;
++ amdgpu_atpx_priv.atpx.handle = atpx_handle;
++ return true;
++}
++
++/**
++ * amdgpu_atpx_init - verify the ATPX interface
++ *
++ * Verify the ATPX interface (all asics).
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_atpx_init(void)
++{
++ int r;
++
++ /* set up the ATPX handle */
++ r = amdgpu_atpx_verify_interface(&amdgpu_atpx_priv.atpx);
++ if (r)
++ return r;
++
++ /* validate the atpx setup */
++ r = amdgpu_atpx_validate(&amdgpu_atpx_priv.atpx);
++ if (r)
++ return r;
++
++ return 0;
++}
++
++/**
++ * amdgpu_atpx_get_client_id - get the client id
++ *
++ * @pdev: pci device
++ *
++ * Look up whether we are the integrated or discrete GPU (all asics).
++ * Returns the client id.
++ */
++static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
++{
++ if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
++ return VGA_SWITCHEROO_IGD;
++ else
++ return VGA_SWITCHEROO_DIS;
++}
++
++static struct vga_switcheroo_handler amdgpu_atpx_handler = {
++ .switchto = amdgpu_atpx_switchto,
++ .power_state = amdgpu_atpx_power_state,
++ .init = amdgpu_atpx_init,
++ .get_client_id = amdgpu_atpx_get_client_id,
++};
++
++/**
++ * amdgpu_atpx_detect - detect whether we have PX
++ *
++ * Check if we have a PX system (all asics).
++ * Returns true if we have a PX system, false if not.
++ */
++static bool amdgpu_atpx_detect(void)
++{
++ char acpi_method_name[255] = { 0 };
++ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
++ struct pci_dev *pdev = NULL;
++ bool has_atpx = false;
++ int vga_count = 0;
++
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
++ vga_count++;
++
++		has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
++ }
++
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
++ vga_count++;
++
++		has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
++ }
++
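++	/* PX pairs one integrated and one discrete GPU, so only claim
++	 * ATPX support when exactly two display adapters were found
++	 */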
++ if (has_atpx && vga_count == 2) {
++ acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
++ printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
++ acpi_method_name);
++ amdgpu_atpx_priv.atpx_detected = true;
++ return true;
++ }
++ return false;
++}
++
++/**
++ * amdgpu_register_atpx_handler - register with vga_switcheroo
++ *
++ * Register the PX callbacks with vga_switcheroo (all asics).
++ */
++void amdgpu_register_atpx_handler(void)
++{
++ bool r;
++
++ /* detect if we have any ATPX + 2 VGA in the system */
++ r = amdgpu_atpx_detect();
++ if (!r)
++ return;
++
++ vga_switcheroo_register_handler(&amdgpu_atpx_handler);
++}
++
++/**
++ * amdgpu_unregister_atpx_handler - unregister with vga_switcheroo
++ *
++ * Unregister the PX callbacks with vga_switcheroo (all asics).
++ */
++void amdgpu_unregister_atpx_handler(void)
++{
++ vga_switcheroo_unregister_handler();
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+new file mode 100644
+index 0000000..2742b9a
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+@@ -0,0 +1,221 @@
++/*
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Jerome Glisse
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++
++#define AMDGPU_BENCHMARK_ITERATIONS 1024
++#define AMDGPU_BENCHMARK_COMMON_MODES_N 17
++
++static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
++ uint64_t saddr, uint64_t daddr, int n)
++{
++ unsigned long start_jiffies;
++ unsigned long end_jiffies;
++ struct amdgpu_fence *fence = NULL;
++ int i, r;
++
++ start_jiffies = jiffies;
++ for (i = 0; i < n; i++) {
++ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
++ r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
++ if (r)
++ goto exit_do_move;
++ r = amdgpu_fence_wait(fence, false);
++ if (r)
++ goto exit_do_move;
++ amdgpu_fence_unref(&fence);
++ }
++ end_jiffies = jiffies;
++ r = jiffies_to_msecs(end_jiffies - start_jiffies);
++
++exit_do_move:
++ if (fence)
++ amdgpu_fence_unref(&fence);
++ return r;
++}
++
++static void amdgpu_benchmark_log_results(int n, unsigned size,
++ unsigned int time,
++ unsigned sdomain, unsigned ddomain,
++ char *kind)
++{
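++	/* n * (size >> 10) is the total KiB moved and time is in ms, so
++	 * throughput comes out in KiB/ms, i.e. roughly MB/s; the log
++	 * below also prints it times eight as a Mb/s figure.
++	 */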
++ unsigned int throughput = (n * (size >> 10)) / time;
++ DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
++ " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
++ kind, n, size >> 10, sdomain, ddomain, time,
++ throughput * 8, throughput);
++}
++
++static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
++ unsigned sdomain, unsigned ddomain)
++{
++ struct amdgpu_bo *dobj = NULL;
++ struct amdgpu_bo *sobj = NULL;
++ uint64_t saddr, daddr;
++ int r, n;
++ int time;
++
++ n = AMDGPU_BENCHMARK_ITERATIONS;
++ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
++ if (r) {
++ goto out_cleanup;
++ }
++ r = amdgpu_bo_reserve(sobj, false);
++ if (unlikely(r != 0))
++ goto out_cleanup;
++ r = amdgpu_bo_pin(sobj, sdomain, &saddr);
++ amdgpu_bo_unreserve(sobj);
++ if (r) {
++ goto out_cleanup;
++ }
++ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
++ if (r) {
++ goto out_cleanup;
++ }
++ r = amdgpu_bo_reserve(dobj, false);
++ if (unlikely(r != 0))
++ goto out_cleanup;
++ r = amdgpu_bo_pin(dobj, ddomain, &daddr);
++ amdgpu_bo_unreserve(dobj);
++ if (r) {
++ goto out_cleanup;
++ }
++
++ if (adev->mman.buffer_funcs) {
++ time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
++ if (time < 0)
++ goto out_cleanup;
++ if (time > 0)
++ amdgpu_benchmark_log_results(n, size, time,
++ sdomain, ddomain, "dma");
++ }
++
++out_cleanup:
++ if (sobj) {
++ r = amdgpu_bo_reserve(sobj, false);
++ if (likely(r == 0)) {
++ amdgpu_bo_unpin(sobj);
++ amdgpu_bo_unreserve(sobj);
++ }
++ amdgpu_bo_unref(&sobj);
++ }
++ if (dobj) {
++ r = amdgpu_bo_reserve(dobj, false);
++ if (likely(r == 0)) {
++ amdgpu_bo_unpin(dobj);
++ amdgpu_bo_unreserve(dobj);
++ }
++ amdgpu_bo_unref(&dobj);
++ }
++
++ if (r) {
++ DRM_ERROR("Error while benchmarking BO move.\n");
++ }
++}
++
++void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
++{
++ int i;
++ int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
++ 640 * 480 * 4,
++ 720 * 480 * 4,
++ 800 * 600 * 4,
++ 848 * 480 * 4,
++ 1024 * 768 * 4,
++ 1152 * 768 * 4,
++ 1280 * 720 * 4,
++ 1280 * 800 * 4,
++ 1280 * 854 * 4,
++ 1280 * 960 * 4,
++ 1280 * 1024 * 4,
++ 1440 * 900 * 4,
++ 1400 * 1050 * 4,
++ 1680 * 1050 * 4,
++ 1600 * 1200 * 4,
++ 1920 * 1080 * 4,
++ 1920 * 1200 * 4
++ };
++
++ switch (test_number) {
++ case 1:
++ /* simple test, VRAM to GTT and GTT to VRAM */
++ amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
++ AMDGPU_GEM_DOMAIN_VRAM);
++ amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
++ AMDGPU_GEM_DOMAIN_GTT);
++ break;
++ case 2:
++ /* simple test, VRAM to VRAM */
++ amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
++ AMDGPU_GEM_DOMAIN_VRAM);
++ break;
++ case 3:
++ /* GTT to VRAM, buffer size sweep, powers of 2 */
++ for (i = 1; i <= 16384; i <<= 1)
++ amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_GTT,
++ AMDGPU_GEM_DOMAIN_VRAM);
++ break;
++ case 4:
++ /* VRAM to GTT, buffer size sweep, powers of 2 */
++ for (i = 1; i <= 16384; i <<= 1)
++ amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ AMDGPU_GEM_DOMAIN_GTT);
++ break;
++ case 5:
++ /* VRAM to VRAM, buffer size sweep, powers of 2 */
++ for (i = 1; i <= 16384; i <<= 1)
++ amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ AMDGPU_GEM_DOMAIN_VRAM);
++ break;
++ case 6:
++ /* GTT to VRAM, buffer size sweep, common modes */
++ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
++ amdgpu_benchmark_move(adev, common_modes[i],
++ AMDGPU_GEM_DOMAIN_GTT,
++ AMDGPU_GEM_DOMAIN_VRAM);
++ break;
++ case 7:
++ /* VRAM to GTT, buffer size sweep, common modes */
++ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
++ amdgpu_benchmark_move(adev, common_modes[i],
++ AMDGPU_GEM_DOMAIN_VRAM,
++ AMDGPU_GEM_DOMAIN_GTT);
++ break;
++ case 8:
++ /* VRAM to VRAM, buffer size sweep, common modes */
++ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
++ amdgpu_benchmark_move(adev, common_modes[i],
++ AMDGPU_GEM_DOMAIN_VRAM,
++ AMDGPU_GEM_DOMAIN_VRAM);
++ break;
++ default:
++ DRM_ERROR("Unknown benchmark\n");
++ }
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+new file mode 100644
+index 0000000..d938a11
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -0,0 +1,359 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "atom.h"
++
++#include <linux/vga_switcheroo.h>
++#include <linux/slab.h>
++#include <linux/acpi.h>
++/*
++ * BIOS.
++ */
++
++/* If you boot an IGP board with a discrete card as the primary,
++ * the IGP rom is not accessible via the rom bar as the IGP rom is
++ * part of the system bios. On boot, the system bios puts a
++ * copy of the igp rom at the start of vram if a discrete card is
++ * present.
++ */
++static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
++{
++ uint8_t __iomem *bios;
++ resource_size_t vram_base;
++ resource_size_t size = 256 * 1024; /* ??? */
++
++ if (!(adev->flags & AMDGPU_IS_APU))
++ if (!amdgpu_card_posted(adev))
++ return false;
++
++ adev->bios = NULL;
++ vram_base = pci_resource_start(adev->pdev, 0);
++ bios = ioremap(vram_base, size);
++ if (!bios) {
++ return false;
++ }
++
++ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
++ iounmap(bios);
++ return false;
++ }
++ adev->bios = kmalloc(size, GFP_KERNEL);
++ if (adev->bios == NULL) {
++ iounmap(bios);
++ return false;
++ }
++ memcpy_fromio(adev->bios, bios, size);
++ iounmap(bios);
++ return true;
++}
++
++bool amdgpu_read_bios(struct amdgpu_device *adev)
++{
++ uint8_t __iomem *bios;
++ size_t size;
++
++ adev->bios = NULL;
++ /* XXX: some cards may return 0 for rom size? ddx has a workaround */
++ bios = pci_map_rom(adev->pdev, &size);
++ if (!bios) {
++ return false;
++ }
++
++ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
++ pci_unmap_rom(adev->pdev, bios);
++ return false;
++ }
++ adev->bios = kmemdup(bios, size, GFP_KERNEL);
++ if (adev->bios == NULL) {
++ pci_unmap_rom(adev->pdev, bios);
++ return false;
++ }
++ pci_unmap_rom(adev->pdev, bios);
++ return true;
++}
++
++static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
++{
++ uint8_t __iomem *bios;
++ size_t size;
++
++ adev->bios = NULL;
++
++ bios = pci_platform_rom(adev->pdev, &size);
++ if (!bios) {
++ return false;
++ }
++
++ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
++ return false;
++ }
++ adev->bios = kmemdup(bios, size, GFP_KERNEL);
++ if (adev->bios == NULL) {
++ return false;
++ }
++
++ return true;
++}
++
++#ifdef CONFIG_ACPI
++/* ATRM is used to get the BIOS on the discrete cards in
++ * dual-gpu systems.
++ */
++/* retrieve the ROM in 4k blocks */
++#define ATRM_BIOS_PAGE 4096
++/**
++ * amdgpu_atrm_call - fetch a chunk of the vbios
++ *
++ * @atrm_handle: acpi ATRM handle
++ * @bios: vbios image pointer
++ * @offset: offset of vbios image data to fetch
++ * @len: length of vbios image data to fetch
++ *
++ * Executes ATRM to fetch a chunk of the discrete
++ * vbios image on PX systems (all asics).
++ * Returns the length of the buffer fetched.
++ */
++static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
++ int offset, int len)
++{
++ acpi_status status;
++ union acpi_object atrm_arg_elements[2], *obj;
++ struct acpi_object_list atrm_arg;
++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
++
++ atrm_arg.count = 2;
++ atrm_arg.pointer = &atrm_arg_elements[0];
++
++ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
++ atrm_arg_elements[0].integer.value = offset;
++
++ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
++ atrm_arg_elements[1].integer.value = len;
++
++ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
++ if (ACPI_FAILURE(status)) {
++		printk(KERN_ERR "failed to evaluate ATRM, got %s\n", acpi_format_exception(status));
++ return -ENODEV;
++ }
++
++ obj = (union acpi_object *)buffer.pointer;
++ memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
++ len = obj->buffer.length;
++ kfree(buffer.pointer);
++ return len;
++}
++
++static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
++{
++ int ret;
++ int size = 256 * 1024;
++ int i;
++ struct pci_dev *pdev = NULL;
++ acpi_handle dhandle, atrm_handle;
++ acpi_status status;
++ bool found = false;
++
++ /* ATRM is for the discrete card only */
++ if (adev->flags & AMDGPU_IS_APU)
++ return false;
++
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
++ dhandle = ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ continue;
++
++ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++ if (!ACPI_FAILURE(status)) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found) {
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
++ dhandle = ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ continue;
++
++ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++ if (!ACPI_FAILURE(status)) {
++ found = true;
++ break;
++ }
++ }
++ }
++
++ if (!found)
++ return false;
++
++ adev->bios = kmalloc(size, GFP_KERNEL);
++ if (!adev->bios) {
++ DRM_ERROR("Unable to allocate bios\n");
++ return false;
++ }
++
++ for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
++ ret = amdgpu_atrm_call(atrm_handle,
++ adev->bios,
++ (i * ATRM_BIOS_PAGE),
++ ATRM_BIOS_PAGE);
++ if (ret < ATRM_BIOS_PAGE)
++ break;
++ }
++
++ if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
++		kfree(adev->bios);
++		adev->bios = NULL;
++		return false;
++ }
++ return true;
++}
++#else
++static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
++{
++ return false;
++}
++#endif
++
++static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
++{
++ if (adev->flags & AMDGPU_IS_APU)
++ return igp_read_bios_from_vram(adev);
++ else
++ return amdgpu_asic_read_disabled_bios(adev);
++}
++
++#ifdef CONFIG_ACPI
++static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
++{
++ bool ret = false;
++ struct acpi_table_header *hdr;
++ acpi_size tbl_size;
++ UEFI_ACPI_VFCT *vfct;
++ GOP_VBIOS_CONTENT *vbios;
++ VFCT_IMAGE_HEADER *vhdr;
++
++ if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
++ return false;
++ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
++ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
++ goto out_unmap;
++ }
++
++ vfct = (UEFI_ACPI_VFCT *)hdr;
++ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
++ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
++ goto out_unmap;
++ }
++
++ vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
++ vhdr = &vbios->VbiosHeader;
++ DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
++ vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
++ vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
++
++ if (vhdr->PCIBus != adev->pdev->bus->number ||
++ vhdr->PCIDevice != PCI_SLOT(adev->pdev->devfn) ||
++ vhdr->PCIFunction != PCI_FUNC(adev->pdev->devfn) ||
++ vhdr->VendorID != adev->pdev->vendor ||
++ vhdr->DeviceID != adev->pdev->device) {
++ DRM_INFO("ACPI VFCT table is not for this card\n");
++ goto out_unmap;
++	}
++
++ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
++ DRM_ERROR("ACPI VFCT image truncated\n");
++ goto out_unmap;
++ }
++
++ adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
++ ret = !!adev->bios;
++
++out_unmap:
++ return ret;
++}
++#else
++static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
++{
++ return false;
++}
++#endif
++
++bool amdgpu_get_bios(struct amdgpu_device *adev)
++{
++ bool r;
++ uint16_t tmp;
++
++ r = amdgpu_atrm_get_bios(adev);
++	if (!r)
++		r = amdgpu_acpi_vfct_bios(adev);
++	if (!r)
++		r = igp_read_bios_from_vram(adev);
++	if (!r)
++		r = amdgpu_read_bios(adev);
++	if (!r)
++		r = amdgpu_read_disabled_bios(adev);
++	if (!r)
++		r = amdgpu_read_platform_bios(adev);
++	if (!r || adev->bios == NULL) {
++ DRM_ERROR("Unable to locate a BIOS ROM\n");
++ adev->bios = NULL;
++ return false;
++ }
++ if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
++		printk(KERN_ERR "BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
++ goto free_bios;
++ }
++
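++	/* the word at 0x18 points to the PCI data structure; its code
++	 * type byte at offset 0x14 must be 0 for an x86 image
++	 */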
++ tmp = RBIOS16(0x18);
++ if (RBIOS8(tmp + 0x14) != 0x0) {
++ DRM_INFO("Not an x86 BIOS ROM, not using.\n");
++ goto free_bios;
++ }
++
++ adev->bios_header_start = RBIOS16(0x48);
++ if (!adev->bios_header_start) {
++ goto free_bios;
++ }
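++	/* the ATOM ROM header lives at the offset stored at 0x48; its
++	 * signature 4 bytes in reads "ATOM", or the byte-swapped "MOTA"
++	 * seen on some images
++	 */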
++ tmp = adev->bios_header_start + 4;
++ if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
++ !memcmp(adev->bios + tmp, "MOTA", 4)) {
++ adev->is_atom_bios = true;
++ } else {
++ adev->is_atom_bios = false;
++ }
++
++ DRM_DEBUG("%sBIOS detected\n", adev->is_atom_bios ? "ATOM" : "COM");
++ return true;
++free_bios:
++ kfree(adev->bios);
++ adev->bios = NULL;
++ return false;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+new file mode 100644
+index 0000000..819fb86
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -0,0 +1,268 @@
++/*
++ * Copyright 2015 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Christian König <deathsimple@vodafone.de>
++ */
++
++#include <drm/drmP.h>
++#include "amdgpu.h"
++
++static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
++ struct amdgpu_bo_list **result,
++ int *id)
++{
++ int r;
++
++ *result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
++ if (!*result)
++ return -ENOMEM;
++
++ mutex_lock(&fpriv->bo_list_lock);
++ r = idr_alloc(&fpriv->bo_list_handles, *result,
++ 0, 0, GFP_KERNEL);
++ if (r < 0) {
++ mutex_unlock(&fpriv->bo_list_lock);
++ kfree(*result);
++ return r;
++ }
++ *id = r;
++
++ mutex_init(&(*result)->lock);
++ (*result)->num_entries = 0;
++ (*result)->array = NULL;
++
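++	/* hand the new list back locked; the caller releases it with
++	 * amdgpu_bo_list_put() once it has filled it in
++	 */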
++ mutex_lock(&(*result)->lock);
++ mutex_unlock(&fpriv->bo_list_lock);
++
++ return 0;
++}
++
++static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
++{
++ struct amdgpu_bo_list *list;
++
++ mutex_lock(&fpriv->bo_list_lock);
++ list = idr_find(&fpriv->bo_list_handles, id);
++ if (list) {
++ mutex_lock(&list->lock);
++ idr_remove(&fpriv->bo_list_handles, id);
++ mutex_unlock(&list->lock);
++ amdgpu_bo_list_free(list);
++ }
++ mutex_unlock(&fpriv->bo_list_lock);
++}
++
++static int amdgpu_bo_list_set(struct amdgpu_device *adev,
++ struct drm_file *filp,
++ struct amdgpu_bo_list *list,
++ struct drm_amdgpu_bo_list_entry *info,
++ unsigned num_entries)
++{
++ struct amdgpu_bo_list_entry *array;
++ struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
++ struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
++ struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
++
++ bool has_userptr = false;
++ unsigned i;
++
++ array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
++ if (!array)
++ return -ENOMEM;
++ memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
++
++ for (i = 0; i < num_entries; ++i) {
++ struct amdgpu_bo_list_entry *entry = &array[i];
++ struct drm_gem_object *gobj;
++
++ gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
++ if (!gobj)
++ goto error_free;
++
++ entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
++ drm_gem_object_unreference_unlocked(gobj);
++ entry->priority = info[i].bo_priority;
++ entry->prefered_domains = entry->robj->initial_domain;
++ entry->allowed_domains = entry->prefered_domains;
++ if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
++ entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
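++		/* userptr BOs are backed by pinned system pages and can
++		 * therefore only ever live in the GTT domain
++		 */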
++ if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
++ has_userptr = true;
++ entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
++ entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
++ }
++ entry->tv.bo = &entry->robj->tbo;
++ entry->tv.shared = true;
++
++ if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
++ gds_obj = entry->robj;
++ if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
++ gws_obj = entry->robj;
++ if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
++ oa_obj = entry->robj;
++ }
++
++ for (i = 0; i < list->num_entries; ++i)
++ amdgpu_bo_unref(&list->array[i].robj);
++
++ drm_free_large(list->array);
++
++ list->gds_obj = gds_obj;
++ list->gws_obj = gws_obj;
++ list->oa_obj = oa_obj;
++ list->has_userptr = has_userptr;
++ list->array = array;
++ list->num_entries = num_entries;
++
++ return 0;
++
++error_free:
++	while (i--)
++		amdgpu_bo_unref(&array[i].robj);
++	drm_free_large(array);
++	return -ENOENT;
++}
++
++struct amdgpu_bo_list *
++amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
++{
++ struct amdgpu_bo_list *result;
++
++ mutex_lock(&fpriv->bo_list_lock);
++ result = idr_find(&fpriv->bo_list_handles, id);
++ if (result)
++ mutex_lock(&result->lock);
++ mutex_unlock(&fpriv->bo_list_lock);
++ return result;
++}
++
++void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
++{
++ mutex_unlock(&list->lock);
++}
++
++void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
++{
++ unsigned i;
++
++ for (i = 0; i < list->num_entries; ++i)
++ amdgpu_bo_unref(&list->array[i].robj);
++
++ mutex_destroy(&list->lock);
++ drm_free_large(list->array);
++ kfree(list);
++}
++
++int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
++
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_fpriv *fpriv = filp->driver_priv;
++ union drm_amdgpu_bo_list *args = data;
++ uint32_t handle = args->in.list_handle;
++	const void __user *uptr = (const void __user *)(long)args->in.bo_info_ptr;
++
++ struct drm_amdgpu_bo_list_entry *info;
++ struct amdgpu_bo_list *list;
++
++ int r;
++
++ info = drm_malloc_ab(args->in.bo_number,
++ sizeof(struct drm_amdgpu_bo_list_entry));
++ if (!info)
++ return -ENOMEM;
++
++ /* copy the handle array from userspace to a kernel buffer */
++ r = -EFAULT;
++ if (likely(info_size == args->in.bo_info_size)) {
++ unsigned long bytes = args->in.bo_number *
++ args->in.bo_info_size;
++
++ if (copy_from_user(info, uptr, bytes))
++ goto error_free;
++
++ } else {
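++		/* userspace may pass a larger or smaller per-entry
++		 * struct than the kernel knows; copy the common prefix
++		 * of each entry and leave the remainder zeroed
++		 */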
++ unsigned long bytes = min(args->in.bo_info_size, info_size);
++ unsigned i;
++
++ memset(info, 0, args->in.bo_number * info_size);
++ for (i = 0; i < args->in.bo_number; ++i) {
++ if (copy_from_user(&info[i], uptr, bytes))
++ goto error_free;
++
++ uptr += args->in.bo_info_size;
++ }
++ }
++
++ switch (args->in.operation) {
++ case AMDGPU_BO_LIST_OP_CREATE:
++ r = amdgpu_bo_list_create(fpriv, &list, &handle);
++ if (r)
++ goto error_free;
++
++ r = amdgpu_bo_list_set(adev, filp, list, info,
++ args->in.bo_number);
++ amdgpu_bo_list_put(list);
++ if (r)
++ goto error_free;
++
++ break;
++
++ case AMDGPU_BO_LIST_OP_DESTROY:
++ amdgpu_bo_list_destroy(fpriv, handle);
++ handle = 0;
++ break;
++
++ case AMDGPU_BO_LIST_OP_UPDATE:
++ r = -ENOENT;
++ list = amdgpu_bo_list_get(fpriv, handle);
++ if (!list)
++ goto error_free;
++
++ r = amdgpu_bo_list_set(adev, filp, list, info,
++ args->in.bo_number);
++ amdgpu_bo_list_put(list);
++ if (r)
++ goto error_free;
++
++ break;
++
++ default:
++ r = -EINVAL;
++ goto error_free;
++ }
++
++ memset(args, 0, sizeof(*args));
++ args->out.list_handle = handle;
++ drm_free_large(info);
++
++ return 0;
++
++error_free:
++ drm_free_large(info);
++ return r;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+new file mode 100644
+index 0000000..6a8d28f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -0,0 +1,1907 @@
++/*
++ * Copyright 2007-8 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ */
++#include <drm/drmP.h>
++#include <drm/drm_edid.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/drm_fb_helper.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "atom.h"
++#include "atombios_encoders.h"
++#include "atombios_dp.h"
++#include "amdgpu_connectors.h"
++#include "amdgpu_i2c.h"
++
++#include <linux/pm_runtime.h>
++
++void amdgpu_connector_hotplug(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++
++ /* bail if the connector does not have hpd pin, e.g.,
++ * VGA, TV, etc.
++ */
++ if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE)
++ return;
++
++ amdgpu_display_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
++
++ /* if the connector is already off, don't turn it back on */
++ if (connector->dpms != DRM_MODE_DPMS_ON)
++ return;
++
++ /* just deal with DP (not eDP) here. */
++ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
++ struct amdgpu_connector_atom_dig *dig_connector =
++ amdgpu_connector->con_priv;
++
++ /* if existing sink type was not DP no need to retrain */
++ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
++ return;
++
++ /* first get sink type as it may be reset after (un)plug */
++ dig_connector->dp_sink_type = amdgpu_atombios_dp_get_sinktype(amdgpu_connector);
++ /* don't do anything if sink is not display port, i.e.,
++ * passive dp->(dvi|hdmi) adaptor
++ */
++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
++ int saved_dpms = connector->dpms;
++ /* Only turn off the display if it's physically disconnected */
++ if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
++ /* set it to OFF so that drm_helper_connector_dpms()
++ * won't return immediately since the current state
++ * is ON at this point.
++ */
++ connector->dpms = DRM_MODE_DPMS_OFF;
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
++ }
++ connector->dpms = saved_dpms;
++ }
++ }
++}
++
++static void amdgpu_connector_property_change_mode(struct drm_encoder *encoder)
++{
++ struct drm_crtc *crtc = encoder->crtc;
++
++ if (crtc && crtc->enabled) {
++ drm_crtc_helper_set_mode(crtc, &crtc->mode,
++ crtc->x, crtc->y, crtc->primary->fb);
++ }
++}
++
++int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector;
++ int bpc = 8;
++ unsigned mode_clock, max_tmds_clock;
++
++ switch (connector->connector_type) {
++ case DRM_MODE_CONNECTOR_DVII:
++ case DRM_MODE_CONNECTOR_HDMIB:
++ if (amdgpu_connector->use_digital) {
++ if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
++ if (connector->display_info.bpc)
++ bpc = connector->display_info.bpc;
++ }
++ }
++ break;
++ case DRM_MODE_CONNECTOR_DVID:
++ case DRM_MODE_CONNECTOR_HDMIA:
++ if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
++ if (connector->display_info.bpc)
++ bpc = connector->display_info.bpc;
++ }
++ break;
++ case DRM_MODE_CONNECTOR_DisplayPort:
++ dig_connector = amdgpu_connector->con_priv;
++ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
++ drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
++ if (connector->display_info.bpc)
++ bpc = connector->display_info.bpc;
++ }
++ break;
++ case DRM_MODE_CONNECTOR_eDP:
++ case DRM_MODE_CONNECTOR_LVDS:
++ if (connector->display_info.bpc)
++ bpc = connector->display_info.bpc;
++ else {
++ struct drm_connector_helper_funcs *connector_funcs =
++ connector->helper_private;
++ struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
++
++ if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
++ bpc = 6;
++ else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
++ bpc = 8;
++ }
++ break;
++ }
++
++ if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
++ /*
++ * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
++ * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
++ * 12 bpc is always supported on hdmi deep color sinks, as this is
++ * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
++ */
++ if (bpc > 12) {
++ DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
++ connector->name, bpc);
++ bpc = 12;
++ }
++
++ /* Any defined maximum tmds clock limit we must not exceed? */
++ if (connector->max_tmds_clock > 0) {
++ /* mode_clock is clock in kHz for mode to be modeset on this connector */
++ mode_clock = amdgpu_connector->pixelclock_for_modeset;
++
++ /* Maximum allowable input clock in kHz */
++ max_tmds_clock = connector->max_tmds_clock * 1000;
++
++ DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
++ connector->name, mode_clock, max_tmds_clock);
++
++ /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
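++			/* deep color scales the TMDS clock: 12 bpc runs
++			 * at 3/2 of the 8 bpc dot clock and 10 bpc at
++			 * 5/4, hence the ratios tested below
++			 */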
++ if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
++ if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
++ (mode_clock * 5/4 <= max_tmds_clock))
++ bpc = 10;
++ else
++ bpc = 8;
++
++ DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
++ connector->name, bpc);
++ }
++
++ if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
++ bpc = 8;
++ DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
++ connector->name, bpc);
++ } else if (bpc > 8) {
++ /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
++ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
++ connector->name);
++ bpc = 8;
++ }
++ }
++ }
++
++ if ((amdgpu_deep_color == 0) && (bpc > 8)) {
++ DRM_DEBUG("%s: Deep color disabled. Set amdgpu module param deep_color=1 to enable.\n",
++ connector->name);
++ bpc = 8;
++ }
++
++ DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
++ connector->name, connector->display_info.bpc, bpc);
++
++ return bpc;
++}
++
++static void
++amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
++ enum drm_connector_status status)
++{
++ struct drm_encoder *best_encoder = NULL;
++ struct drm_encoder *encoder = NULL;
++ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
++ bool connected;
++ int i;
++
++ best_encoder = connector_funcs->best_encoder(connector);
++
++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++ if (connector->encoder_ids[i] == 0)
++ break;
++
++ encoder = drm_encoder_find(connector->dev,
++ connector->encoder_ids[i]);
++ if (!encoder)
++ continue;
++
++ if ((encoder == best_encoder) && (status == connector_status_connected))
++ connected = true;
++ else
++ connected = false;
++
++ amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected);
++
++ }
++}
++
++static struct drm_encoder *
++amdgpu_connector_find_encoder(struct drm_connector *connector,
++ int encoder_type)
++{
++ struct drm_encoder *encoder;
++ int i;
++
++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++ if (connector->encoder_ids[i] == 0)
++ break;
++ encoder = drm_encoder_find(connector->dev,
++ connector->encoder_ids[i]);
++ if (!encoder)
++ continue;
++
++ if (encoder->encoder_type == encoder_type)
++ return encoder;
++ }
++ return NULL;
++}
++
++struct edid *amdgpu_connector_edid(struct drm_connector *connector)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
++
++ if (amdgpu_connector->edid) {
++ return amdgpu_connector->edid;
++ } else if (edid_blob) {
++ struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
++ if (edid)
++ amdgpu_connector->edid = edid;
++ }
++ return amdgpu_connector->edid;
++}
++
++static struct edid *
++amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
++{
++ struct edid *edid;
++
++ if (adev->mode_info.bios_hardcoded_edid) {
++ edid = kmalloc(adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
++ if (edid) {
++ memcpy((unsigned char *)edid,
++ (unsigned char *)adev->mode_info.bios_hardcoded_edid,
++ adev->mode_info.bios_hardcoded_edid_size);
++ return edid;
++ }
++ }
++ return NULL;
++}
++
++static void amdgpu_connector_get_edid(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++
++ if (amdgpu_connector->edid)
++ return;
++
++ /* on hw with routers, select right port */
++ if (amdgpu_connector->router.ddc_valid)
++ amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
++
++ if ((amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
++ ENCODER_OBJECT_ID_NONE) &&
++ amdgpu_connector->ddc_bus->has_aux) {
++ amdgpu_connector->edid = drm_get_edid(connector,
++ &amdgpu_connector->ddc_bus->aux.ddc);
++ } else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
++ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
++ struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
++
++ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
++ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
++ amdgpu_connector->ddc_bus->has_aux)
++ amdgpu_connector->edid = drm_get_edid(connector,
++ &amdgpu_connector->ddc_bus->aux.ddc);
++ else if (amdgpu_connector->ddc_bus)
++ amdgpu_connector->edid = drm_get_edid(connector,
++ &amdgpu_connector->ddc_bus->adapter);
++ } else if (amdgpu_connector->ddc_bus) {
++ amdgpu_connector->edid = drm_get_edid(connector,
++ &amdgpu_connector->ddc_bus->adapter);
++ }
++
++ if (!amdgpu_connector->edid) {
++ /* some laptops provide a hardcoded edid in rom for LCDs */
++ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
++ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
++ amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
++ }
++}
++
++static void amdgpu_connector_free_edid(struct drm_connector *connector)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++
++ if (amdgpu_connector->edid) {
++ kfree(amdgpu_connector->edid);
++ amdgpu_connector->edid = NULL;
++ }
++}
++
++static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ int ret;
++
++ if (amdgpu_connector->edid) {
++ drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
++ ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
++ drm_edid_to_eld(connector, amdgpu_connector->edid);
++ return ret;
++ }
++ drm_mode_connector_update_edid_property(connector, NULL);
++ return 0;
++}
++
++static struct drm_encoder *
++amdgpu_connector_best_single_encoder(struct drm_connector *connector)
++{
++ int enc_id = connector->encoder_ids[0];
++
++	/* pick the first encoder id, if any */
++ if (enc_id)
++ return drm_encoder_find(connector->dev, enc_id);
++ return NULL;
++}
++
++static void amdgpu_get_native_mode(struct drm_connector *connector)
++{
++ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
++ struct amdgpu_encoder *amdgpu_encoder;
++
++ if (encoder == NULL)
++ return;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ if (!list_empty(&connector->probed_modes)) {
++ struct drm_display_mode *preferred_mode =
++ list_first_entry(&connector->probed_modes,
++ struct drm_display_mode, head);
++
++ amdgpu_encoder->native_mode = *preferred_mode;
++ } else {
++ amdgpu_encoder->native_mode.clock = 0;
++ }
++}
++
++static struct drm_display_mode *
++amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_display_mode *mode = NULL;
++ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
++
++ if (native_mode->hdisplay != 0 &&
++ native_mode->vdisplay != 0 &&
++ native_mode->clock != 0) {
++ mode = drm_mode_duplicate(dev, native_mode);
++ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
++ drm_mode_set_name(mode);
++
++ DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
++ } else if (native_mode->hdisplay != 0 &&
++ native_mode->vdisplay != 0) {
++ /* mac laptops without an edid */
++ /* Note that this is not necessarily the exact panel mode,
++ * but an approximation based on the cvt formula. For these
++ * systems we should ideally read the mode info out of the
++ * registers or add a mode table, but this works and is much
++ * simpler.
++ */
++ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
++ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
++ DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
++ }
++ return mode;
++}
++
++static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
++ struct drm_connector *connector)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_display_mode *mode = NULL;
++ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
++ int i;
++ struct mode_size {
++ int w;
++ int h;
++ } common_modes[17] = {
++ { 640, 480},
++ { 720, 480},
++ { 800, 600},
++ { 848, 480},
++ {1024, 768},
++ {1152, 768},
++ {1280, 720},
++ {1280, 800},
++ {1280, 854},
++ {1280, 960},
++ {1280, 1024},
++ {1440, 900},
++ {1400, 1050},
++ {1680, 1050},
++ {1600, 1200},
++ {1920, 1080},
++ {1920, 1200}
++ };
++
++ for (i = 0; i < 17; i++) {
++ if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
++ if (common_modes[i].w > 1024 ||
++ common_modes[i].h > 768)
++ continue;
++ }
++ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++ if (common_modes[i].w > native_mode->hdisplay ||
++ common_modes[i].h > native_mode->vdisplay ||
++ (common_modes[i].w == native_mode->hdisplay &&
++ common_modes[i].h == native_mode->vdisplay))
++ continue;
++ }
++ if (common_modes[i].w < 320 || common_modes[i].h < 200)
++ continue;
++
++ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
++ drm_mode_probed_add(connector, mode);
++ }
++}
++
++static int amdgpu_connector_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t val)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct drm_encoder *encoder;
++ struct amdgpu_encoder *amdgpu_encoder;
++
++ if (property == adev->mode_info.coherent_mode_property) {
++ struct amdgpu_encoder_atom_dig *dig;
++ bool new_coherent_mode;
++
++ /* need to find digital encoder on connector */
++ encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
++ if (!encoder)
++ return 0;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ if (!amdgpu_encoder->enc_priv)
++ return 0;
++
++ dig = amdgpu_encoder->enc_priv;
++ new_coherent_mode = val ? true : false;
++ if (dig->coherent_mode != new_coherent_mode) {
++ dig->coherent_mode = new_coherent_mode;
++ amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
++ }
++ }
++
++ if (property == adev->mode_info.audio_property) {
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ /* need to find digital encoder on connector */
++ encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
++ if (!encoder)
++ return 0;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ if (amdgpu_connector->audio != val) {
++ amdgpu_connector->audio = val;
++ amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
++ }
++ }
++
++ if (property == adev->mode_info.dither_property) {
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ /* need to find digital encoder on connector */
++ encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
++ if (!encoder)
++ return 0;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ if (amdgpu_connector->dither != val) {
++ amdgpu_connector->dither = val;
++ amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
++ }
++ }
++
++ if (property == adev->mode_info.underscan_property) {
++ /* need to find digital encoder on connector */
++ encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
++ if (!encoder)
++ return 0;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ if (amdgpu_encoder->underscan_type != val) {
++ amdgpu_encoder->underscan_type = val;
++ amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
++ }
++ }
++
++ if (property == adev->mode_info.underscan_hborder_property) {
++ /* need to find digital encoder on connector */
++ encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
++ if (!encoder)
++ return 0;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ if (amdgpu_encoder->underscan_hborder != val) {
++ amdgpu_encoder->underscan_hborder = val;
++ amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
++ }
++ }
++
++ if (property == adev->mode_info.underscan_vborder_property) {
++ /* need to find digital encoder on connector */
++ encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
++ if (!encoder)
++ return 0;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ if (amdgpu_encoder->underscan_vborder != val) {
++ amdgpu_encoder->underscan_vborder = val;
++ amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
++ }
++ }
++
++ if (property == adev->mode_info.load_detect_property) {
++ struct amdgpu_connector *amdgpu_connector =
++ to_amdgpu_connector(connector);
++
++ if (val == 0)
++ amdgpu_connector->dac_load_detect = false;
++ else
++ amdgpu_connector->dac_load_detect = true;
++ }
++
++ if (property == dev->mode_config.scaling_mode_property) {
++ enum amdgpu_rmx_type rmx_type;
++
++ if (connector->encoder) {
++ amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
++ } else {
++ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
++ amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
++ }
++
++ switch (val) {
++ default:
++ case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
++ case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
++ case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
++ case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
++ }
++ if (amdgpu_encoder->rmx_type == rmx_type)
++ return 0;
++
++		if ((rmx_type != RMX_OFF) &&
++ (amdgpu_encoder->native_mode.clock == 0))
++ return 0;
++
++ amdgpu_encoder->rmx_type = rmx_type;
++
++ amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
++ }
++
++ return 0;
++}
++
++static void
++amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
++ struct drm_connector *connector)
++{
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
++ struct drm_display_mode *t, *mode;
++
++ /* If the EDID preferred mode doesn't match the native mode, use it */
++ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
++ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
++ if (mode->hdisplay != native_mode->hdisplay ||
++ mode->vdisplay != native_mode->vdisplay)
++ memcpy(native_mode, mode, sizeof(*mode));
++ }
++ }
++
++ /* Try to get native mode details from EDID if necessary */
++ if (!native_mode->clock) {
++ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
++ if (mode->hdisplay == native_mode->hdisplay &&
++ mode->vdisplay == native_mode->vdisplay) {
++ *native_mode = *mode;
++ drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
++ DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
++ break;
++ }
++ }
++ }
++
++ if (!native_mode->clock) {
++ DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
++ amdgpu_encoder->rmx_type = RMX_OFF;
++ }
++}
++
++static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
++{
++ struct drm_encoder *encoder;
++ int ret = 0;
++ struct drm_display_mode *mode;
++
++ amdgpu_connector_get_edid(connector);
++ ret = amdgpu_connector_ddc_get_modes(connector);
++ if (ret > 0) {
++ encoder = amdgpu_connector_best_single_encoder(connector);
++ if (encoder) {
++ amdgpu_connector_fixup_lcd_native_mode(encoder, connector);
++ /* add scaled modes */
++ amdgpu_connector_add_common_modes(encoder, connector);
++ }
++ return ret;
++ }
++
++ encoder = amdgpu_connector_best_single_encoder(connector);
++ if (!encoder)
++ return 0;
++
++ /* we have no EDID modes */
++ mode = amdgpu_connector_lcd_native_mode(encoder);
++ if (mode) {
++ ret = 1;
++ drm_mode_probed_add(connector, mode);
++ /* add the width/height from vbios tables if available */
++ connector->display_info.width_mm = mode->width_mm;
++ connector->display_info.height_mm = mode->height_mm;
++ /* add scaled modes */
++ amdgpu_connector_add_common_modes(encoder, connector);
++ }
++
++ return ret;
++}
++
++static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
++
++ if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
++ return MODE_PANEL;
++
++ if (encoder) {
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
++
++ /* AVIVO hardware supports downscaling modes larger than the panel
++ * to the panel size, but I'm not sure this is desirable.
++ */
++ if ((mode->hdisplay > native_mode->hdisplay) ||
++ (mode->vdisplay > native_mode->vdisplay))
++ return MODE_PANEL;
++
++ /* if scaling is disabled, block non-native modes */
++ if (amdgpu_encoder->rmx_type == RMX_OFF) {
++ if ((mode->hdisplay != native_mode->hdisplay) ||
++ (mode->vdisplay != native_mode->vdisplay))
++ return MODE_PANEL;
++ }
++ }
++
++ return MODE_OK;
++}
++
++static enum drm_connector_status
++amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
++ enum drm_connector_status ret = connector_status_disconnected;
++ int r;
++
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++
++ if (encoder) {
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
++
++ /* check if panel is valid */
++ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
++ ret = connector_status_connected;
++
++ }
++
++ /* check for edid as well */
++ amdgpu_connector_get_edid(connector);
++ if (amdgpu_connector->edid)
++ ret = connector_status_connected;
++ /* check acpi lid status ??? */
++
++ amdgpu_connector_update_scratch_regs(connector, ret);
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++ return ret;
++}
++
++static void amdgpu_connector_destroy(struct drm_connector *connector)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++
++ if (amdgpu_connector->ddc_bus->has_aux)
++ drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
++ amdgpu_connector_free_edid(connector);
++ kfree(amdgpu_connector->con_priv);
++ drm_connector_unregister(connector);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_encoder *amdgpu_encoder;
++ enum amdgpu_rmx_type rmx_type;
++
++ DRM_DEBUG_KMS("\n");
++ if (property != dev->mode_config.scaling_mode_property)
++ return 0;
++
++ if (connector->encoder)
++ amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
++ else {
++ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
++ amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
++ }
++
++ switch (value) {
++ case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
++ case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
++ case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
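++	/* unrecognized values fall through to full scaling */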
++ default:
++ case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
++ }
++ if (amdgpu_encoder->rmx_type == rmx_type)
++ return 0;
++
++ amdgpu_encoder->rmx_type = rmx_type;
++
++ amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
++ return 0;
++}
++
++
++static const struct drm_connector_helper_funcs amdgpu_connector_lvds_helper_funcs = {
++ .get_modes = amdgpu_connector_lvds_get_modes,
++ .mode_valid = amdgpu_connector_lvds_mode_valid,
++ .best_encoder = amdgpu_connector_best_single_encoder,
++};
++
++static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = amdgpu_connector_lvds_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .destroy = amdgpu_connector_destroy,
++ .set_property = amdgpu_connector_set_lcd_property,
++};
++
++static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
++{
++ int ret;
++
++ amdgpu_connector_get_edid(connector);
++ ret = amdgpu_connector_ddc_get_modes(connector);
++
++ return ret;
++}
++
++static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++
++ /* XXX check mode bandwidth */
++
++ if ((mode->clock / 10) > adev->clock.max_pixel_clock)
++ return MODE_CLOCK_HIGH;
++
++ return MODE_OK;
++}
++
++static enum drm_connector_status
++amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct drm_encoder *encoder;
++ struct drm_encoder_helper_funcs *encoder_funcs;
++ bool dret = false;
++ enum drm_connector_status ret = connector_status_disconnected;
++ int r;
++
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++
++ encoder = amdgpu_connector_best_single_encoder(connector);
++ if (!encoder)
++ ret = connector_status_disconnected;
++
++ if (amdgpu_connector->ddc_bus)
++ dret = amdgpu_ddc_probe(amdgpu_connector, false);
++ if (dret) {
++ amdgpu_connector->detected_by_load = false;
++ amdgpu_connector_free_edid(connector);
++ amdgpu_connector_get_edid(connector);
++
++ if (!amdgpu_connector->edid) {
++ DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
++ connector->name);
++ ret = connector_status_connected;
++ } else {
++ amdgpu_connector->use_digital =
++ !!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
++
++ /* some oems have boards with separate digital and analog connectors
++ * with a shared ddc line (often vga + hdmi)
++ */
++ if (amdgpu_connector->use_digital && amdgpu_connector->shared_ddc) {
++ amdgpu_connector_free_edid(connector);
++ ret = connector_status_disconnected;
++ } else {
++ ret = connector_status_connected;
++ }
++ }
++ } else {
++ /* if we aren't forcing don't do destructive polling */
++ if (!force) {
++ /* only return the previous status if we last
++ * detected a monitor via load.
++ */
++ if (amdgpu_connector->detected_by_load)
++ ret = connector->status;
++ goto out;
++ }
++
++ if (amdgpu_connector->dac_load_detect && encoder) {
++ encoder_funcs = encoder->helper_private;
++ ret = encoder_funcs->detect(encoder, connector);
++ if (ret != connector_status_disconnected)
++ amdgpu_connector->detected_by_load = true;
++ }
++ }
++
++ amdgpu_connector_update_scratch_regs(connector, ret);
++
++out:
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++
++ return ret;
++}
++
++static const struct drm_connector_helper_funcs amdgpu_connector_vga_helper_funcs = {
++ .get_modes = amdgpu_connector_vga_get_modes,
++ .mode_valid = amdgpu_connector_vga_mode_valid,
++ .best_encoder = amdgpu_connector_best_single_encoder,
++};
++
++static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = amdgpu_connector_vga_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .destroy = amdgpu_connector_destroy,
++ .set_property = amdgpu_connector_set_property,
++};
++
++static bool
++amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ enum drm_connector_status status;
++
++ if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE) {
++ if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd))
++ status = connector_status_connected;
++ else
++ status = connector_status_disconnected;
++ if (connector->status == status)
++ return true;
++ }
++
++ return false;
++}
++
++/*
++ * DVI is complicated
++ * Do a DDC probe, if DDC probe passes, get the full EDID so
++ * we can do analog/digital monitor detection at this point.
++ * If the monitor is an analog monitor or we got no DDC,
++ * we need to find the DAC encoder object for this connector.
++ * If we got no DDC, we do load detection on the DAC encoder object.
++ * If we got analog DDC or load detection passes on the DAC encoder
++ * we have to check if this analog encoder is shared with anyone else (TV)
++ * if its shared we have to set the other connector to disconnected.
++ */
++static enum drm_connector_status
++amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct drm_encoder *encoder = NULL;
++ struct drm_encoder_helper_funcs *encoder_funcs;
++ int i, r;
++ enum drm_connector_status ret = connector_status_disconnected;
++ bool dret = false, broken_edid = false;
++
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++
++ if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
++ ret = connector->status;
++ goto exit;
++ }
++
++ if (amdgpu_connector->ddc_bus)
++ dret = amdgpu_ddc_probe(amdgpu_connector, false);
++ if (dret) {
++ amdgpu_connector->detected_by_load = false;
++ amdgpu_connector_free_edid(connector);
++ amdgpu_connector_get_edid(connector);
++
++ if (!amdgpu_connector->edid) {
++ DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
++ connector->name);
++ ret = connector_status_connected;
++ broken_edid = true; /* defer use_digital to later */
++ } else {
++ amdgpu_connector->use_digital =
++ !!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
++
++ /* some oems have boards with separate digital and analog connectors
++ * with a shared ddc line (often vga + hdmi)
++ */
++ if ((!amdgpu_connector->use_digital) && amdgpu_connector->shared_ddc) {
++ amdgpu_connector_free_edid(connector);
++ ret = connector_status_disconnected;
++ } else {
++ ret = connector_status_connected;
++ }
++
++ /* This gets complicated. We have boards with VGA + HDMI with a
++ * shared DDC line and we have boards with DVI-D + HDMI with a shared
++ * DDC line. The latter is more complex because with DVI<->HDMI adapters
++ * you don't really know what's connected to which port as both are digital.
++ */
++ if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
++ struct drm_connector *list_connector;
++ struct amdgpu_connector *list_amdgpu_connector;
++ list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
++ if (connector == list_connector)
++ continue;
++ list_amdgpu_connector = to_amdgpu_connector(list_connector);
++ if (list_amdgpu_connector->shared_ddc &&
++ (list_amdgpu_connector->ddc_bus->rec.i2c_id ==
++ amdgpu_connector->ddc_bus->rec.i2c_id)) {
++ /* cases where both connectors are digital */
++ if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
++ /* hpd is our only option in this case */
++ if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
++ amdgpu_connector_free_edid(connector);
++ ret = connector_status_disconnected;
++ }
++ }
++ }
++ }
++ }
++ }
++ }
++
++ if ((ret == connector_status_connected) && (amdgpu_connector->use_digital == true))
++ goto out;
++
++ /* DVI-D and HDMI-A are digital only */
++ if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
++ (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
++ goto out;
++
++ /* if we aren't forcing don't do destructive polling */
++ if (!force) {
++ /* only return the previous status if we last
++ * detected a monitor via load.
++ */
++ if (amdgpu_connector->detected_by_load)
++ ret = connector->status;
++ goto out;
++ }
++
++ /* find analog encoder */
++ if (amdgpu_connector->dac_load_detect) {
++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++ if (connector->encoder_ids[i] == 0)
++ break;
++
++ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
++ if (!encoder)
++ continue;
++
++ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
++ encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
++ continue;
++
++ encoder_funcs = encoder->helper_private;
++ if (encoder_funcs->detect) {
++ if (!broken_edid) {
++ if (ret != connector_status_connected) {
++ /* deal with analog monitors without DDC */
++ ret = encoder_funcs->detect(encoder, connector);
++ if (ret == connector_status_connected) {
++ amdgpu_connector->use_digital = false;
++ }
++ if (ret != connector_status_disconnected)
++ amdgpu_connector->detected_by_load = true;
++ }
++ } else {
++ enum drm_connector_status lret;
++ /* assume digital unless load detected otherwise */
++ amdgpu_connector->use_digital = true;
++ lret = encoder_funcs->detect(encoder, connector);
++					DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret);
++ if (lret == connector_status_connected)
++ amdgpu_connector->use_digital = false;
++ }
++ break;
++ }
++ }
++ }
++
++out:
++ /* updated in get modes as well since we need to know if it's analog or digital */
++ amdgpu_connector_update_scratch_regs(connector, ret);
++
++exit:
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++
++ return ret;
++}
++
++/* okay need to be smart in here about which encoder to pick */
++static struct drm_encoder *
++amdgpu_connector_dvi_encoder(struct drm_connector *connector)
++{
++ int enc_id = connector->encoder_ids[0];
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct drm_encoder *encoder;
++ int i;
++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++ if (connector->encoder_ids[i] == 0)
++ break;
++
++ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
++ if (!encoder)
++ continue;
++
++ if (amdgpu_connector->use_digital == true) {
++ if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
++ return encoder;
++ } else {
++ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
++ encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
++ return encoder;
++ }
++ }
++
++ /* see if we have a default encoder TODO */
++
++	/* then check use_digital */
++ /* pick the first one */
++ if (enc_id)
++ return drm_encoder_find(connector->dev, enc_id);
++ return NULL;
++}
++
++static void amdgpu_connector_dvi_force(struct drm_connector *connector)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ if (connector->force == DRM_FORCE_ON)
++ amdgpu_connector->use_digital = false;
++ if (connector->force == DRM_FORCE_ON_DIGITAL)
++ amdgpu_connector->use_digital = true;
++}
++
++static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++
++ /* XXX check mode bandwidth */
++
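++	/* above 165 MHz (the single-link TMDS limit) only dual link DVI or HDMI can work */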
++ if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
++ if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
++ (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
++ (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
++ return MODE_OK;
++ } else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
++			/* HDMI 1.3+ supports a max TMDS clock of 340 MHz */
++ if (mode->clock > 340000)
++ return MODE_CLOCK_HIGH;
++ else
++ return MODE_OK;
++ } else {
++ return MODE_CLOCK_HIGH;
++ }
++ }
++
++ /* check against the max pixel clock */
++ if ((mode->clock / 10) > adev->clock.max_pixel_clock)
++ return MODE_CLOCK_HIGH;
++
++ return MODE_OK;
++}
++
++static const struct drm_connector_helper_funcs amdgpu_connector_dvi_helper_funcs = {
++ .get_modes = amdgpu_connector_vga_get_modes,
++ .mode_valid = amdgpu_connector_dvi_mode_valid,
++ .best_encoder = amdgpu_connector_dvi_encoder,
++};
++
++static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = amdgpu_connector_dvi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = amdgpu_connector_set_property,
++ .destroy = amdgpu_connector_destroy,
++ .force = amdgpu_connector_dvi_force,
++};
++
++static int amdgpu_connector_dp_get_modes(struct drm_connector *connector)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
++ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
++ int ret;
++
++ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
++ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
++ struct drm_display_mode *mode;
++
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
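++			/* the panel must be powered for DDC/EDID reads on eDP */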
++ if (!amdgpu_dig_connector->edp_on)
++ amdgpu_atombios_encoder_set_edp_panel_power(connector,
++ ATOM_TRANSMITTER_ACTION_POWER_ON);
++ amdgpu_connector_get_edid(connector);
++ ret = amdgpu_connector_ddc_get_modes(connector);
++ if (!amdgpu_dig_connector->edp_on)
++ amdgpu_atombios_encoder_set_edp_panel_power(connector,
++ ATOM_TRANSMITTER_ACTION_POWER_OFF);
++ } else {
++ /* need to setup ddc on the bridge */
++ if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
++ ENCODER_OBJECT_ID_NONE) {
++ if (encoder)
++ amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
++ }
++ amdgpu_connector_get_edid(connector);
++ ret = amdgpu_connector_ddc_get_modes(connector);
++ }
++
++ if (ret > 0) {
++ if (encoder) {
++ amdgpu_connector_fixup_lcd_native_mode(encoder, connector);
++ /* add scaled modes */
++ amdgpu_connector_add_common_modes(encoder, connector);
++ }
++ return ret;
++ }
++
++ if (!encoder)
++ return 0;
++
++ /* we have no EDID modes */
++ mode = amdgpu_connector_lcd_native_mode(encoder);
++ if (mode) {
++ ret = 1;
++ drm_mode_probed_add(connector, mode);
++ /* add the width/height from vbios tables if available */
++ connector->display_info.width_mm = mode->width_mm;
++ connector->display_info.height_mm = mode->height_mm;
++ /* add scaled modes */
++ amdgpu_connector_add_common_modes(encoder, connector);
++ }
++ } else {
++ /* need to setup ddc on the bridge */
++ if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
++ ENCODER_OBJECT_ID_NONE) {
++ if (encoder)
++ amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
++ }
++ amdgpu_connector_get_edid(connector);
++ ret = amdgpu_connector_ddc_get_modes(connector);
++
++ amdgpu_get_native_mode(connector);
++ }
++
++ return ret;
++}
++
++u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
++{
++ struct drm_encoder *encoder;
++ struct amdgpu_encoder *amdgpu_encoder;
++ int i;
++
++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++ if (connector->encoder_ids[i] == 0)
++ break;
++
++ encoder = drm_encoder_find(connector->dev,
++ connector->encoder_ids[i]);
++ if (!encoder)
++ continue;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_TRAVIS:
++ case ENCODER_OBJECT_ID_NUTMEG:
++ return amdgpu_encoder->encoder_id;
++ default:
++ break;
++ }
++ }
++
++ return ENCODER_OBJECT_ID_NONE;
++}
++
++static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
++{
++ struct drm_encoder *encoder;
++ struct amdgpu_encoder *amdgpu_encoder;
++ int i;
++ bool found = false;
++
++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++ if (connector->encoder_ids[i] == 0)
++ break;
++ encoder = drm_encoder_find(connector->dev,
++ connector->encoder_ids[i]);
++ if (!encoder)
++ continue;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++ if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
++ found = true;
++ }
++
++ return found;
++}
++
++bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++
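++	/* HBR2 (DP 1.2) needs a display clock of at least 539 MHz;
++	 * default_dispclk is in 10 kHz units.
++	 */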
++ if ((adev->clock.default_dispclk >= 53900) &&
++ amdgpu_connector_encoder_is_hbr2(connector)) {
++ return true;
++ }
++
++ return false;
++}
++
++static enum drm_connector_status
++amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ enum drm_connector_status ret = connector_status_disconnected;
++ struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
++ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
++ int r;
++
++ r = pm_runtime_get_sync(connector->dev->dev);
++ if (r < 0)
++ return connector_status_disconnected;
++
++ if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
++ ret = connector->status;
++ goto out;
++ }
++
++ amdgpu_connector_free_edid(connector);
++
++ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
++ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
++ if (encoder) {
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
++
++ /* check if panel is valid */
++ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
++ ret = connector_status_connected;
++ }
++ /* eDP is always DP */
++ amdgpu_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
++ if (!amdgpu_dig_connector->edp_on)
++ amdgpu_atombios_encoder_set_edp_panel_power(connector,
++ ATOM_TRANSMITTER_ACTION_POWER_ON);
++ if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
++ ret = connector_status_connected;
++ if (!amdgpu_dig_connector->edp_on)
++ amdgpu_atombios_encoder_set_edp_panel_power(connector,
++ ATOM_TRANSMITTER_ACTION_POWER_OFF);
++ } else if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
++ ENCODER_OBJECT_ID_NONE) {
++ /* DP bridges are always DP */
++ amdgpu_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
++ /* get the DPCD from the bridge */
++ amdgpu_atombios_dp_get_dpcd(amdgpu_connector);
++
++ if (encoder) {
++ /* setup ddc on the bridge */
++ amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
++ /* bridge chips are always aux */
++ if (amdgpu_ddc_probe(amdgpu_connector, true)) /* try DDC */
++ ret = connector_status_connected;
++ else if (amdgpu_connector->dac_load_detect) { /* try load detection */
++ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
++ ret = encoder_funcs->detect(encoder, connector);
++ }
++ }
++ } else {
++ amdgpu_dig_connector->dp_sink_type =
++ amdgpu_atombios_dp_get_sinktype(amdgpu_connector);
++ if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
++ ret = connector_status_connected;
++ if (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
++ amdgpu_atombios_dp_get_dpcd(amdgpu_connector);
++ } else {
++ if (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
++ if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
++ ret = connector_status_connected;
++ } else {
++ /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
++ if (amdgpu_ddc_probe(amdgpu_connector, false))
++ ret = connector_status_connected;
++ }
++ }
++ }
++
++ amdgpu_connector_update_scratch_regs(connector, ret);
++out:
++ pm_runtime_mark_last_busy(connector->dev->dev);
++ pm_runtime_put_autosuspend(connector->dev->dev);
++
++ return ret;
++}
++
++static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
++
++ /* XXX check mode bandwidth */
++
++ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
++ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
++ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
++
++ if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
++ return MODE_PANEL;
++
++ if (encoder) {
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
++
++ /* AVIVO hardware supports downscaling modes larger than the panel
++ * to the panel size, but I'm not sure this is desirable.
++ */
++ if ((mode->hdisplay > native_mode->hdisplay) ||
++ (mode->vdisplay > native_mode->vdisplay))
++ return MODE_PANEL;
++
++ /* if scaling is disabled, block non-native modes */
++ if (amdgpu_encoder->rmx_type == RMX_OFF) {
++ if ((mode->hdisplay != native_mode->hdisplay) ||
++ (mode->vdisplay != native_mode->vdisplay))
++ return MODE_PANEL;
++ }
++ }
++ return MODE_OK;
++ } else {
++ if ((amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++ (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
++ return amdgpu_atombios_dp_mode_valid_helper(connector, mode);
++ } else {
++ if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
++				/* HDMI 1.3+ supports a max TMDS clock of 340 MHz */
++ if (mode->clock > 340000)
++ return MODE_CLOCK_HIGH;
++ } else {
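++				/* DP to DVI/HDMI adapter: single-link TMDS limit */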
++ if (mode->clock > 165000)
++ return MODE_CLOCK_HIGH;
++ }
++ }
++ }
++
++ return MODE_OK;
++}
++
++static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
++ .get_modes = amdgpu_connector_dp_get_modes,
++ .mode_valid = amdgpu_connector_dp_mode_valid,
++ .best_encoder = amdgpu_connector_dvi_encoder,
++};
++
++static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = amdgpu_connector_dp_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = amdgpu_connector_set_property,
++ .destroy = amdgpu_connector_destroy,
++ .force = amdgpu_connector_dvi_force,
++};
++
++static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = amdgpu_connector_dp_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = amdgpu_connector_set_lcd_property,
++ .destroy = amdgpu_connector_destroy,
++ .force = amdgpu_connector_dvi_force,
++};
++
++void
++amdgpu_connector_add(struct amdgpu_device *adev,
++ uint32_t connector_id,
++ uint32_t supported_device,
++ int connector_type,
++ struct amdgpu_i2c_bus_rec *i2c_bus,
++ uint16_t connector_object_id,
++ struct amdgpu_hpd *hpd,
++ struct amdgpu_router *router)
++{
++ struct drm_device *dev = adev->ddev;
++ struct drm_connector *connector;
++ struct amdgpu_connector *amdgpu_connector;
++ struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
++ struct drm_encoder *encoder;
++ struct amdgpu_encoder *amdgpu_encoder;
++ uint32_t subpixel_order = SubPixelNone;
++ bool shared_ddc = false;
++ bool is_dp_bridge = false;
++ bool has_aux = false;
++
++ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
++ return;
++
++ /* see if we already added it */
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ amdgpu_connector = to_amdgpu_connector(connector);
++ if (amdgpu_connector->connector_id == connector_id) {
++ amdgpu_connector->devices |= supported_device;
++ return;
++ }
++ if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
++ if (amdgpu_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
++ amdgpu_connector->shared_ddc = true;
++ shared_ddc = true;
++ }
++ if (amdgpu_connector->router_bus && router->ddc_valid &&
++ (amdgpu_connector->router.router_id == router->router_id)) {
++ amdgpu_connector->shared_ddc = false;
++ shared_ddc = false;
++ }
++ }
++ }
++
++ /* check if it's a dp bridge */
++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++ if (amdgpu_encoder->devices & supported_device) {
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_TRAVIS:
++ case ENCODER_OBJECT_ID_NUTMEG:
++ is_dp_bridge = true;
++ break;
++ default:
++ break;
++ }
++ }
++ }
++
++ amdgpu_connector = kzalloc(sizeof(struct amdgpu_connector), GFP_KERNEL);
++ if (!amdgpu_connector)
++ return;
++
++ connector = &amdgpu_connector->base;
++
++ amdgpu_connector->connector_id = connector_id;
++ amdgpu_connector->devices = supported_device;
++ amdgpu_connector->shared_ddc = shared_ddc;
++ amdgpu_connector->connector_object_id = connector_object_id;
++ amdgpu_connector->hpd = *hpd;
++
++ amdgpu_connector->router = *router;
++ if (router->ddc_valid || router->cd_valid) {
++ amdgpu_connector->router_bus = amdgpu_i2c_lookup(adev, &router->i2c_info);
++ if (!amdgpu_connector->router_bus)
++ DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
++ }
++
++ if (is_dp_bridge) {
++ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
++ if (!amdgpu_dig_connector)
++ goto failed;
++ amdgpu_connector->con_priv = amdgpu_dig_connector;
++ if (i2c_bus->valid) {
++ amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
++ if (amdgpu_connector->ddc_bus)
++ has_aux = true;
++ else
++ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
++ }
++ switch (connector_type) {
++ case DRM_MODE_CONNECTOR_VGA:
++ case DRM_MODE_CONNECTOR_DVIA:
++ default:
++ drm_connector_init(dev, &amdgpu_connector->base,
++ &amdgpu_connector_dp_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base,
++ &amdgpu_connector_dp_helper_funcs);
++ connector->interlace_allowed = true;
++ connector->doublescan_allowed = true;
++ amdgpu_connector->dac_load_detect = true;
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.load_detect_property,
++ 1);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_NONE);
++ break;
++ case DRM_MODE_CONNECTOR_DVII:
++ case DRM_MODE_CONNECTOR_DVID:
++ case DRM_MODE_CONNECTOR_HDMIA:
++ case DRM_MODE_CONNECTOR_HDMIB:
++ case DRM_MODE_CONNECTOR_DisplayPort:
++ drm_connector_init(dev, &amdgpu_connector->base,
++ &amdgpu_connector_dp_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base,
++ &amdgpu_connector_dp_helper_funcs);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_property,
++ UNDERSCAN_OFF);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_hborder_property,
++ 0);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_vborder_property,
++ 0);
++
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_NONE);
++
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.dither_property,
++ AMDGPU_FMT_DITHER_DISABLE);
++
++ if (amdgpu_audio != 0)
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.audio_property,
++ AMDGPU_AUDIO_AUTO);
++
++ subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = true;
++ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
++ connector->doublescan_allowed = true;
++ else
++ connector->doublescan_allowed = false;
++ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
++ amdgpu_connector->dac_load_detect = true;
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.load_detect_property,
++ 1);
++ }
++ break;
++ case DRM_MODE_CONNECTOR_LVDS:
++ case DRM_MODE_CONNECTOR_eDP:
++ drm_connector_init(dev, &amdgpu_connector->base,
++ &amdgpu_connector_edp_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base,
++ &amdgpu_connector_dp_helper_funcs);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++ break;
++ }
++ } else {
++ switch (connector_type) {
++ case DRM_MODE_CONNECTOR_VGA:
++ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
++ if (i2c_bus->valid) {
++ amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
++ if (!amdgpu_connector->ddc_bus)
++ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
++ }
++ amdgpu_connector->dac_load_detect = true;
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.load_detect_property,
++ 1);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_NONE);
++ /* no HPD on analog connectors */
++ amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
++ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++ connector->interlace_allowed = true;
++ connector->doublescan_allowed = true;
++ break;
++ case DRM_MODE_CONNECTOR_DVIA:
++ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
++ if (i2c_bus->valid) {
++ amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
++ if (!amdgpu_connector->ddc_bus)
++ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
++ }
++ amdgpu_connector->dac_load_detect = true;
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.load_detect_property,
++ 1);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_NONE);
++ /* no HPD on analog connectors */
++ amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
++ connector->interlace_allowed = true;
++ connector->doublescan_allowed = true;
++ break;
++ case DRM_MODE_CONNECTOR_DVII:
++ case DRM_MODE_CONNECTOR_DVID:
++ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
++ if (!amdgpu_dig_connector)
++ goto failed;
++ amdgpu_connector->con_priv = amdgpu_dig_connector;
++ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
++ if (i2c_bus->valid) {
++ amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
++ if (!amdgpu_connector->ddc_bus)
++ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
++ }
++ subpixel_order = SubPixelHorizontalRGB;
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.coherent_mode_property,
++ 1);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_property,
++ UNDERSCAN_OFF);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_hborder_property,
++ 0);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_vborder_property,
++ 0);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_NONE);
++
++ if (amdgpu_audio != 0) {
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.audio_property,
++ AMDGPU_AUDIO_AUTO);
++ }
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.dither_property,
++ AMDGPU_FMT_DITHER_DISABLE);
++ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
++ amdgpu_connector->dac_load_detect = true;
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.load_detect_property,
++ 1);
++ }
++ connector->interlace_allowed = true;
++ if (connector_type == DRM_MODE_CONNECTOR_DVII)
++ connector->doublescan_allowed = true;
++ else
++ connector->doublescan_allowed = false;
++ break;
++ case DRM_MODE_CONNECTOR_HDMIA:
++ case DRM_MODE_CONNECTOR_HDMIB:
++ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
++ if (!amdgpu_dig_connector)
++ goto failed;
++ amdgpu_connector->con_priv = amdgpu_dig_connector;
++ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
++ if (i2c_bus->valid) {
++ amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
++ if (!amdgpu_connector->ddc_bus)
++ DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
++ }
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.coherent_mode_property,
++ 1);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_property,
++ UNDERSCAN_OFF);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_hborder_property,
++ 0);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_vborder_property,
++ 0);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_NONE);
++ if (amdgpu_audio != 0) {
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.audio_property,
++ AMDGPU_AUDIO_AUTO);
++ }
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.dither_property,
++ AMDGPU_FMT_DITHER_DISABLE);
++ subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = true;
++ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
++ connector->doublescan_allowed = true;
++ else
++ connector->doublescan_allowed = false;
++ break;
++ case DRM_MODE_CONNECTOR_DisplayPort:
++ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
++ if (!amdgpu_dig_connector)
++ goto failed;
++ amdgpu_connector->con_priv = amdgpu_dig_connector;
++ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dp_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
++ if (i2c_bus->valid) {
++ amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
++ if (amdgpu_connector->ddc_bus)
++ has_aux = true;
++ else
++ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
++ }
++ subpixel_order = SubPixelHorizontalRGB;
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.coherent_mode_property,
++ 1);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_property,
++ UNDERSCAN_OFF);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_hborder_property,
++ 0);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.underscan_vborder_property,
++ 0);
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_NONE);
++ if (amdgpu_audio != 0) {
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.audio_property,
++ AMDGPU_AUDIO_AUTO);
++ }
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ adev->mode_info.dither_property,
++ AMDGPU_FMT_DITHER_DISABLE);
++ connector->interlace_allowed = true;
++ /* in theory with a DP to VGA converter... */
++ connector->doublescan_allowed = false;
++ break;
++ case DRM_MODE_CONNECTOR_eDP:
++ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
++ if (!amdgpu_dig_connector)
++ goto failed;
++ amdgpu_connector->con_priv = amdgpu_dig_connector;
++ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_edp_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
++ if (i2c_bus->valid) {
++ amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
++ if (amdgpu_connector->ddc_bus)
++ has_aux = true;
++ else
++ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
++ }
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++ break;
++ case DRM_MODE_CONNECTOR_LVDS:
++ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
++ if (!amdgpu_dig_connector)
++ goto failed;
++ amdgpu_connector->con_priv = amdgpu_dig_connector;
++ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_lvds_funcs, connector_type);
++ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
++ if (i2c_bus->valid) {
++ amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
++ if (!amdgpu_connector->ddc_bus)
++ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
++ }
++ drm_object_attach_property(&amdgpu_connector->base.base,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++ break;
++ }
++ }
++
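++	/* without HPD we have to poll for connects; with HPD the interrupt drives detection */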
++ if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
++ if (i2c_bus->valid)
++ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++ } else
++ connector->polled = DRM_CONNECTOR_POLL_HPD;
++
++ connector->display_info.subpixel_order = subpixel_order;
++ drm_connector_register(connector);
++
++ if (has_aux)
++ amdgpu_atombios_dp_aux_init(amdgpu_connector);
++
++ return;
++
++failed:
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h
+new file mode 100644
+index 0000000..61fcef1
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h
+@@ -0,0 +1,42 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_CONNECTORS_H__
++#define __AMDGPU_CONNECTORS_H__
++
++struct edid *amdgpu_connector_edid(struct drm_connector *connector);
++void amdgpu_connector_hotplug(struct drm_connector *connector);
++int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector);
++u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
++bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector);
++void
++amdgpu_connector_add(struct amdgpu_device *adev,
++ uint32_t connector_id,
++ uint32_t supported_device,
++ int connector_type,
++ struct amdgpu_i2c_bus_rec *i2c_bus,
++ uint16_t connector_object_id,
++ struct amdgpu_hpd *hpd,
++ struct amdgpu_router *router);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+new file mode 100644
+index 0000000..70a9031
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -0,0 +1,825 @@
++/*
++ * Copyright 2008 Jerome Glisse.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Jerome Glisse <glisse@freedesktop.org>
++ */
++#include <linux/list_sort.h>
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_trace.h"
++
++#define AMDGPU_CS_MAX_PRIORITY 32u
++#define AMDGPU_CS_NUM_BUCKETS (AMDGPU_CS_MAX_PRIORITY + 1)
++
++/* This is based on the bucket sort with O(n) time complexity.
++ * An item with priority "i" is added to bucket[i]. The lists are then
++ * concatenated in descending order.
++ */
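++/* Example (illustrative): items A(2), B(0), C(2), D(1) land in bucket[2],
++ * bucket[0], bucket[2] and bucket[1]; splicing each bucket at the head of
++ * the output list yields A, C, D, B, i.e. descending priority with the
++ * order of equal-priority items preserved.
++ */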
++struct amdgpu_cs_buckets {
++ struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
++};
++
++static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
++{
++ unsigned i;
++
++ for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
++ INIT_LIST_HEAD(&b->bucket[i]);
++}
++
++static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
++ struct list_head *item, unsigned priority)
++{
++ /* Since buffers which appear sooner in the relocation list are
++ * likely to be used more often than buffers which appear later
++ * in the list, the sort mustn't change the ordering of buffers
++ * with the same priority, i.e. it must be stable.
++ */
++ list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
++}
++
++static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
++ struct list_head *out_list)
++{
++ unsigned i;
++
++ /* Connect the sorted buckets in the output list. */
++ for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
++ list_splice(&b->bucket[i], out_list);
++ }
++}
++
++int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
++ u32 ip_instance, u32 ring,
++ struct amdgpu_ring **out_ring)
++{
++	/* Right now all IPs have only one instance; an IP may still expose multiple rings. */
++ if (ip_instance != 0) {
++ DRM_ERROR("invalid ip instance: %d\n", ip_instance);
++ return -EINVAL;
++ }
++
++ switch (ip_type) {
++ default:
++ DRM_ERROR("unknown ip type: %d\n", ip_type);
++ return -EINVAL;
++ case AMDGPU_HW_IP_GFX:
++ if (ring < adev->gfx.num_gfx_rings) {
++ *out_ring = &adev->gfx.gfx_ring[ring];
++ } else {
++ DRM_ERROR("only %d gfx rings are supported now\n",
++ adev->gfx.num_gfx_rings);
++ return -EINVAL;
++ }
++ break;
++ case AMDGPU_HW_IP_COMPUTE:
++ if (ring < adev->gfx.num_compute_rings) {
++ *out_ring = &adev->gfx.compute_ring[ring];
++ } else {
++ DRM_ERROR("only %d compute rings are supported now\n",
++ adev->gfx.num_compute_rings);
++ return -EINVAL;
++ }
++ break;
++ case AMDGPU_HW_IP_DMA:
++ if (ring < 2) {
++ *out_ring = &adev->sdma[ring].ring;
++ } else {
++ DRM_ERROR("only two SDMA rings are supported\n");
++ return -EINVAL;
++ }
++ break;
++ case AMDGPU_HW_IP_UVD:
++ *out_ring = &adev->uvd.ring;
++ break;
++ case AMDGPU_HW_IP_VCE:
++		if (ring < 2) {
++ *out_ring = &adev->vce.ring[ring];
++ } else {
++ DRM_ERROR("only two VCE rings are supported\n");
++ return -EINVAL;
++ }
++ break;
++ }
++ return 0;
++}
++
++int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
++{
++ union drm_amdgpu_cs *cs = data;
++ uint64_t *chunk_array_user;
++ uint64_t *chunk_array = NULL;
++ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
++ unsigned size, i;
++ int r = 0;
++
++ if (!cs->in.num_chunks)
++ goto out;
++
++ p->ctx_id = cs->in.ctx_id;
++ p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
++
++ /* get chunks */
++ INIT_LIST_HEAD(&p->validated);
++ chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
++ if (chunk_array == NULL) {
++ r = -ENOMEM;
++ goto out;
++ }
++
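++	/* in.chunks is a __u64 in the ioctl ABI; cast it back to a user pointer */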
++ chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
++ if (copy_from_user(chunk_array, chunk_array_user,
++ sizeof(uint64_t)*cs->in.num_chunks)) {
++ r = -EFAULT;
++ goto out;
++ }
++
++ p->nchunks = cs->in.num_chunks;
++ p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
++ GFP_KERNEL);
++ if (p->chunks == NULL) {
++ r = -ENOMEM;
++ goto out;
++ }
++
++ for (i = 0; i < p->nchunks; i++) {
++ struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
++ struct drm_amdgpu_cs_chunk user_chunk;
++ uint32_t __user *cdata;
++
++ chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
++ if (copy_from_user(&user_chunk, chunk_ptr,
++ sizeof(struct drm_amdgpu_cs_chunk))) {
++ r = -EFAULT;
++ goto out;
++ }
++ p->chunks[i].chunk_id = user_chunk.chunk_id;
++ p->chunks[i].length_dw = user_chunk.length_dw;
++ if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB)
++ p->num_ibs++;
++
++ size = p->chunks[i].length_dw;
++ cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
++ p->chunks[i].user_ptr = cdata;
++
++ p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
++ if (p->chunks[i].kdata == NULL) {
++ r = -ENOMEM;
++ goto out;
++ }
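++		/* length_dw counts dwords; convert to bytes for the copy below */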
++ size *= sizeof(uint32_t);
++ if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
++ r = -EFAULT;
++ goto out;
++ }
++
++ if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE) {
++ size = sizeof(struct drm_amdgpu_cs_chunk_fence);
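++			/* the chunk must be big enough to hold the fence data */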
++ if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
++ uint32_t handle;
++ struct drm_gem_object *gobj;
++ struct drm_amdgpu_cs_chunk_fence *fence_data;
++
++ fence_data = (void *)p->chunks[i].kdata;
++ handle = fence_data->handle;
++ gobj = drm_gem_object_lookup(p->adev->ddev,
++ p->filp, handle);
++ if (gobj == NULL) {
++ r = -EINVAL;
++ goto out;
++ }
++
++ p->uf.bo = gem_to_amdgpu_bo(gobj);
++ p->uf.offset = fence_data->offset;
++ } else {
++ r = -EINVAL;
++ goto out;
++ }
++ }
++ }
++
++ p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
++ if (!p->ibs) {
++ r = -ENOMEM;
++ goto out;
++ }
++
++ p->ib_bos = kcalloc(p->num_ibs, sizeof(struct amdgpu_bo_list_entry),
++ GFP_KERNEL);
++ if (!p->ib_bos)
++ r = -ENOMEM;
++
++out:
++ kfree(chunk_array);
++ return r;
++}
++
++/* Returns how many bytes TTM can move per IB.
++ */
++static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
++{
++ u64 real_vram_size = adev->mc.real_vram_size;
++ u64 vram_usage = atomic64_read(&adev->vram_usage);
++
++ /* This function is based on the current VRAM usage.
++ *
++ * - If all of VRAM is free, allow relocating the number of bytes that
++ * is equal to 1/4 of the size of VRAM for this IB.
++	 *
++ * - If more than one half of VRAM is occupied, only allow relocating
++ * 1 MB of data for this IB.
++ *
++ * - From 0 to one half of used VRAM, the threshold decreases
++ * linearly.
++ * __________________
++ * 1/4 of -|\ |
++ * VRAM | \ |
++ * | \ |
++ * | \ |
++ * | \ |
++ * | \ |
++ * | \ |
++ * | \________|1 MB
++ * |----------------|
++ * VRAM 0 % 100 %
++ * used used
++ *
++ * Note: It's a threshold, not a limit. The threshold must be crossed
++ * for buffer relocations to stop, so any buffer of an arbitrary size
++ * can be moved as long as the threshold isn't crossed before
++ * the relocation takes place. We don't want to disable buffer
++ * relocations completely.
++ *
++ * The idea is that buffers should be placed in VRAM at creation time
++ * and TTM should only do a minimum number of relocations during
++ * command submission. In practice, you need to submit at least
++ * a dozen IBs to move all buffers to VRAM if they are in GTT.
++ *
++ * Also, things can get pretty crazy under memory pressure and actual
++ * VRAM usage can change a lot, so playing safe even at 50% does
++ * consistently increase performance.
++ */
++
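++	/* Worked example (illustrative): with 8 GiB of VRAM and 2 GiB in use,
++	 * half_vram = 4 GiB and half_free_vram = 2 GiB, so the threshold is
++	 * 1 GiB; once usage reaches half of VRAM the threshold sits at the
++	 * 1 MB floor below.
++	 */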
++ u64 half_vram = real_vram_size >> 1;
++ u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
++ u64 bytes_moved_threshold = half_free_vram >> 1;
++ return max(bytes_moved_threshold, 1024*1024ull);
++}
++
++int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
++{
++ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
++ struct amdgpu_vm *vm = &fpriv->vm;
++ struct amdgpu_device *adev = p->adev;
++ struct amdgpu_bo_list_entry *lobj;
++ struct list_head duplicates;
++ struct amdgpu_bo *bo;
++ u64 bytes_moved = 0, initial_bytes_moved;
++ u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
++ int r;
++
++ INIT_LIST_HEAD(&duplicates);
++ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
++ if (unlikely(r != 0)) {
++ return r;
++ }
++
++ list_for_each_entry(lobj, &p->validated, tv.head) {
++ bo = lobj->robj;
++ if (!bo->pin_count) {
++ u32 domain = lobj->prefered_domains;
++ u32 current_domain =
++ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
++
++ /* Check if this buffer will be moved and don't move it
++ * if we have moved too many buffers for this IB already.
++ *
++ * Note that this allows moving at least one buffer of
++ * any size, because it doesn't take the current "bo"
++ * into account. We don't want to disallow buffer moves
++ * completely.
++ */
++ if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
++ (domain & current_domain) == 0 && /* will be moved */
++ bytes_moved > bytes_moved_threshold) {
++ /* don't move it */
++ domain = current_domain;
++ }
++
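++			/* validate in the preferred domains first; if that fails,
++			 * fall back to allowed_domains and retry once
++			 */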
++ retry:
++ amdgpu_ttm_placement_from_domain(bo, domain);
++ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
++ bytes_moved += atomic64_read(&adev->num_bytes_moved) -
++ initial_bytes_moved;
++
++ if (unlikely(r)) {
++ if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
++ domain = lobj->allowed_domains;
++ goto retry;
++ }
++ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
++ return r;
++ }
++ }
++ lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
++ }
++ return 0;
++}
++
++static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
++{
++ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
++ struct amdgpu_cs_buckets buckets;
++ bool need_mmap_lock;
++ int i, r;
++
++ if (p->bo_list == NULL)
++ return 0;
++
++ need_mmap_lock = p->bo_list->has_userptr;
++ amdgpu_cs_buckets_init(&buckets);
++ for (i = 0; i < p->bo_list->num_entries; i++)
++ amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
++ p->bo_list->array[i].priority);
++
++ amdgpu_cs_buckets_get_list(&buckets, &p->validated);
++ p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
++ &p->validated);
++
++ for (i = 0; i < p->num_ibs; i++) {
++ if (!p->ib_bos[i].robj)
++ continue;
++
++ list_add(&p->ib_bos[i].tv.head, &p->validated);
++ }
++
++ if (need_mmap_lock)
++ down_read(&current->mm->mmap_sem);
++
++ r = amdgpu_cs_list_validate(p);
++
++ if (need_mmap_lock)
++ up_read(&current->mm->mmap_sem);
++
++ return r;
++}
++
++static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
++{
++ struct amdgpu_bo_list_entry *e;
++ int r;
++
++ list_for_each_entry(e, &p->validated, tv.head) {
++ struct reservation_object *resv = e->robj->tbo.resv;
++ r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);
++
++ if (r)
++ return r;
++ }
++ return 0;
++}
++
++static int cmp_size_smaller_first(void *priv, struct list_head *a,
++ struct list_head *b)
++{
++ struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
++ struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);
++
++ /* Sort A before B if A is smaller. */
++ return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
++}
++
++/**
++ * amdgpu_cs_parser_fini() - clean parser states
++ * @parser: parser structure holding parsing context.
++ * @error: error number
++ * @backoff: whether to back off the buffer reservations on error
++ *
++ * If error is set, then unvalidate the buffers; otherwise just free the
++ * memory used by the parsing context.
++ */
++static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
++{
++ unsigned i;
++
++ if (!error) {
++ /* Sort the buffer list from the smallest to largest buffer,
++ * which affects the order of buffers in the LRU list.
++ * This assures that the smallest buffers are added first
++ * to the LRU list, so they are likely to be later evicted
++ * first, instead of large buffers whose eviction is more
++ * expensive.
++ *
++ * This slightly lowers the number of bytes moved by TTM
++ * per frame under memory pressure.
++ */
++ list_sort(NULL, &parser->validated, cmp_size_smaller_first);
++
++ ttm_eu_fence_buffer_objects(&parser->ticket,
++ &parser->validated,
++ &parser->ibs[parser->num_ibs-1].fence->base);
++ } else if (backoff) {
++ ttm_eu_backoff_reservation(&parser->ticket,
++ &parser->validated);
++ }
++
++ if (parser->bo_list)
++ amdgpu_bo_list_put(parser->bo_list);
++ drm_free_large(parser->vm_bos);
++ for (i = 0; i < parser->nchunks; i++)
++ drm_free_large(parser->chunks[i].kdata);
++ kfree(parser->chunks);
++ for (i = 0; i < parser->num_ibs; i++) {
++ struct amdgpu_bo *bo = parser->ib_bos[i].robj;
++ amdgpu_ib_free(parser->adev, &parser->ibs[i]);
++
++ if (bo)
++ drm_gem_object_unreference_unlocked(&bo->gem_base);
++ }
++ kfree(parser->ibs);
++ kfree(parser->ib_bos);
++ if (parser->uf.bo)
++ drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
++}
++
++static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
++ struct amdgpu_vm *vm)
++{
++ struct amdgpu_device *adev = p->adev;
++ struct amdgpu_bo_va *bo_va;
++ struct amdgpu_bo *bo;
++ int i, r;
++
++ r = amdgpu_vm_update_page_directory(adev, vm);
++ if (r)
++ return r;
++
++ r = amdgpu_vm_clear_freed(adev, vm);
++ if (r)
++ return r;
++
++ if (p->bo_list) {
++ for (i = 0; i < p->bo_list->num_entries; i++) {
++ /* ignore duplicates */
++ bo = p->bo_list->array[i].robj;
++ if (!bo)
++ continue;
++
++ bo_va = p->bo_list->array[i].bo_va;
++ if (bo_va == NULL)
++ continue;
++
++ r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
++ if (r)
++ return r;
++
++ amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
++ }
++ }
++
++ for (i = 0; i < p->num_ibs; i++) {
++ bo = p->ib_bos[i].robj;
++ if (!bo)
++ continue;
++
++ bo_va = p->ib_bos[i].bo_va;
++ if (!bo_va)
++ continue;
++
++ r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
++ if (r)
++ return r;
++
++ amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
++ }
++ return amdgpu_vm_clear_invalids(adev, vm);
++}
++
++static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
++ struct amdgpu_cs_parser *parser)
++{
++ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
++ struct amdgpu_vm *vm = &fpriv->vm;
++ struct amdgpu_ring *ring;
++ int i, r;
++
++ if (parser->num_ibs == 0)
++ return 0;
++
++ /* Only for UVD/VCE VM emulation */
++ for (i = 0; i < parser->num_ibs; i++) {
++ ring = parser->ibs[i].ring;
++ if (ring->funcs->parse_cs) {
++ r = amdgpu_ring_parse_cs(ring, parser, i);
++ if (r)
++ return r;
++ }
++ }
++
++ mutex_lock(&vm->mutex);
++ r = amdgpu_bo_vm_update_pte(parser, vm);
++ if (r) {
++ goto out;
++ }
++ amdgpu_cs_sync_rings(parser);
++
++ r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
++ parser->filp);
++
++out:
++ mutex_unlock(&vm->mutex);
++ return r;
++}
++
++static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
++{
++ if (r == -EDEADLK) {
++ r = amdgpu_gpu_reset(adev);
++ if (!r)
++ r = -EAGAIN;
++ }
++ return r;
++}
++
++static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
++ struct amdgpu_cs_parser *parser)
++{
++ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
++ struct amdgpu_vm *vm = &fpriv->vm;
++ int i, j;
++ int r;
++
++ for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
++ struct amdgpu_cs_chunk *chunk;
++ struct amdgpu_ib *ib;
++ struct drm_amdgpu_cs_chunk_ib *chunk_ib;
++ struct amdgpu_bo_list_entry *ib_bo;
++ struct amdgpu_ring *ring;
++ struct drm_gem_object *gobj;
++ struct amdgpu_bo *aobj;
++ void *kptr;
++
++ chunk = &parser->chunks[i];
++ ib = &parser->ibs[j];
++ chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
++
++ if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
++ continue;
++
++ gobj = drm_gem_object_lookup(adev->ddev, parser->filp, chunk_ib->handle);
++ if (gobj == NULL)
++ return -ENOENT;
++ aobj = gem_to_amdgpu_bo(gobj);
++
++ r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
++ chunk_ib->ip_instance, chunk_ib->ring,
++ &ring);
++ if (r) {
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++ }
++
++ if (ring->funcs->parse_cs) {
++ r = amdgpu_bo_reserve(aobj, false);
++ if (r) {
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++ }
++
++ r = amdgpu_bo_kmap(aobj, &kptr);
++ if (r) {
++ amdgpu_bo_unreserve(aobj);
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++ }
++
++ r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
++ if (r) {
++ DRM_ERROR("Failed to get ib !\n");
++ amdgpu_bo_unreserve(aobj);
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++ }
++
++ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
++ amdgpu_bo_kunmap(aobj);
++ amdgpu_bo_unreserve(aobj);
++ } else {
++ r = amdgpu_ib_get(ring, vm, 0, ib);
++ if (r) {
++ DRM_ERROR("Failed to get ib !\n");
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++ }
++
++ ib->gpu_addr = chunk_ib->va_start;
++ }
++ ib->length_dw = chunk_ib->ib_bytes / 4;
++
++ if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
++ ib->is_const_ib = true;
++ if (chunk_ib->flags & AMDGPU_IB_FLAG_GDS)
++ ib->gds_needed = true;
++ if (ib->ring->current_filp != parser->filp) {
++ ib->ring->need_ctx_switch = true;
++ ib->ring->current_filp = parser->filp;
++ }
++
++ ib_bo = &parser->ib_bos[j];
++ ib_bo->robj = aobj;
++ ib_bo->prefered_domains = aobj->initial_domain;
++ ib_bo->allowed_domains = aobj->initial_domain;
++ ib_bo->priority = 0;
++ ib_bo->tv.bo = &aobj->tbo;
++ ib_bo->tv.shared = true;
++ j++;
++ }
++
++ if (!parser->num_ibs)
++ return 0;
++
++ /* add GDS resources to first IB */
++ if (parser->bo_list) {
++ struct amdgpu_bo *gds = parser->bo_list->gds_obj;
++ struct amdgpu_bo *gws = parser->bo_list->gws_obj;
++ struct amdgpu_bo *oa = parser->bo_list->oa_obj;
++ struct amdgpu_ib *ib = &parser->ibs[0];
++
++ if (gds) {
++ ib->gds_base = amdgpu_bo_gpu_offset(gds);
++ ib->gds_size = amdgpu_bo_size(gds);
++ }
++ if (gws) {
++ ib->gws_base = amdgpu_bo_gpu_offset(gws);
++ ib->gws_size = amdgpu_bo_size(gws);
++ }
++ if (oa) {
++ ib->oa_base = amdgpu_bo_gpu_offset(oa);
++ ib->oa_size = amdgpu_bo_size(oa);
++ }
++ }
++
++ /* wrap the last IB with user fence */
++ if (parser->uf.bo) {
++ struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
++
++ /* UVD & VCE fw doesn't support user fences */
++ if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
++ ib->ring->type == AMDGPU_RING_TYPE_VCE)
++ return -EINVAL;
++
++ ib->user = &parser->uf;
++ }
++
++ return 0;
++}
++
++int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ union drm_amdgpu_cs *cs = data;
++ struct amdgpu_cs_parser parser;
++ int r, i;
++
++ down_read(&adev->exclusive_lock);
++ if (!adev->accel_working) {
++ up_read(&adev->exclusive_lock);
++ return -EBUSY;
++ }
++ /* initialize parser */
++ memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
++ parser.filp = filp;
++ parser.adev = adev;
++ r = amdgpu_cs_parser_init(&parser, data);
++ if (r) {
++ DRM_ERROR("Failed to initialize parser !\n");
++ amdgpu_cs_parser_fini(&parser, r, false);
++ up_read(&adev->exclusive_lock);
++ r = amdgpu_cs_handle_lockup(adev, r);
++ return r;
++ }
++
++ r = amdgpu_cs_ib_fill(adev, &parser);
++ if (!r) {
++ r = amdgpu_cs_parser_relocs(&parser);
++ if (r && r != -ERESTARTSYS)
++ DRM_ERROR("Failed to parse relocation %d!\n", r);
++ }
++
++ if (r) {
++ amdgpu_cs_parser_fini(&parser, r, false);
++ up_read(&adev->exclusive_lock);
++ r = amdgpu_cs_handle_lockup(adev, r);
++ return r;
++ }
++
++ for (i = 0; i < parser.num_ibs; i++)
++ trace_amdgpu_cs(&parser, i);
++
++ r = amdgpu_cs_ib_vm_chunk(adev, &parser);
++ if (r)
++ goto out;
++
++ cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
++out:
++ amdgpu_cs_parser_fini(&parser, r, true);
++ up_read(&adev->exclusive_lock);
++ r = amdgpu_cs_handle_lockup(adev, r);
++ return r;
++}
++
++/**
++ * amdgpu_cs_wait_ioctl - wait for a command submission to finish
++ *
++ * @dev: drm device
++ * @data: data from userspace
++ * @filp: file private
++ *
++ * Wait for the command submission identified by handle to finish.
++ */
++int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ union drm_amdgpu_wait_cs *wait = data;
++ struct amdgpu_device *adev = dev->dev_private;
++ uint64_t seq[AMDGPU_MAX_RINGS] = {0};
++ struct amdgpu_ring *ring = NULL;
++ unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
++ long r;
++
++ r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
++ wait->in.ring, &ring);
++ if (r)
++ return r;
++
++ seq[ring->idx] = wait->in.handle;
++
++ r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
++ if (r < 0)
++ return r;
++
++ memset(wait, 0, sizeof(*wait));
++ wait->out.status = (r == 0);
++
++ return 0;
++}
++
++/**
++ * amdgpu_cs_find_mapping - find the bo_va mapping for a VM address
++ *
++ * @parser: command submission parser context
++ * @addr: VM address
++ * @bo: resulting BO of the mapping found
++ *
++ * Search the buffer objects in the command submission context for a certain
++ * virtual memory address. Returns the mapping structure when found, NULL
++ * otherwise.
++ */
++struct amdgpu_bo_va_mapping *
++amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
++ uint64_t addr, struct amdgpu_bo **bo)
++{
++ struct amdgpu_bo_list_entry *reloc;
++ struct amdgpu_bo_va_mapping *mapping;
++
++ addr /= AMDGPU_GPU_PAGE_SIZE;
++
++ list_for_each_entry(reloc, &parser->validated, tv.head) {
++ if (!reloc->bo_va)
++ continue;
++
++ list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
++ if (mapping->it.start > addr ||
++ addr > mapping->it.last)
++ continue;
++
++ *bo = reloc->bo_va->bo;
++ return mapping;
++ }
++ }
++
++ return NULL;
++}
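++
++/*
++ * Illustrative usage sketch (not part of the driver): a ring's parse_cs
++ * callback that needs to translate a GPU VA found in the command stream
++ * could use this helper as follows ("va" is a hypothetical address read
++ * from the IB):
++ *
++ *	struct amdgpu_bo *bo;
++ *	struct amdgpu_bo_va_mapping *m;
++ *
++ *	m = amdgpu_cs_find_mapping(parser, va, &bo);
++ *	if (!m)
++ *		return -EINVAL; // VA is not backed by a validated BO
++ */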
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+new file mode 100644
+index 0000000..235010a
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -0,0 +1,161 @@
++/*
++ * Copyright 2015 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: monk liu <monk.liu@amd.com>
++ */
++
++#include <drm/drmP.h>
++#include "amdgpu.h"
++
++static void amdgpu_ctx_do_release(struct kref *ref)
++{
++ struct amdgpu_ctx *ctx;
++ struct amdgpu_ctx_mgr *mgr;
++
++ ctx = container_of(ref, struct amdgpu_ctx, refcount);
++ mgr = &ctx->fpriv->ctx_mgr;
++
++ mutex_lock(&mgr->hlock);
++ idr_remove(&mgr->ctx_handles, ctx->id);
++ mutex_unlock(&mgr->hlock);
++ kfree(ctx);
++}
++
++int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
++{
++ int r;
++ struct amdgpu_ctx *ctx;
++ struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
++
++ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
++ if (!ctx)
++ return -ENOMEM;
++
++ mutex_lock(&mgr->hlock);
++ r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
++ if (r < 0) {
++ mutex_unlock(&mgr->hlock);
++ kfree(ctx);
++ return r;
++ }
++ mutex_unlock(&mgr->hlock);
++ *id = (uint32_t)r;
++
++ memset(ctx, 0, sizeof(*ctx));
++ ctx->id = *id;
++ ctx->fpriv = fpriv;
++ kref_init(&ctx->refcount);
++
++ return 0;
++}
++
++int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
++{
++ int r;
++ struct amdgpu_ctx *ctx;
++ struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
++
++ rcu_read_lock();
++ ctx = idr_find(&mgr->ctx_handles, id);
++ rcu_read_unlock();
++ if (ctx) {
++ /* if no task is pending on this context, free it */
++ r = kref_put(&ctx->refcount, amdgpu_ctx_do_release);
++ if (r == 1)
++ return 0; /* context is removed successfully */
++ else {
++ /* context is still in use */
++ kref_get(&ctx->refcount);
++ return -ERESTARTSYS;
++ }
++ }
++ return -EINVAL;
++}
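++
++/*
++ * Note on the kref handling above: kref_put() returns 1 only when the
++ * refcount dropped to zero and amdgpu_ctx_do_release() actually ran.
++ * Any other result means a task still holds a reference, so the
++ * kref_get() restores the count that was just dropped and -ERESTARTSYS
++ * tells the caller to retry later.
++ */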
++
++int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id, struct amdgpu_ctx_state *state)
++{
++ struct amdgpu_ctx *ctx;
++ struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
++
++ rcu_read_lock();
++ ctx = idr_find(&mgr->ctx_handles, id);
++ rcu_read_unlock();
++ if (ctx) {
++ /* state should alter with CS activity */
++ *state = ctx->state;
++ return 0;
++ }
++ return -EINVAL;
++}
++
++void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
++{
++ struct idr *idp;
++ struct amdgpu_ctx *ctx;
++ uint32_t id;
++ struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
++ idp = &mgr->ctx_handles;
++
++ idr_for_each_entry(idp, ctx, id) {
++ if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
++ DRM_ERROR("ctx (id=%u) is still alive\n", ctx->id);
++ }
++
++ mutex_destroy(&mgr->hlock);
++}
++
++int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ int r;
++ uint32_t id;
++ uint32_t flags;
++ struct amdgpu_ctx_state state;
++
++ union drm_amdgpu_ctx *args = data;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_fpriv *fpriv = filp->driver_priv;
++
++ r = 0;
++ id = args->in.ctx_id;
++ flags = args->in.flags;
++
++ switch (args->in.op) {
++ case AMDGPU_CTX_OP_ALLOC_CTX:
++ r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
++ args->out.alloc.ctx_id = id;
++ break;
++ case AMDGPU_CTX_OP_FREE_CTX:
++ r = amdgpu_ctx_free(adev, fpriv, id);
++ break;
++ case AMDGPU_CTX_OP_QUERY_STATE:
++ r = amdgpu_ctx_query(adev, fpriv, id, &state);
++ if (r == 0) {
++ args->out.state.flags = state.flags;
++ args->out.state.hangs = state.hangs;
++ }
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return r;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+new file mode 100644
+index 0000000..cd4bb90
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -0,0 +1,1971 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#include <linux/console.h>
++#include <linux/slab.h>
++#include <linux/debugfs.h>
++#include <drm/drmP.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/amdgpu_drm.h>
++#include <linux/vgaarb.h>
++#include <linux/vga_switcheroo.h>
++#include <linux/efi.h>
++#include "amdgpu.h"
++#include "amdgpu_i2c.h"
++#include "atom.h"
++#include "amdgpu_atombios.h"
++#include "bif/bif_4_1_d.h"
++
++static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
++static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
++
++static const char *amdgpu_asic_name[] = {
++ "BONAIRE",
++ "KAVERI",
++ "KABINI",
++ "HAWAII",
++ "MULLINS",
++ "TOPAZ",
++ "TONGA",
++ "CARRIZO",
++ "LAST",
++};
++
++bool amdgpu_device_is_px(struct drm_device *dev)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++
++ if (adev->flags & AMDGPU_IS_PX)
++ return true;
++ return false;
++}
++
++/*
++ * MMIO register access helper functions.
++ */
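++/*
++ * Registers whose byte offset (reg * 4) falls inside the mapped MMIO BAR
++ * are accessed directly; anything beyond rmmio_size, or any access with
++ * always_indirect set, goes through the mmMM_INDEX/mmMM_DATA pair under
++ * mmio_idx_lock so the index/data sequence stays atomic.
++ */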
++uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
++ bool always_indirect)
++{
++ if ((reg * 4) < adev->rmmio_size && !always_indirect)
++ return readl(((void __iomem *)adev->rmmio) + (reg * 4));
++ else {
++ unsigned long flags;
++ uint32_t ret;
++
++ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
++ writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
++ ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
++ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
++
++ return ret;
++ }
++}
++
++void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
++ bool always_indirect)
++{
++ if ((reg * 4) < adev->rmmio_size && !always_indirect)
++ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
++ else {
++ unsigned long flags;
++
++ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
++ writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
++ writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
++ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
++ }
++}
++
++u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
++{
++ if ((reg * 4) < adev->rio_mem_size)
++ return ioread32(adev->rio_mem + (reg * 4));
++ else {
++ iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
++ return ioread32(adev->rio_mem + (mmMM_DATA * 4));
++ }
++}
++
++void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
++{
++
++ if ((reg * 4) < adev->rio_mem_size)
++ iowrite32(v, adev->rio_mem + (reg * 4));
++ else {
++ iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
++ iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
++ }
++}
++
++/**
++ * amdgpu_mm_rdoorbell - read a doorbell dword
++ *
++ * @adev: amdgpu_device pointer
++ * @index: doorbell index
++ *
++ * Returns the value in the doorbell aperture at the
++ * requested doorbell index (CIK).
++ */
++u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
++{
++ if (index < adev->doorbell.num_doorbells) {
++ return readl(adev->doorbell.ptr + index);
++ } else {
++ DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
++ return 0;
++ }
++}
++
++/**
++ * amdgpu_mm_wdoorbell - write a doorbell dword
++ *
++ * @adev: amdgpu_device pointer
++ * @index: doorbell index
++ * @v: value to write
++ *
++ * Writes @v to the doorbell aperture at the
++ * requested doorbell index (CIK).
++ */
++void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
++{
++ if (index < adev->doorbell.num_doorbells) {
++ writel(v, adev->doorbell.ptr + index);
++ } else {
++ DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
++ }
++}
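++
++/*
++ * Illustrative use (field names are hypothetical, the real layout is up
++ * to the ring code): a ring mirrors its write pointer through the
++ * doorbell aperture instead of a register write, e.g.
++ *
++ *	amdgpu_mm_wdoorbell(adev, ring->doorbell_index, ring->wptr);
++ */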
++
++/**
++ * amdgpu_invalid_rreg - dummy reg read function
++ *
++ * @adev: amdgpu device pointer
++ * @reg: offset of register
++ *
++ * Dummy register read function. Used for register blocks
++ * that certain asics don't have (all asics).
++ * Returns the value in the register.
++ */
++static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
++{
++ DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
++ BUG();
++ return 0;
++}
++
++/**
++ * amdgpu_invalid_wreg - dummy reg write function
++ *
++ * @adev: amdgpu device pointer
++ * @reg: offset of register
++ * @v: value to write to the register
++ *
++ * Dummy register write function. Used for register blocks
++ * that certain asics don't have (all asics).
++ */
++static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
++{
++ DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
++ reg, v);
++ BUG();
++}
++
++/**
++ * amdgpu_block_invalid_rreg - dummy reg read function
++ *
++ * @adev: amdgpu device pointer
++ * @block: offset of instance
++ * @reg: offset of register
++ *
++ * Dummy register read function. Used for register blocks
++ * that certain asics don't have (all asics).
++ * Returns the value in the register.
++ */
++static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
++ uint32_t block, uint32_t reg)
++{
++ DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
++ reg, block);
++ BUG();
++ return 0;
++}
++
++/**
++ * amdgpu_block_invalid_wreg - dummy reg write function
++ *
++ * @adev: amdgpu device pointer
++ * @block: offset of instance
++ * @reg: offset of register
++ * @v: value to write to the register
++ *
++ * Dummy register write function. Used for register blocks
++ * that certain asics don't have (all asics).
++ */
++static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
++ uint32_t block,
++ uint32_t reg, uint32_t v)
++{
++ DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
++ reg, block, v);
++ BUG();
++}
++
++static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->vram_scratch.robj == NULL) {
++ r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
++ PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
++ NULL, &adev->vram_scratch.robj);
++ if (r) {
++ return r;
++ }
++ }
++
++ r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
++ if (unlikely(r != 0))
++ return r;
++ r = amdgpu_bo_pin(adev->vram_scratch.robj,
++ AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
++ if (r) {
++ amdgpu_bo_unreserve(adev->vram_scratch.robj);
++ return r;
++ }
++ r = amdgpu_bo_kmap(adev->vram_scratch.robj,
++ (void **)&adev->vram_scratch.ptr);
++ if (r)
++ amdgpu_bo_unpin(adev->vram_scratch.robj);
++ amdgpu_bo_unreserve(adev->vram_scratch.robj);
++
++ return r;
++}
++
++static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->vram_scratch.robj == NULL) {
++ return;
++ }
++ r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
++ if (likely(r == 0)) {
++ amdgpu_bo_kunmap(adev->vram_scratch.robj);
++ amdgpu_bo_unpin(adev->vram_scratch.robj);
++ amdgpu_bo_unreserve(adev->vram_scratch.robj);
++ }
++ amdgpu_bo_unref(&adev->vram_scratch.robj);
++}
++
++/**
++ * amdgpu_program_register_sequence - program an array of registers.
++ *
++ * @adev: amdgpu_device pointer
++ * @registers: pointer to the register array
++ * @array_size: size of the register array
++ *
++ * Programs an array of registers with AND and OR masks.
++ * This is a helper for setting golden registers.
++ */
++void amdgpu_program_register_sequence(struct amdgpu_device *adev,
++ const u32 *registers,
++ const u32 array_size)
++{
++ u32 tmp, reg, and_mask, or_mask;
++ int i;
++
++ if (array_size % 3)
++ return;
++
++ for (i = 0; i < array_size; i += 3) {
++ reg = registers[i + 0];
++ and_mask = registers[i + 1];
++ or_mask = registers[i + 2];
++
++ if (and_mask == 0xffffffff) {
++ tmp = or_mask;
++ } else {
++ tmp = RREG32(reg);
++ tmp &= ~and_mask;
++ tmp |= or_mask;
++ }
++ WREG32(reg, tmp);
++ }
++}
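++
++/*
++ * Illustrative golden register table (the offsets below are hypothetical;
++ * the real tables live in the asic specific files). Entries are
++ * {reg, and_mask, or_mask} triplets:
++ *
++ *	static const u32 example_golden_settings[] = {
++ *		mmFOO_CNTL, 0xffffffff, 0x00000001, // full overwrite
++ *		mmBAR_CNTL, 0x0000ff00, 0x00001200, // clear bits 8-15, OR in 0x1200
++ *	};
++ *	amdgpu_program_register_sequence(adev, example_golden_settings,
++ *		(u32)ARRAY_SIZE(example_golden_settings));
++ */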
++
++void amdgpu_pci_config_reset(struct amdgpu_device *adev)
++{
++ pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
++}
++
++/*
++ * GPU doorbell aperture helper functions.
++ */
++/**
++ * amdgpu_doorbell_init - Init doorbell driver information.
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Init doorbell driver information (CIK)
++ * Returns 0 on success, error on failure.
++ */
++static int amdgpu_doorbell_init(struct amdgpu_device *adev)
++{
++ /* doorbell bar mapping */
++ adev->doorbell.base = pci_resource_start(adev->pdev, 2);
++ adev->doorbell.size = pci_resource_len(adev->pdev, 2);
++
++ adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
++ AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
++ if (adev->doorbell.num_doorbells == 0)
++ return -EINVAL;
++
++ adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
++ if (adev->doorbell.ptr == NULL) {
++ return -ENOMEM;
++ }
++ DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
++ DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
++
++ return 0;
++}
++
++/**
++ * amdgpu_doorbell_fini - Tear down doorbell driver information.
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Tear down doorbell driver information (CIK)
++ */
++static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
++{
++ iounmap(adev->doorbell.ptr);
++ adev->doorbell.ptr = NULL;
++}
++
++/**
++ * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
++ * setup amdkfd
++ *
++ * @adev: amdgpu_device pointer
++ * @aperture_base: output returning doorbell aperture base physical address
++ * @aperture_size: output returning doorbell aperture size in bytes
++ * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
++ *
++ * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
++ * takes doorbells required for its own rings and reports the setup to amdkfd.
++ * amdgpu reserved doorbells are at the start of the doorbell aperture.
++ */
++void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
++ phys_addr_t *aperture_base,
++ size_t *aperture_size,
++ size_t *start_offset)
++{
++ /*
++ * The first num_doorbells are used by amdgpu.
++ * amdkfd takes whatever's left in the aperture.
++ */
++ if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
++ *aperture_base = adev->doorbell.base;
++ *aperture_size = adev->doorbell.size;
++ *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
++ } else {
++ *aperture_base = 0;
++ *aperture_size = 0;
++ *start_offset = 0;
++ }
++}
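++
++/*
++ * Example split (numbers illustrative): with an 8 MB doorbell BAR and
++ * num_doorbells = 0x400, amdgpu keeps the first 0x400 * 4 = 4 KB for its
++ * own rings and amdkfd uses the aperture from base + 4 KB onwards.
++ */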
++
++/*
++ * amdgpu_wb_*()
++ * Writeback is the method by which the GPU updates special pages
++ * in memory with the status of certain GPU events (fences, ring pointers,
++ * etc.).
++ */
++
++/**
++ * amdgpu_wb_fini - Disable Writeback and free memory
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Disables Writeback and frees the Writeback memory (all asics).
++ * Used at driver shutdown.
++ */
++static void amdgpu_wb_fini(struct amdgpu_device *adev)
++{
++ if (adev->wb.wb_obj) {
++ if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
++ amdgpu_bo_kunmap(adev->wb.wb_obj);
++ amdgpu_bo_unpin(adev->wb.wb_obj);
++ amdgpu_bo_unreserve(adev->wb.wb_obj);
++ }
++ amdgpu_bo_unref(&adev->wb.wb_obj);
++ adev->wb.wb = NULL;
++ adev->wb.wb_obj = NULL;
++ }
++}
++
++/**
++ * amdgpu_wb_init - Init Writeback driver info and allocate memory
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Initializes Writeback and allocates the Writeback memory (all asics).
++ * Used at driver startup.
++ * Returns 0 on success or an -error on failure.
++ */
++static int amdgpu_wb_init(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->wb.wb_obj == NULL) {
++ r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj);
++ if (r) {
++ dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
++ return r;
++ }
++ r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
++ if (unlikely(r != 0)) {
++ amdgpu_wb_fini(adev);
++ return r;
++ }
++ r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
++ &adev->wb.gpu_addr);
++ if (r) {
++ amdgpu_bo_unreserve(adev->wb.wb_obj);
++ dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
++ amdgpu_wb_fini(adev);
++ return r;
++ }
++ r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
++ amdgpu_bo_unreserve(adev->wb.wb_obj);
++ if (r) {
++ dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
++ amdgpu_wb_fini(adev);
++ return r;
++ }
++
++ adev->wb.num_wb = AMDGPU_MAX_WB;
++ memset(&adev->wb.used, 0, sizeof(adev->wb.used));
++
++ /* clear wb memory */
++ memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_wb_get - Allocate a wb entry
++ *
++ * @adev: amdgpu_device pointer
++ * @wb: wb index
++ *
++ * Allocate a wb slot for use by the driver (all asics).
++ * Returns 0 on success or -EINVAL on failure.
++ */
++int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
++{
++ unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
++ if (offset < adev->wb.num_wb) {
++ __set_bit(offset, adev->wb.used);
++ *wb = offset;
++ return 0;
++ } else {
++ return -EINVAL;
++ }
++}
++
++/**
++ * amdgpu_wb_free - Free a wb entry
++ *
++ * @adev: amdgpu_device pointer
++ * @wb: wb index
++ *
++ * Free a wb slot allocated for use by the driver (all asics)
++ */
++void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
++{
++ if (wb < adev->wb.num_wb)
++ __clear_bit(wb, adev->wb.used);
++}
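++
++/*
++ * Typical writeback usage, sketched (the real consumers are the ring and
++ * fence code): allocate a slot, hand its GPU address to the hw, read the
++ * CPU copy, then release the slot:
++ *
++ *	u32 wb;
++ *
++ *	if (!amdgpu_wb_get(adev, &wb)) {
++ *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
++ *		u32 val = le32_to_cpu(adev->wb.wb[wb]);
++ *		...
++ *		amdgpu_wb_free(adev, wb);
++ *	}
++ */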
++
++/**
++ * amdgpu_vram_location - try to find VRAM location
++ * @adev: amdgpu device structure holding all necessary informations
++ * @mc: memory controller structure holding memory informations
++ * @base: base address at which to put VRAM
++ *
++ * Function will try to place VRAM at the base address provided
++ * as a parameter (which is so far either the PCI aperture address or,
++ * for IGPs, the TOM base address).
++ *
++ * If there is not enough space to fit the invisible VRAM in the 32-bit
++ * address space then we limit the VRAM size to the aperture.
++ *
++ * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
++ * this shouldn't be a problem as we are using the PCI aperture as a reference.
++ * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
++ * not IGP.
++ *
++ * Note: we use mc_vram_size as on some boards we need to program the mc to
++ * cover the whole aperture even if VRAM size is inferior to aperture size
++ * (Novell bug 204882, along with lots of Ubuntu ones).
++ *
++ * Note: when limiting vram it's safe to overwrite real_vram_size because
++ * we are not in the case where real_vram_size is inferior to mc_vram_size
++ * (i.e. not affected by the bogus hw of Novell bug 204882, along with
++ * lots of Ubuntu ones).
++ *
++ * Note: IGP TOM addr should be the same as the aperture addr, we don't
++ * explicitly check for that though.
++ * FIXME: when reducing VRAM size align new size on power of 2.
++ */
++void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
++{
++ uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
++
++ mc->vram_start = base;
++ if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
++ dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
++ mc->real_vram_size = mc->aper_size;
++ mc->mc_vram_size = mc->aper_size;
++ }
++ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
++ if (limit && limit < mc->real_vram_size)
++ mc->real_vram_size = limit;
++ dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
++ mc->mc_vram_size >> 20, mc->vram_start,
++ mc->vram_end, mc->real_vram_size >> 20);
++}
++
++/**
++ * amdgpu_gtt_location - try to find GTT location
++ * @adev: amdgpu device structure holding all necessary informations
++ * @mc: memory controller structure holding memory informations
++ *
++ * Function will try to place GTT before or after VRAM.
++ *
++ * If the GTT size is bigger than the space left, then we adjust the GTT size.
++ * Thus the function will never fail.
++ *
++ * FIXME: when reducing GTT size align new size on power of 2.
++ */
++void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
++{
++ u64 size_af, size_bf;
++
++ size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
++ size_bf = mc->vram_start & ~mc->gtt_base_align;
++ if (size_bf > size_af) {
++ if (mc->gtt_size > size_bf) {
++ dev_warn(adev->dev, "limiting GTT\n");
++ mc->gtt_size = size_bf;
++ }
++ mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
++ } else {
++ if (mc->gtt_size > size_af) {
++ dev_warn(adev->dev, "limiting GTT\n");
++ mc->gtt_size = size_af;
++ }
++ mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
++ }
++ mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
++ dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
++ mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
++}
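++
++/*
++ * Resulting layout for the common "GTT after VRAM" case (sizes are
++ * examples only):
++ *
++ *	vram_start          vram_end  gtt_start           gtt_end
++ *	|<--- mc_vram_size --->|      |<---- gtt_size ---->|
++ */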
++
++/*
++ * GPU helper functions.
++ */
++/**
++ * amdgpu_card_posted - check if the hw has already been initialized
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Check if the asic has been initialized (all asics).
++ * Used at driver startup.
++ * Returns true if initialized or false if not.
++ */
++bool amdgpu_card_posted(struct amdgpu_device *adev)
++{
++ uint32_t reg;
++
++ /* then check MEM_SIZE, in case the crtcs are off */
++ reg = RREG32(mmCONFIG_MEMSIZE);
++
++ if (reg)
++ return true;
++
++ return false;
++
++}
++
++/**
++ * amdgpu_boot_test_post_card - check and possibly initialize the hw
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Check if the asic is initialized and if not, attempt to initialize
++ * it (all asics).
++ * Returns true if initialized or false if not.
++ */
++bool amdgpu_boot_test_post_card(struct amdgpu_device *adev)
++{
++ if (amdgpu_card_posted(adev))
++ return true;
++
++ if (adev->bios) {
++ DRM_INFO("GPU not posted. posting now...\n");
++ if (adev->is_atom_bios)
++ amdgpu_atom_asic_init(adev->mode_info.atom_context);
++ return true;
++ } else {
++ dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
++ return false;
++ }
++}
++
++/**
++ * amdgpu_dummy_page_init - init dummy page used by the driver
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Allocate the dummy page used by the driver (all asics).
++ * This dummy page is used by the driver as a filler for gart entries
++ * when pages are taken out of the GART
++ * Returns 0 on success, -ENOMEM on failure.
++ */
++int amdgpu_dummy_page_init(struct amdgpu_device *adev)
++{
++ if (adev->dummy_page.page)
++ return 0;
++ adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
++ if (adev->dummy_page.page == NULL)
++ return -ENOMEM;
++ adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
++ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++ if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
++ dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
++ __free_page(adev->dummy_page.page);
++ adev->dummy_page.page = NULL;
++ return -ENOMEM;
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_dummy_page_fini - free dummy page used by the driver
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Frees the dummy page used by the driver (all asics).
++ */
++void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
++{
++ if (adev->dummy_page.page == NULL)
++ return;
++ pci_unmap_page(adev->pdev, adev->dummy_page.addr,
++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++ __free_page(adev->dummy_page.page);
++ adev->dummy_page.page = NULL;
++}
++
++
++/* ATOM accessor methods */
++/*
++ * ATOM is an interpreted byte code stored in tables in the vbios. The
++ * driver registers callbacks to access registers and the interpreter
++ * in the driver parses the tables and executes them to program specific
++ * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
++ * atombios.h, and atom.c
++ */
++
++/**
++ * cail_pll_read - read PLL register
++ *
++ * @info: atom card_info pointer
++ * @reg: PLL register offset
++ *
++ * Provides a PLL register accessor for the atom interpreter (r4xx+).
++ * Returns the value of the PLL register.
++ */
++static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
++{
++ return 0;
++}
++
++/**
++ * cail_pll_write - write PLL register
++ *
++ * @info: atom card_info pointer
++ * @reg: PLL register offset
++ * @val: value to write to the pll register
++ *
++ * Provides a PLL register accessor for the atom interpreter (r4xx+).
++ */
++static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
++{
++
++}
++
++/**
++ * cail_mc_read - read MC (Memory Controller) register
++ *
++ * @info: atom card_info pointer
++ * @reg: MC register offset
++ *
++ * Provides an MC register accessor for the atom interpreter (r4xx+).
++ * Returns the value of the MC register.
++ */
++static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
++{
++ return 0;
++}
++
++/**
++ * cail_mc_write - write MC (Memory Controller) register
++ *
++ * @info: atom card_info pointer
++ * @reg: MC register offset
++ * @val: value to write to the MC register
++ *
++ * Provides an MC register accessor for the atom interpreter (r4xx+).
++ */
++static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
++{
++
++}
++
++/**
++ * cail_reg_write - write MMIO register
++ *
++ * @info: atom card_info pointer
++ * @reg: MMIO register offset
++ * @val: value to write to the MMIO register
++ *
++ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
++ */
++static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
++{
++ struct amdgpu_device *adev = info->dev->dev_private;
++
++ WREG32(reg, val);
++}
++
++/**
++ * cail_reg_read - read MMIO register
++ *
++ * @info: atom card_info pointer
++ * @reg: MMIO register offset
++ *
++ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
++ * Returns the value of the MMIO register.
++ */
++static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
++{
++ struct amdgpu_device *adev = info->dev->dev_private;
++ uint32_t r;
++
++ r = RREG32(reg);
++ return r;
++}
++
++/**
++ * cail_ioreg_write - write IO register
++ *
++ * @info: atom card_info pointer
++ * @reg: IO register offset
++ * @val: value to write to the IO register
++ *
++ * Provides an IO register accessor for the atom interpreter (r4xx+).
++ */
++static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
++{
++ struct amdgpu_device *adev = info->dev->dev_private;
++
++ WREG32_IO(reg, val);
++}
++
++/**
++ * cail_ioreg_read - read IO register
++ *
++ * @info: atom card_info pointer
++ * @reg: IO register offset
++ *
++ * Provides an IO register accessor for the atom interpreter (r4xx+).
++ * Returns the value of the IO register.
++ */
++static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
++{
++ struct amdgpu_device *adev = info->dev->dev_private;
++ uint32_t r;
++
++ r = RREG32_IO(reg);
++ return r;
++}
++
++/**
++ * amdgpu_atombios_fini - free the driver info and callbacks for atombios
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Frees the driver info and register access callbacks for the ATOM
++ * interpreter (r4xx+).
++ * Called at driver shutdown.
++ */
++static void amdgpu_atombios_fini(struct amdgpu_device *adev)
++{
++ if (adev->mode_info.atom_context)
++ kfree(adev->mode_info.atom_context->scratch);
++ kfree(adev->mode_info.atom_context);
++ adev->mode_info.atom_context = NULL;
++ kfree(adev->mode_info.atom_card_info);
++ adev->mode_info.atom_card_info = NULL;
++}
++
++/**
++ * amdgpu_atombios_init - init the driver info and callbacks for atombios
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Initializes the driver info and register access callbacks for the
++ * ATOM interpreter (r4xx+).
++ * Returns 0 on success, -ENOMEM on failure.
++ * Called at driver startup.
++ */
++static int amdgpu_atombios_init(struct amdgpu_device *adev)
++{
++ struct card_info *atom_card_info =
++ kzalloc(sizeof(struct card_info), GFP_KERNEL);
++
++ if (!atom_card_info)
++ return -ENOMEM;
++
++ adev->mode_info.atom_card_info = atom_card_info;
++ atom_card_info->dev = adev->ddev;
++ atom_card_info->reg_read = cail_reg_read;
++ atom_card_info->reg_write = cail_reg_write;
++ /* needed for iio ops */
++ if (adev->rio_mem) {
++ atom_card_info->ioreg_read = cail_ioreg_read;
++ atom_card_info->ioreg_write = cail_ioreg_write;
++ } else {
++ DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
++ atom_card_info->ioreg_read = cail_reg_read;
++ atom_card_info->ioreg_write = cail_reg_write;
++ }
++ atom_card_info->mc_read = cail_mc_read;
++ atom_card_info->mc_write = cail_mc_write;
++ atom_card_info->pll_read = cail_pll_read;
++ atom_card_info->pll_write = cail_pll_write;
++
++ adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
++ if (!adev->mode_info.atom_context) {
++ amdgpu_atombios_fini(adev);
++ return -ENOMEM;
++ }
++
++ mutex_init(&adev->mode_info.atom_context->mutex);
++ amdgpu_atombios_scratch_regs_init(adev);
++ amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
++ return 0;
++}
++
++/* if we get transitioned to only one device, take VGA back */
++/**
++ * amdgpu_vga_set_decode - enable/disable vga decode
++ *
++ * @cookie: amdgpu_device pointer
++ * @state: enable/disable vga decode
++ *
++ * Enable/disable vga decode (all asics).
++ * Returns VGA resource flags.
++ */
++static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
++{
++ struct amdgpu_device *adev = cookie;
++ amdgpu_asic_set_vga_state(adev, state);
++ if (state)
++ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
++ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
++ else
++ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
++}
++
++/**
++ * amdgpu_check_pot_argument - check that argument is a power of two
++ *
++ * @arg: value to check
++ *
++ * Validates that a certain argument is a power of two (all asics).
++ * Returns true if argument is valid.
++ */
++static bool amdgpu_check_pot_argument(int arg)
++{
++ return (arg & (arg - 1)) == 0;
++}
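++
++/*
++ * e.g. 256 (0x100) & 255 (0x0ff) == 0, so 256 passes;
++ * 384 (0x180) & 383 (0x17f) == 0x100, so 384 is rejected.
++ * Note that 0 also passes, which is intentional for amdgpu_vram_limit
++ * where 0 means "no limit".
++ */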
++
++/**
++ * amdgpu_check_arguments - validate module params
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Validates certain module parameters and updates
++ * the associated values used by the driver (all asics).
++ */
++static void amdgpu_check_arguments(struct amdgpu_device *adev)
++{
++ /* vramlimit must be a power of two */
++ if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
++ dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
++ amdgpu_vram_limit);
++ amdgpu_vram_limit = 0;
++ }
++
++ if (amdgpu_gart_size != -1) {
++ /* gtt size must be power of two and greater or equal to 32M */
++ if (amdgpu_gart_size < 32) {
++ dev_warn(adev->dev, "gart size (%d) too small\n",
++ amdgpu_gart_size);
++ amdgpu_gart_size = -1;
++ } else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) {
++ dev_warn(adev->dev, "gart size (%d) must be a power of 2\n",
++ amdgpu_gart_size);
++ amdgpu_gart_size = -1;
++ }
++ }
++
++ if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
++ dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
++ amdgpu_vm_size);
++ amdgpu_vm_size = 4;
++ }
++
++ if (amdgpu_vm_size < 1) {
++ dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
++ amdgpu_vm_size);
++ amdgpu_vm_size = 4;
++ }
++
++ /*
++ * Max GPUVM size for Cayman, SI and CI is 40 bits.
++ */
++ if (amdgpu_vm_size > 1024) {
++ dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
++ amdgpu_vm_size);
++ amdgpu_vm_size = 4;
++ }
++
++ /* Define the number of bits in the page table versus the page directory;
++ * a page is 4KB, so we have a 12-bit offset, a minimum of 9 bits in the
++ * page table, and the remaining bits in the page directory */
++ if (amdgpu_vm_block_size == -1) {
++
++ /* Total bits covered by PD + PTs */
++ unsigned bits = ilog2(amdgpu_vm_size) + 18;
++
++ /* Make sure the PD is 4K in size up to 8GB of address space.
++ Above that, split equally between PD and PTs */
++ if (amdgpu_vm_size <= 8)
++ amdgpu_vm_block_size = bits - 9;
++ else
++ amdgpu_vm_block_size = (bits + 3) / 2;
++
++ } else if (amdgpu_vm_block_size < 9) {
++ dev_warn(adev->dev, "VM page table size (%d) too small\n",
++ amdgpu_vm_block_size);
++ amdgpu_vm_block_size = 9;
++ }
++
++ if (amdgpu_vm_block_size > 24 ||
++ (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
++ dev_warn(adev->dev, "VM page table size (%d) too large\n",
++ amdgpu_vm_block_size);
++ amdgpu_vm_block_size = 9;
++ }
++}
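++
++/*
++ * Worked example: for amdgpu_vm_size = 8 (GB) the PD + PT span covers
++ * ilog2(8) + 18 = 21 bits. 8 GB is within the "PD fits in 4K" limit,
++ * so amdgpu_vm_block_size = 21 - 9 = 12, leaving 21 - 12 = 9 bits for
++ * the page directory (512 entries, one 4K page assuming 8-byte PDEs).
++ */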
++
++/**
++ * amdgpu_switcheroo_set_state - set switcheroo state
++ *
++ * @pdev: pci dev pointer
++ * @state: vga switcheroo state
++ *
++ * Callback for the switcheroo driver. Suspends or resumes
++ * the asics before or after it is powered up using ACPI methods.
++ */
++static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++
++ if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
++ return;
++
++ if (state == VGA_SWITCHEROO_ON) {
++ unsigned d3_delay = dev->pdev->d3_delay;
++
++ printk(KERN_INFO "amdgpu: switched on\n");
++ /* don't suspend or resume card normally */
++ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
++
++ amdgpu_resume_kms(dev, true, true);
++
++ dev->pdev->d3_delay = d3_delay;
++
++ dev->switch_power_state = DRM_SWITCH_POWER_ON;
++ drm_kms_helper_poll_enable(dev);
++ } else {
++ printk(KERN_INFO "amdgpu: switched off\n");
++ drm_kms_helper_poll_disable(dev);
++ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
++ amdgpu_suspend_kms(dev, true, true);
++ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
++ }
++}
++
++/**
++ * amdgpu_switcheroo_can_switch - see if switcheroo state can change
++ *
++ * @pdev: pci dev pointer
++ *
++ * Callback for the switcheroo driver. Checks if the switcheroo
++ * state can be changed.
++ * Returns true if the state can be changed, false if not.
++ */
++static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++
++ /*
++ * FIXME: open_count is protected by drm_global_mutex but that would lead to
++ * locking inversion with the driver load path. And the access here is
++ * completely racy anyway. So don't bother with locking for now.
++ */
++ return dev->open_count == 0;
++}
++
++static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
++ .set_gpu_state = amdgpu_switcheroo_set_state,
++ .reprobe = NULL,
++ .can_switch = amdgpu_switcheroo_can_switch,
++};
++
++int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
++ enum amdgpu_ip_block_type block_type,
++ enum amdgpu_clockgating_state state)
++{
++ int i, r = 0;
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (adev->ip_blocks[i].type == block_type) {
++ r = adev->ip_blocks[i].funcs->set_clockgating_state(adev,
++ state);
++ if (r)
++ return r;
++ }
++ }
++ return r;
++}
++
++int amdgpu_set_powergating_state(struct amdgpu_device *adev,
++ enum amdgpu_ip_block_type block_type,
++ enum amdgpu_powergating_state state)
++{
++ int i, r = 0;
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (adev->ip_blocks[i].type == block_type) {
++ r = adev->ip_blocks[i].funcs->set_powergating_state(adev,
++ state);
++ if (r)
++ return r;
++ }
++ }
++ return r;
++}
++
++const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
++ struct amdgpu_device *adev,
++ enum amdgpu_ip_block_type type)
++{
++ int i;
++
++ for (i = 0; i < adev->num_ip_blocks; i++)
++ if (adev->ip_blocks[i].type == type)
++ return &adev->ip_blocks[i];
++
++ return NULL;
++}
++
++/**
++ * amdgpu_ip_block_version_cmp
++ *
++ * @adev: amdgpu_device pointer
++ * @type: enum amdgpu_ip_block_type
++ * @major: major version
++ * @minor: minor version
++ *
++ * Returns 0 if the version is equal or greater,
++ * 1 if smaller or the ip_block doesn't exist.
++ */
++int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
++ enum amdgpu_ip_block_type type,
++ u32 major, u32 minor)
++{
++ const struct amdgpu_ip_block_version *ip_block;
++ ip_block = amdgpu_get_ip_block(adev, type);
++
++ if (ip_block && ((ip_block->major > major) ||
++ ((ip_block->major == major) &&
++ (ip_block->minor >= minor))))
++ return 0;
++
++ return 1;
++}
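++
++/*
++ * Usage sketch: gate a code path on the GMC block being at least
++ * version 7.0:
++ *
++ *	if (!amdgpu_ip_block_version_cmp(adev, AMDGPU_IP_BLOCK_TYPE_GMC,
++ *			7, 0)) {
++ *		// GMC 7.0 or newer
++ *	}
++ */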
++
++static int amdgpu_early_init(struct amdgpu_device *adev)
++{
++ int i, r = -EINVAL;
++
++ switch (adev->asic_type) {
++ default:
++ /* FIXME: not supported yet */
++ return -EINVAL;
++ }
++
++ if (adev->ip_blocks == NULL) {
++ DRM_ERROR("No IP blocks found!\n");
++ return r;
++ }
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
++ DRM_ERROR("disabled ip block: %d\n", i);
++ adev->ip_block_enabled[i] = false;
++ } else {
++ if (adev->ip_blocks[i].funcs->early_init) {
++ r = adev->ip_blocks[i].funcs->early_init(adev);
++ if (r)
++ return r;
++ }
++ adev->ip_block_enabled[i] = true;
++ }
++ }
++
++ return 0;
++}
++
++static int amdgpu_init(struct amdgpu_device *adev)
++{
++ int i, r;
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (!adev->ip_block_enabled[i])
++ continue;
++ r = adev->ip_blocks[i].funcs->sw_init(adev);
++ if (r)
++ return r;
++ /* need to do gmc hw init early so we can allocate gpu mem */
++ if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) {
++ r = amdgpu_vram_scratch_init(adev);
++ if (r)
++ return r;
++ r = adev->ip_blocks[i].funcs->hw_init(adev);
++ if (r)
++ return r;
++ r = amdgpu_wb_init(adev);
++ if (r)
++ return r;
++ }
++ }
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (!adev->ip_block_enabled[i])
++ continue;
++ /* gmc hw init is done early */
++ if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC)
++ continue;
++ r = adev->ip_blocks[i].funcs->hw_init(adev);
++ if (r)
++ return r;
++ }
++
++ return 0;
++}
++
++static int amdgpu_late_init(struct amdgpu_device *adev)
++{
++ int i = 0, r;
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (!adev->ip_block_enabled[i])
++ continue;
++ /* enable clockgating to save power */
++ r = adev->ip_blocks[i].funcs->set_clockgating_state(adev,
++ AMDGPU_CG_STATE_GATE);
++ if (r)
++ return r;
++ if (adev->ip_blocks[i].funcs->late_init) {
++ r = adev->ip_blocks[i].funcs->late_init(adev);
++ if (r)
++ return r;
++ }
++ }
++
++ return 0;
++}
++
++static int amdgpu_fini(struct amdgpu_device *adev)
++{
++ int i, r;
++
++ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
++ if (!adev->ip_block_enabled[i])
++ continue;
++ if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) {
++ amdgpu_wb_fini(adev);
++ amdgpu_vram_scratch_fini(adev);
++ }
++ /* ungate blocks before hw fini so that we can shutdown the blocks safely */
++ r = adev->ip_blocks[i].funcs->set_clockgating_state(adev,
++ AMDGPU_CG_STATE_UNGATE);
++ if (r)
++ return r;
++ r = adev->ip_blocks[i].funcs->hw_fini(adev);
++ /* XXX handle errors */
++ }
++
++ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
++ if (!adev->ip_block_enabled[i])
++ continue;
++ r = adev->ip_blocks[i].funcs->sw_fini(adev);
++ /* XXX handle errors */
++ adev->ip_block_enabled[i] = false;
++ }
++
++ return 0;
++}
++
++static int amdgpu_suspend(struct amdgpu_device *adev)
++{
++ int i, r;
++
++ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
++ if (!adev->ip_block_enabled[i])
++ continue;
++ /* ungate blocks so that suspend can properly shut them down */
++ r = adev->ip_blocks[i].funcs->set_clockgating_state(adev,
++ AMDGPU_CG_STATE_UNGATE);
++ /* XXX handle errors */
++ r = adev->ip_blocks[i].funcs->suspend(adev);
++ /* XXX handle errors */
++ }
++
++ return 0;
++}
++
++static int amdgpu_resume(struct amdgpu_device *adev)
++{
++ int i, r;
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (!adev->ip_block_enabled[i])
++ continue;
++ r = adev->ip_blocks[i].funcs->resume(adev);
++ if (r)
++ return r;
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_device_init - initialize the driver
++ *
++ * @adev: amdgpu_device pointer
++ * @ddev: drm dev pointer
++ * @pdev: pci dev pointer
++ * @flags: driver flags
++ *
++ * Initializes the driver info and hw (all asics).
++ * Returns 0 for success or an error on failure.
++ * Called at driver startup.
++ */
++int amdgpu_device_init(struct amdgpu_device *adev,
++ struct drm_device *ddev,
++ struct pci_dev *pdev,
++ uint32_t flags)
++{
++ int r, i;
++ bool runtime = false;
++
++ adev->shutdown = false;
++ adev->dev = &pdev->dev;
++ adev->ddev = ddev;
++ adev->pdev = pdev;
++ adev->flags = flags;
++ adev->asic_type = flags & AMDGPU_ASIC_MASK;
++ adev->is_atom_bios = false;
++ adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
++ adev->mc.gtt_size = 512 * 1024 * 1024;
++ adev->accel_working = false;
++ adev->num_rings = 0;
++ adev->mman.buffer_funcs = NULL;
++ adev->mman.buffer_funcs_ring = NULL;
++ adev->vm_manager.vm_pte_funcs = NULL;
++ adev->vm_manager.vm_pte_funcs_ring = NULL;
++ adev->gart.gart_funcs = NULL;
++ adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
++
++ adev->smc_rreg = &amdgpu_invalid_rreg;
++ adev->smc_wreg = &amdgpu_invalid_wreg;
++ adev->pcie_rreg = &amdgpu_invalid_rreg;
++ adev->pcie_wreg = &amdgpu_invalid_wreg;
++ adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
++ adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
++ adev->didt_rreg = &amdgpu_invalid_rreg;
++ adev->didt_wreg = &amdgpu_invalid_wreg;
++ adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
++ adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
++
++ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
++ amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
++ pdev->subsystem_vendor, pdev->subsystem_device);
++
++ /* mutex initialization are all done here so we
++ * can recall function without having locking issues */
++ mutex_init(&adev->ring_lock);
++ atomic_set(&adev->irq.ih.lock, 0);
++ mutex_init(&adev->gem.mutex);
++ mutex_init(&adev->pm.mutex);
++ mutex_init(&adev->gfx.gpu_clock_mutex);
++ mutex_init(&adev->srbm_mutex);
++ mutex_init(&adev->grbm_idx_mutex);
++ init_rwsem(&adev->pm.mclk_lock);
++ init_rwsem(&adev->exclusive_lock);
++ mutex_init(&adev->mn_lock);
++ hash_init(adev->mn_hash);
++
++ amdgpu_check_arguments(adev);
++
++ /* Registers mapping */
++ /* TODO: block userspace mapping of io register */
++ spin_lock_init(&adev->mmio_idx_lock);
++ spin_lock_init(&adev->smc_idx_lock);
++ spin_lock_init(&adev->pcie_idx_lock);
++ spin_lock_init(&adev->uvd_ctx_idx_lock);
++ spin_lock_init(&adev->didt_idx_lock);
++ spin_lock_init(&adev->audio_endpt_idx_lock);
++
++ adev->rmmio_base = pci_resource_start(adev->pdev, 5);
++ adev->rmmio_size = pci_resource_len(adev->pdev, 5);
++ adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
++ if (adev->rmmio == NULL) {
++ return -ENOMEM;
++ }
++ DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
++ DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
++
++ /* doorbell bar mapping */
++ amdgpu_doorbell_init(adev);
++
++ /* io port mapping */
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++ if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
++ adev->rio_mem_size = pci_resource_len(adev->pdev, i);
++ adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
++ break;
++ }
++ }
++ if (adev->rio_mem == NULL)
++ DRM_ERROR("Unable to find PCI I/O BAR\n");
++
++ /* early init functions */
++ r = amdgpu_early_init(adev);
++ if (r)
++ return r;
++
++ /* if we have more than one VGA card, disable the amdgpu VGA resources */
++ /* this will fail for cards that aren't VGA class devices, just
++ * ignore it */
++ vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
++
++ if (amdgpu_runtime_pm == 1)
++ runtime = true;
++ if (amdgpu_device_is_px(ddev))
++ runtime = true;
++ vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
++ if (runtime)
++ vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
++
++ /* Read BIOS */
++ if (!amdgpu_get_bios(adev))
++ return -EINVAL;
++ /* Must be an ATOMBIOS */
++ if (!adev->is_atom_bios) {
++ dev_err(adev->dev, "Expecting atombios for GPU\n");
++ return -EINVAL;
++ }
++ r = amdgpu_atombios_init(adev);
++ if (r)
++ return r;
++
++ /* Post card if necessary */
++ if (!amdgpu_card_posted(adev)) {
++ if (!adev->bios) {
++ dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
++ return -EINVAL;
++ }
++ DRM_INFO("GPU not posted. posting now...\n");
++ amdgpu_atom_asic_init(adev->mode_info.atom_context);
++ }
++
++ /* Initialize clocks */
++ r = amdgpu_atombios_get_clock_info(adev);
++ if (r)
++ return r;
++ /* init i2c buses */
++ amdgpu_atombios_i2c_init(adev);
++
++ /* Fence driver */
++ r = amdgpu_fence_driver_init(adev);
++ if (r)
++ return r;
++
++ /* init the mode config */
++ drm_mode_config_init(adev->ddev);
++
++ r = amdgpu_init(adev);
++ if (r) {
++ amdgpu_fini(adev);
++ return r;
++ }
++
++ adev->accel_working = true;
++
++ amdgpu_fbdev_init(adev);
++
++ r = amdgpu_ib_pool_init(adev);
++ if (r) {
++ dev_err(adev->dev, "IB initialization failed (%d).\n", r);
++ return r;
++ }
++
++ r = amdgpu_ib_ring_tests(adev);
++ if (r)
++ DRM_ERROR("ib ring test failed (%d).\n", r);
++
++ r = amdgpu_gem_debugfs_init(adev);
++ if (r) {
++ DRM_ERROR("registering gem debugfs failed (%d).\n", r);
++ }
++
++ r = amdgpu_debugfs_regs_init(adev);
++ if (r) {
++ DRM_ERROR("registering register debugfs failed (%d).\n", r);
++ }
++
++ if ((amdgpu_testing & 1)) {
++ if (adev->accel_working)
++ amdgpu_test_moves(adev);
++ else
++ DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
++ }
++ if ((amdgpu_testing & 2)) {
++ if (adev->accel_working)
++ amdgpu_test_syncing(adev);
++ else
++ DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
++ }
++ if (amdgpu_benchmarking) {
++ if (adev->accel_working)
++ amdgpu_benchmark(adev, amdgpu_benchmarking);
++ else
++ DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
++ }
++
++ /* enable clockgating, etc. after ib tests, etc. since some blocks require
++ * explicit gating rather than handling it automatically.
++ */
++ r = amdgpu_late_init(adev);
++ if (r)
++ return r;
++
++ return 0;
++}
++
++static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
++
++/**
++ * amdgpu_device_fini - tear down the driver
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Tear down the driver info (all asics).
++ * Called at driver shutdown.
++ */
++void amdgpu_device_fini(struct amdgpu_device *adev)
++{
++ int r;
++
++ DRM_INFO("amdgpu: finishing device.\n");
++ adev->shutdown = true;
++ /* evict vram memory */
++ amdgpu_bo_evict_vram(adev);
++ amdgpu_ib_pool_fini(adev);
++ amdgpu_fence_driver_fini(adev);
++ amdgpu_fbdev_fini(adev);
++ r = amdgpu_fini(adev);
++ if (adev->ip_block_enabled)
++ kfree(adev->ip_block_enabled);
++ adev->ip_block_enabled = NULL;
++ adev->accel_working = false;
++ /* free i2c buses */
++ amdgpu_i2c_fini(adev);
++ amdgpu_atombios_fini(adev);
++ kfree(adev->bios);
++ adev->bios = NULL;
++ vga_switcheroo_unregister_client(adev->pdev);
++ vga_client_register(adev->pdev, NULL, NULL, NULL);
++ if (adev->rio_mem)
++ pci_iounmap(adev->pdev, adev->rio_mem);
++ adev->rio_mem = NULL;
++ iounmap(adev->rmmio);
++ adev->rmmio = NULL;
++ amdgpu_doorbell_fini(adev);
++ amdgpu_debugfs_regs_cleanup(adev);
++ amdgpu_debugfs_remove_files(adev);
++}
++
++
++/*
++ * Suspend & resume.
++ */
++/**
++ * amdgpu_suspend_kms - initiate device suspend
++ *
++ * @dev: drm dev pointer
++ * @suspend: true to put the PCI device into a low power state
++ * @fbcon: true to suspend the fbdev console
++ *
++ * Puts the hw in the suspend state (all asics).
++ * Returns 0 for success or an error on failure.
++ * Called at driver suspend.
++ */
++int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
++{
++ struct amdgpu_device *adev;
++ struct drm_crtc *crtc;
++ struct drm_connector *connector;
++ int i, r;
++ bool force_completion = false;
++
++ if (dev == NULL || dev->dev_private == NULL) {
++ return -ENODEV;
++ }
++
++ adev = dev->dev_private;
++
++ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
++ return 0;
++
++ drm_kms_helper_poll_disable(dev);
++
++ /* turn off display hw */
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ }
++
++ /* unpin the front buffers */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
++ struct amdgpu_bo *robj;
++
++ if (rfb == NULL || rfb->obj == NULL) {
++ continue;
++ }
++ robj = gem_to_amdgpu_bo(rfb->obj);
++ /* don't unpin kernel fb objects */
++ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
++ r = amdgpu_bo_reserve(robj, false);
++ if (r == 0) {
++ amdgpu_bo_unpin(robj);
++ amdgpu_bo_unreserve(robj);
++ }
++ }
++ }
++ /* evict vram memory */
++ amdgpu_bo_evict_vram(adev);
++
++ /* wait for gpu to finish processing current batch */
++ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
++ struct amdgpu_ring *ring = adev->rings[i];
++ if (!ring)
++ continue;
++
++ r = amdgpu_fence_wait_empty(ring);
++ if (r) {
++			/* wait failed: force fence completion below and
++			 * defer any GPU reset until resume */
++ force_completion = true;
++ }
++ }
++ if (force_completion) {
++ amdgpu_fence_driver_force_completion(adev);
++ }
++
++ r = amdgpu_suspend(adev);
++
++ /* evict remaining vram memory */
++ amdgpu_bo_evict_vram(adev);
++
++ pci_save_state(dev->pdev);
++ if (suspend) {
++ /* Shut down the device */
++ pci_disable_device(dev->pdev);
++ pci_set_power_state(dev->pdev, PCI_D3hot);
++ }
++
++ if (fbcon) {
++ console_lock();
++ amdgpu_fbdev_set_suspend(adev, 1);
++ console_unlock();
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_resume_kms - initiate device resume
++ *
++ * @dev: drm dev pointer
++ * @resume: true to re-enable the device and restore its PCI power state
++ * @fbcon: true to also resume the fbdev console
++ *
++ * Bring the hw back to operating state (all asics).
++ * Returns 0 for success or an error on failure.
++ * Called at driver resume.
++ */
++int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
++{
++ struct drm_connector *connector;
++ struct amdgpu_device *adev = dev->dev_private;
++ int r;
++
++ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
++ return 0;
++
++ if (fbcon) {
++ console_lock();
++ }
++ if (resume) {
++ pci_set_power_state(dev->pdev, PCI_D0);
++ pci_restore_state(dev->pdev);
++ if (pci_enable_device(dev->pdev)) {
++ if (fbcon)
++ console_unlock();
++ return -1;
++ }
++ }
++
++ /* post card */
++ amdgpu_atom_asic_init(adev->mode_info.atom_context);
++
++ r = amdgpu_resume(adev);
++
++ r = amdgpu_ib_ring_tests(adev);
++ if (r)
++ DRM_ERROR("ib ring test failed (%d).\n", r);
++
++ r = amdgpu_late_init(adev);
++ if (r)
++ return r;
++
++ /* blat the mode back in */
++ if (fbcon) {
++ drm_helper_resume_force_mode(dev);
++ /* turn on display hw */
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
++ }
++ }
++
++ drm_kms_helper_poll_enable(dev);
++
++ if (fbcon) {
++ amdgpu_fbdev_set_suspend(adev, 0);
++ console_unlock();
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_gpu_reset - reset the asic
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Attempt to reset the GPU if it has hung (all asics).
++ * Returns 0 for success or an error on failure.
++ */
++int amdgpu_gpu_reset(struct amdgpu_device *adev)
++{
++	unsigned ring_sizes[AMDGPU_MAX_RINGS];
++	uint32_t *ring_data[AMDGPU_MAX_RINGS];
++	bool saved = false;
++	int i, r;
++	int resched;
++
++ down_write(&adev->exclusive_lock);
++
++ if (!adev->needs_reset) {
++ up_write(&adev->exclusive_lock);
++ return 0;
++ }
++
++ adev->needs_reset = false;
++
++ /* block TTM */
++ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
++
++ r = amdgpu_suspend(adev);
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_ring *ring = adev->rings[i];
++ if (!ring)
++ continue;
++
++ ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
++ if (ring_sizes[i]) {
++ saved = true;
++ dev_info(adev->dev, "Saved %d dwords of commands "
++ "on ring %d.\n", ring_sizes[i], i);
++ }
++ }
++
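++	/* Reset and resume; if the IB tests fail while the saved ring
++	 * contents are restored, drop them and retry the reset once
++	 * with empty rings.
++	 */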
++retry:
++ r = amdgpu_asic_reset(adev);
++ if (!r) {
++ dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
++ r = amdgpu_resume(adev);
++ }
++
++ if (!r) {
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_ring *ring = adev->rings[i];
++ if (!ring)
++ continue;
++
++ amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
++ ring_sizes[i] = 0;
++ ring_data[i] = NULL;
++ }
++
++ r = amdgpu_ib_ring_tests(adev);
++ if (r) {
++ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
++ if (saved) {
++ saved = false;
++ r = amdgpu_suspend(adev);
++ goto retry;
++ }
++ }
++ } else {
++ amdgpu_fence_driver_force_completion(adev);
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ if (adev->rings[i])
++ kfree(ring_data[i]);
++ }
++ }
++
++ drm_helper_resume_force_mode(adev->ddev);
++
++ ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
++ if (r) {
++ /* bad news, how to tell it to userspace ? */
++ dev_info(adev->dev, "GPU reset failed\n");
++ }
++
++ up_write(&adev->exclusive_lock);
++ return r;
++}
++
++
++/*
++ * Debugfs
++ */
++int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
++ struct drm_info_list *files,
++ unsigned nfiles)
++{
++ unsigned i;
++
++ for (i = 0; i < adev->debugfs_count; i++) {
++ if (adev->debugfs[i].files == files) {
++ /* Already registered */
++ return 0;
++ }
++ }
++
++ i = adev->debugfs_count + 1;
++ if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
++ DRM_ERROR("Reached maximum number of debugfs components.\n");
++		DRM_ERROR("Please report this so AMDGPU_DEBUGFS_MAX_COMPONENTS "
++			  "can be increased.\n");
++ return -EINVAL;
++ }
++ adev->debugfs[adev->debugfs_count].files = files;
++ adev->debugfs[adev->debugfs_count].num_files = nfiles;
++ adev->debugfs_count = i;
++#if defined(CONFIG_DEBUG_FS)
++ drm_debugfs_create_files(files, nfiles,
++ adev->ddev->control->debugfs_root,
++ adev->ddev->control);
++ drm_debugfs_create_files(files, nfiles,
++ adev->ddev->primary->debugfs_root,
++ adev->ddev->primary);
++#endif
++ return 0;
++}
++
++static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
++{
++#if defined(CONFIG_DEBUG_FS)
++ unsigned i;
++
++ for (i = 0; i < adev->debugfs_count; i++) {
++ drm_debugfs_remove_files(adev->debugfs[i].files,
++ adev->debugfs[i].num_files,
++ adev->ddev->control);
++ drm_debugfs_remove_files(adev->debugfs[i].files,
++ adev->debugfs[i].num_files,
++ adev->ddev->primary);
++ }
++#endif
++}
++
++#if defined(CONFIG_DEBUG_FS)
++
++static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
++ size_t size, loff_t *pos)
++{
++ struct amdgpu_device *adev = f->f_inode->i_private;
++ ssize_t result = 0;
++ int r;
++
++ if (size & 0x3 || *pos & 0x3)
++ return -EINVAL;
++
++ while (size) {
++ uint32_t value;
++
++		if (*pos >= adev->rmmio_size)
++ return result;
++
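++		/* *pos is a byte offset into the MMIO aperture; RREG32
++		 * takes a dword index, hence the shift by two.
++		 */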
++ value = RREG32(*pos >> 2);
++		r = put_user(value, (uint32_t __user *)buf);
++ if (r)
++ return r;
++
++ result += 4;
++ buf += 4;
++ *pos += 4;
++ size -= 4;
++ }
++
++ return result;
++}
++
++static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ struct amdgpu_device *adev = f->f_inode->i_private;
++ ssize_t result = 0;
++ int r;
++
++ if (size & 0x3 || *pos & 0x3)
++ return -EINVAL;
++
++ while (size) {
++ uint32_t value;
++
++		if (*pos >= adev->rmmio_size)
++ return result;
++
++		r = get_user(value, (uint32_t __user *)buf);
++ if (r)
++ return r;
++
++ WREG32(*pos >> 2, value);
++
++ result += 4;
++ buf += 4;
++ *pos += 4;
++ size -= 4;
++ }
++
++ return result;
++}
++
++static const struct file_operations amdgpu_debugfs_regs_fops = {
++ .owner = THIS_MODULE,
++ .read = amdgpu_debugfs_regs_read,
++ .write = amdgpu_debugfs_regs_write,
++ .llseek = default_llseek
++};
++
++static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
++{
++ struct drm_minor *minor = adev->ddev->primary;
++ struct dentry *ent, *root = minor->debugfs_root;
++
++ ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root,
++ adev, &amdgpu_debugfs_regs_fops);
++ if (IS_ERR(ent))
++ return PTR_ERR(ent);
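++	/* report the MMIO aperture size as the file size so userspace
++	 * tools can seek within the register file.
++	 */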
++ i_size_write(ent->d_inode, adev->rmmio_size);
++ adev->debugfs_regs = ent;
++
++ return 0;
++}
++
++static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
++{
++ debugfs_remove(adev->debugfs_regs);
++ adev->debugfs_regs = NULL;
++}
++
++int amdgpu_debugfs_init(struct drm_minor *minor)
++{
++ return 0;
++}
++
++void amdgpu_debugfs_cleanup(struct drm_minor *minor)
++{
++}
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+new file mode 100644
+index 0000000..f22c067
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -0,0 +1,832 @@
++/*
++ * Copyright 2007-8 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_i2c.h"
++#include "atom.h"
++#include "amdgpu_connectors.h"
++#include <asm/div64.h>
++
++#include <linux/pm_runtime.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/drm_edid.h>
++
++
++static void amdgpu_flip_work_func(struct work_struct *__work)
++{
++ struct amdgpu_flip_work *work =
++ container_of(__work, struct amdgpu_flip_work, flip_work);
++ struct amdgpu_device *adev = work->adev;
++ struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
++
++ struct drm_crtc *crtc = &amdgpuCrtc->base;
++ struct amdgpu_fence *fence;
++ unsigned long flags;
++ int r;
++
++ down_read(&adev->exclusive_lock);
++ if (work->fence) {
++ fence = to_amdgpu_fence(work->fence);
++ if (fence) {
++ r = amdgpu_fence_wait(fence, false);
++ if (r == -EDEADLK) {
++ up_read(&adev->exclusive_lock);
++ r = amdgpu_gpu_reset(adev);
++ down_read(&adev->exclusive_lock);
++ }
++ } else
++ r = fence_wait(work->fence, false);
++
++ if (r)
++ DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
++
++ /* We continue with the page flip even if we failed to wait on
++ * the fence, otherwise the DRM core and userspace will be
++ * confused about which BO the CRTC is scanning out
++ */
++
++ fence_put(work->fence);
++ work->fence = NULL;
++ }
++
++ /* We borrow the event spin lock for protecting flip_status */
++ spin_lock_irqsave(&crtc->dev->event_lock, flags);
++
++ /* set the proper interrupt */
++ amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
++ /* do the flip (mmio) */
++ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
++ /* set the flip status */
++ amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
++
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++ up_read(&adev->exclusive_lock);
++}
++
++/*
++ * Handle unpin events outside the interrupt handler proper.
++ */
++static void amdgpu_unpin_work_func(struct work_struct *__work)
++{
++ struct amdgpu_flip_work *work =
++ container_of(__work, struct amdgpu_flip_work, unpin_work);
++ int r;
++
++ /* unpin of the old buffer */
++ r = amdgpu_bo_reserve(work->old_rbo, false);
++ if (likely(r == 0)) {
++ r = amdgpu_bo_unpin(work->old_rbo);
++ if (unlikely(r != 0)) {
++ DRM_ERROR("failed to unpin buffer after flip\n");
++ }
++ amdgpu_bo_unreserve(work->old_rbo);
++ } else
++ DRM_ERROR("failed to reserve buffer after flip\n");
++
++ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
++ kfree(work);
++}
++
++int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ struct drm_pending_vblank_event *event,
++ uint32_t page_flip_flags)
++{
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct amdgpu_framebuffer *old_amdgpu_fb;
++ struct amdgpu_framebuffer *new_amdgpu_fb;
++ struct drm_gem_object *obj;
++ struct amdgpu_flip_work *work;
++ struct amdgpu_bo *new_rbo;
++ unsigned long flags;
++ u64 tiling_flags;
++ u64 base;
++ int r;
++
++	work = kzalloc(sizeof(*work), GFP_KERNEL);
++ if (work == NULL)
++ return -ENOMEM;
++
++ INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
++ INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
++
++ work->event = event;
++ work->adev = adev;
++ work->crtc_id = amdgpu_crtc->crtc_id;
++
++ /* schedule unpin of the old buffer */
++ old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
++ obj = old_amdgpu_fb->obj;
++
++ /* take a reference to the old object */
++ drm_gem_object_reference(obj);
++ work->old_rbo = gem_to_amdgpu_bo(obj);
++
++ new_amdgpu_fb = to_amdgpu_framebuffer(fb);
++ obj = new_amdgpu_fb->obj;
++ new_rbo = gem_to_amdgpu_bo(obj);
++
++ /* pin the new buffer */
++ r = amdgpu_bo_reserve(new_rbo, false);
++ if (unlikely(r != 0)) {
++ DRM_ERROR("failed to reserve new rbo buffer before flip\n");
++ goto cleanup;
++ }
++
++ r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, &base);
++ if (unlikely(r != 0)) {
++ amdgpu_bo_unreserve(new_rbo);
++ r = -EINVAL;
++ DRM_ERROR("failed to pin new rbo buffer before flip\n");
++ goto cleanup;
++ }
++
++ work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
++ amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
++ amdgpu_bo_unreserve(new_rbo);
++
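++	/* base is the GPU address the CRTC will scan out from, filled in
++	 * when the new buffer was pinned into VRAM above.
++	 */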
++ work->base = base;
++
++ r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
++ if (r) {
++ DRM_ERROR("failed to get vblank before flip\n");
++ goto pflip_cleanup;
++ }
++
++	/* we borrow the event spin lock for protecting flip_work */
++ spin_lock_irqsave(&crtc->dev->event_lock, flags);
++ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
++ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++ r = -EBUSY;
++ goto vblank_cleanup;
++ }
++
++ amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
++ amdgpu_crtc->pflip_works = work;
++
++ /* update crtc fb */
++ crtc->primary->fb = fb;
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++ queue_work(amdgpu_crtc->pflip_queue, &work->flip_work);
++ return 0;
++
++vblank_cleanup:
++ drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
++
++pflip_cleanup:
++ if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
++ DRM_ERROR("failed to reserve new rbo in error path\n");
++ goto cleanup;
++ }
++ if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
++ DRM_ERROR("failed to unpin new rbo in error path\n");
++ }
++ amdgpu_bo_unreserve(new_rbo);
++
++cleanup:
++ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
++ fence_put(work->fence);
++ kfree(work);
++
++ return r;
++}
++
++int amdgpu_crtc_set_config(struct drm_mode_set *set)
++{
++ struct drm_device *dev;
++ struct amdgpu_device *adev;
++ struct drm_crtc *crtc;
++ bool active = false;
++ int ret;
++
++ if (!set || !set->crtc)
++ return -EINVAL;
++
++ dev = set->crtc->dev;
++
++ ret = pm_runtime_get_sync(dev->dev);
++ if (ret < 0)
++ return ret;
++
++ ret = drm_crtc_helper_set_config(set);
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++ if (crtc->enabled)
++ active = true;
++
++ pm_runtime_mark_last_busy(dev->dev);
++
++ adev = dev->dev_private;
++ /* if we have active crtcs and we don't have a power ref,
++ take the current one */
++ if (active && !adev->have_disp_power_ref) {
++ adev->have_disp_power_ref = true;
++ return ret;
++ }
++ /* if we have no active crtcs, then drop the power ref
++ we got before */
++ if (!active && adev->have_disp_power_ref) {
++ pm_runtime_put_autosuspend(dev->dev);
++ adev->have_disp_power_ref = false;
++ }
++
++ /* drop the power reference we got coming in here */
++ pm_runtime_put_autosuspend(dev->dev);
++ return ret;
++}
++
++static const char *encoder_names[38] = {
++ "NONE",
++ "INTERNAL_LVDS",
++ "INTERNAL_TMDS1",
++ "INTERNAL_TMDS2",
++ "INTERNAL_DAC1",
++ "INTERNAL_DAC2",
++ "INTERNAL_SDVOA",
++ "INTERNAL_SDVOB",
++ "SI170B",
++ "CH7303",
++ "CH7301",
++ "INTERNAL_DVO1",
++ "EXTERNAL_SDVOA",
++ "EXTERNAL_SDVOB",
++ "TITFP513",
++ "INTERNAL_LVTM1",
++ "VT1623",
++ "HDMI_SI1930",
++ "HDMI_INTERNAL",
++ "INTERNAL_KLDSCP_TMDS1",
++ "INTERNAL_KLDSCP_DVO1",
++ "INTERNAL_KLDSCP_DAC1",
++ "INTERNAL_KLDSCP_DAC2",
++ "SI178",
++ "MVPU_FPGA",
++ "INTERNAL_DDI",
++ "VT1625",
++ "HDMI_SI1932",
++ "DP_AN9801",
++ "DP_DP501",
++ "INTERNAL_UNIPHY",
++ "INTERNAL_KLDSCP_LVTMA",
++ "INTERNAL_UNIPHY1",
++ "INTERNAL_UNIPHY2",
++ "NUTMEG",
++ "TRAVIS",
++ "INTERNAL_VCE",
++ "INTERNAL_UNIPHY3",
++};
++
++static const char *hpd_names[6] = {
++ "HPD1",
++ "HPD2",
++ "HPD3",
++ "HPD4",
++ "HPD5",
++ "HPD6",
++};
++
++void amdgpu_print_display_setup(struct drm_device *dev)
++{
++ struct drm_connector *connector;
++ struct amdgpu_connector *amdgpu_connector;
++ struct drm_encoder *encoder;
++ struct amdgpu_encoder *amdgpu_encoder;
++ uint32_t devices;
++ int i = 0;
++
++ DRM_INFO("AMDGPU Display Connectors\n");
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ amdgpu_connector = to_amdgpu_connector(connector);
++ DRM_INFO("Connector %d:\n", i);
++ DRM_INFO(" %s\n", connector->name);
++ if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
++ DRM_INFO(" %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
++ if (amdgpu_connector->ddc_bus) {
++ DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
++ amdgpu_connector->ddc_bus->rec.mask_clk_reg,
++ amdgpu_connector->ddc_bus->rec.mask_data_reg,
++ amdgpu_connector->ddc_bus->rec.a_clk_reg,
++ amdgpu_connector->ddc_bus->rec.a_data_reg,
++ amdgpu_connector->ddc_bus->rec.en_clk_reg,
++ amdgpu_connector->ddc_bus->rec.en_data_reg,
++ amdgpu_connector->ddc_bus->rec.y_clk_reg,
++ amdgpu_connector->ddc_bus->rec.y_data_reg);
++ if (amdgpu_connector->router.ddc_valid)
++ DRM_INFO(" DDC Router 0x%x/0x%x\n",
++ amdgpu_connector->router.ddc_mux_control_pin,
++ amdgpu_connector->router.ddc_mux_state);
++ if (amdgpu_connector->router.cd_valid)
++ DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
++ amdgpu_connector->router.cd_mux_control_pin,
++ amdgpu_connector->router.cd_mux_state);
++ } else {
++ if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
++ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
++ connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
++ connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
++ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
++ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
++ DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
++ }
++ DRM_INFO(" Encoders:\n");
++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++ devices = amdgpu_encoder->devices & amdgpu_connector->devices;
++ if (devices) {
++ if (devices & ATOM_DEVICE_CRT1_SUPPORT)
++ DRM_INFO(" CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_CRT2_SUPPORT)
++ DRM_INFO(" CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_LCD1_SUPPORT)
++ DRM_INFO(" LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_DFP1_SUPPORT)
++ DRM_INFO(" DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_DFP2_SUPPORT)
++ DRM_INFO(" DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_DFP3_SUPPORT)
++ DRM_INFO(" DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_DFP4_SUPPORT)
++ DRM_INFO(" DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_DFP5_SUPPORT)
++ DRM_INFO(" DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_DFP6_SUPPORT)
++ DRM_INFO(" DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_TV1_SUPPORT)
++ DRM_INFO(" TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ if (devices & ATOM_DEVICE_CV_SUPPORT)
++ DRM_INFO(" CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
++ }
++ }
++ i++;
++ }
++}
++
++/**
++ * amdgpu_ddc_probe
++ *
++ * @amdgpu_connector: connector whose DDC bus should be probed
++ * @use_aux: use the DP aux i2c bus rather than the regular DDC bus
++ *
++ * Returns true if a sink with a valid EDID header responds on the bus.
++ */
++bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
++ bool use_aux)
++{
++ u8 out = 0x0;
++ u8 buf[8];
++ int ret;
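++	/* a one-byte write of offset 0 followed by an 8-byte read of the
++	 * EDID header at the standard DDC slave address.
++	 */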
++ struct i2c_msg msgs[] = {
++ {
++ .addr = DDC_ADDR,
++ .flags = 0,
++ .len = 1,
++ .buf = &out,
++ },
++ {
++ .addr = DDC_ADDR,
++ .flags = I2C_M_RD,
++ .len = 8,
++ .buf = buf,
++ }
++ };
++
++ /* on hw with routers, select right port */
++ if (amdgpu_connector->router.ddc_valid)
++ amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
++
++ if (use_aux) {
++ ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
++ } else {
++ ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
++ }
++
++ if (ret != 2)
++ /* Couldn't find an accessible DDC on this connector */
++ return false;
++	/* Also probe for a valid EDID header.
++	 * An EDID header starts with:
++	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
++	 * Only the first 6 bytes must be valid as
++	 * drm_edid_block_valid() can fix the last 2 bytes.
++	 */
++ if (drm_edid_header_is_valid(buf) < 6) {
++ /* Couldn't find an accessible EDID on this
++ * connector */
++ return false;
++ }
++ return true;
++}
++
++static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
++
++ if (amdgpu_fb->obj) {
++ drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
++ }
++ drm_framebuffer_cleanup(fb);
++ kfree(amdgpu_fb);
++}
++
++static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
++
++ return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
++}
++
++static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
++ .destroy = amdgpu_user_framebuffer_destroy,
++ .create_handle = amdgpu_user_framebuffer_create_handle,
++};
++
++int
++amdgpu_framebuffer_init(struct drm_device *dev,
++ struct amdgpu_framebuffer *rfb,
++ struct drm_mode_fb_cmd2 *mode_cmd,
++ struct drm_gem_object *obj)
++{
++ int ret;
++ rfb->obj = obj;
++ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
++ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
++ if (ret) {
++ rfb->obj = NULL;
++ return ret;
++ }
++ return 0;
++}
++
++static struct drm_framebuffer *
++amdgpu_user_framebuffer_create(struct drm_device *dev,
++ struct drm_file *file_priv,
++ struct drm_mode_fb_cmd2 *mode_cmd)
++{
++ struct drm_gem_object *obj;
++ struct amdgpu_framebuffer *amdgpu_fb;
++ int ret;
++
++ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
++ if (obj == NULL) {
++ dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
++ "can't create framebuffer\n", mode_cmd->handles[0]);
++ return ERR_PTR(-ENOENT);
++ }
++
++ amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
++ if (amdgpu_fb == NULL) {
++ drm_gem_object_unreference_unlocked(obj);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
++ if (ret) {
++ kfree(amdgpu_fb);
++ drm_gem_object_unreference_unlocked(obj);
++ return ERR_PTR(ret);
++ }
++
++ return &amdgpu_fb->base;
++}
++
++static void amdgpu_output_poll_changed(struct drm_device *dev)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ amdgpu_fb_output_poll_changed(adev);
++}
++
++const struct drm_mode_config_funcs amdgpu_mode_funcs = {
++ .fb_create = amdgpu_user_framebuffer_create,
++ .output_poll_changed = amdgpu_output_poll_changed
++};
++
++static struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
++{ { UNDERSCAN_OFF, "off" },
++ { UNDERSCAN_ON, "on" },
++ { UNDERSCAN_AUTO, "auto" },
++};
++
++static struct drm_prop_enum_list amdgpu_audio_enum_list[] =
++{ { AMDGPU_AUDIO_DISABLE, "off" },
++ { AMDGPU_AUDIO_ENABLE, "on" },
++ { AMDGPU_AUDIO_AUTO, "auto" },
++};
++
++/* XXX support different dither options? spatial, temporal, both, etc. */
++static struct drm_prop_enum_list amdgpu_dither_enum_list[] =
++{ { AMDGPU_FMT_DITHER_DISABLE, "off" },
++ { AMDGPU_FMT_DITHER_ENABLE, "on" },
++};
++
++int amdgpu_modeset_create_props(struct amdgpu_device *adev)
++{
++ int sz;
++
++ if (adev->is_atom_bios) {
++ adev->mode_info.coherent_mode_property =
++			drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
++ if (!adev->mode_info.coherent_mode_property)
++ return -ENOMEM;
++ }
++
++ adev->mode_info.load_detect_property =
++ drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
++ if (!adev->mode_info.load_detect_property)
++ return -ENOMEM;
++
++ drm_mode_create_scaling_mode_property(adev->ddev);
++
++ sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
++ adev->mode_info.underscan_property =
++ drm_property_create_enum(adev->ddev, 0,
++ "underscan",
++ amdgpu_underscan_enum_list, sz);
++
++ adev->mode_info.underscan_hborder_property =
++ drm_property_create_range(adev->ddev, 0,
++ "underscan hborder", 0, 128);
++ if (!adev->mode_info.underscan_hborder_property)
++ return -ENOMEM;
++
++ adev->mode_info.underscan_vborder_property =
++ drm_property_create_range(adev->ddev, 0,
++ "underscan vborder", 0, 128);
++ if (!adev->mode_info.underscan_vborder_property)
++ return -ENOMEM;
++
++ sz = ARRAY_SIZE(amdgpu_audio_enum_list);
++ adev->mode_info.audio_property =
++ drm_property_create_enum(adev->ddev, 0,
++ "audio",
++ amdgpu_audio_enum_list, sz);
++
++ sz = ARRAY_SIZE(amdgpu_dither_enum_list);
++ adev->mode_info.dither_property =
++ drm_property_create_enum(adev->ddev, 0,
++ "dither",
++ amdgpu_dither_enum_list, sz);
++
++ return 0;
++}
++
++void amdgpu_update_display_priority(struct amdgpu_device *adev)
++{
++ /* adjustment options for the display watermarks */
++ if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
++ adev->mode_info.disp_priority = 0;
++ else
++ adev->mode_info.disp_priority = amdgpu_disp_priority;
++}
++
++static bool is_hdtv_mode(const struct drm_display_mode *mode)
++{
++ /* try and guess if this is a tv or a monitor */
++ if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
++ (mode->vdisplay == 576) || /* 576p */
++ (mode->vdisplay == 720) || /* 720p */
++ (mode->vdisplay == 1080)) /* 1080p */
++ return true;
++ else
++ return false;
++}
++
++bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
++ const struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_encoder *encoder;
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct amdgpu_encoder *amdgpu_encoder;
++ struct drm_connector *connector;
++ struct amdgpu_connector *amdgpu_connector;
++ u32 src_v = 1, dst_v = 1;
++ u32 src_h = 1, dst_h = 1;
++
++ amdgpu_crtc->h_border = 0;
++ amdgpu_crtc->v_border = 0;
++
++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++ if (encoder->crtc != crtc)
++ continue;
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++ connector = amdgpu_get_connector_for_encoder(encoder);
++ amdgpu_connector = to_amdgpu_connector(connector);
++
++ /* set scaling */
++ if (amdgpu_encoder->rmx_type == RMX_OFF)
++ amdgpu_crtc->rmx_type = RMX_OFF;
++ else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
++ mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
++ amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
++ else
++ amdgpu_crtc->rmx_type = RMX_OFF;
++ /* copy native mode */
++ memcpy(&amdgpu_crtc->native_mode,
++ &amdgpu_encoder->native_mode,
++ sizeof(struct drm_display_mode));
++ src_v = crtc->mode.vdisplay;
++ dst_v = amdgpu_crtc->native_mode.vdisplay;
++ src_h = crtc->mode.hdisplay;
++ dst_h = amdgpu_crtc->native_mode.hdisplay;
++
++ /* fix up for overscan on hdmi */
++ if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
++ ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
++ ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
++ drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
++ is_hdtv_mode(mode)))) {
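++			/* default underscan border: 1/32 of the active
++			 * dimension plus 16 pixels on each axis.
++			 */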
++ if (amdgpu_encoder->underscan_hborder != 0)
++ amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
++ else
++ amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
++ if (amdgpu_encoder->underscan_vborder != 0)
++ amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
++ else
++ amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
++ amdgpu_crtc->rmx_type = RMX_FULL;
++ src_v = crtc->mode.vdisplay;
++ dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
++ src_h = crtc->mode.hdisplay;
++ dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
++ }
++ }
++ if (amdgpu_crtc->rmx_type != RMX_OFF) {
++ fixed20_12 a, b;
++ a.full = dfixed_const(src_v);
++ b.full = dfixed_const(dst_v);
++ amdgpu_crtc->vsc.full = dfixed_div(a, b);
++ a.full = dfixed_const(src_h);
++ b.full = dfixed_const(dst_h);
++ amdgpu_crtc->hsc.full = dfixed_div(a, b);
++ } else {
++ amdgpu_crtc->vsc.full = dfixed_const(1);
++ amdgpu_crtc->hsc.full = dfixed_const(1);
++ }
++ return true;
++}
++
++/*
++ * Retrieve current video scanout position of crtc on a given gpu, and
++ * an optional accurate timestamp of when query happened.
++ *
++ * \param dev Device to query.
++ * \param crtc Crtc to query.
++ * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
++ * \param *vpos Location where vertical scanout position should be stored.
++ * \param *hpos Location where horizontal scanout position should go.
++ * \param *stime Target location for timestamp taken immediately before
++ * scanout position query. Can be NULL to skip timestamp.
++ * \param *etime Target location for timestamp taken immediately after
++ * scanout position query. Can be NULL to skip timestamp.
++ *
++ * Returns vpos as a positive number while in active scanout area.
++ * Returns vpos as a negative number inside vblank, counting the number
++ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
++ * until start of active scanout / end of vblank."
++ *
++ * \return Flags, or'ed together as follows:
++ *
++ * DRM_SCANOUTPOS_VALID = Query successful.
++ * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
++ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
++ * this flag means that returned position may be offset by a constant but
++ * unknown small number of scanlines wrt. real scanout position.
++ *
++ */
++int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
++ int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
++{
++ u32 vbl = 0, position = 0;
++ int vbl_start, vbl_end, vtotal, ret = 0;
++ bool in_vbl = true;
++
++ struct amdgpu_device *adev = dev->dev_private;
++
++ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
++
++ /* Get optional system timestamp before query. */
++ if (stime)
++ *stime = ktime_get();
++
++ if (amdgpu_display_page_flip_get_scanoutpos(adev, crtc, &vbl, &position) == 0)
++ ret |= DRM_SCANOUTPOS_VALID;
++
++ /* Get optional system timestamp after query. */
++ if (etime)
++ *etime = ktime_get();
++
++ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
++
++ /* Decode into vertical and horizontal scanout position. */
++ *vpos = position & 0x1fff;
++ *hpos = (position >> 16) & 0x1fff;
++
++ /* Valid vblank area boundaries from gpu retrieved? */
++ if (vbl > 0) {
++ /* Yes: Decode. */
++ ret |= DRM_SCANOUTPOS_ACCURATE;
++ vbl_start = vbl & 0x1fff;
++ vbl_end = (vbl >> 16) & 0x1fff;
++	} else {
++ /* No: Fake something reasonable which gives at least ok results. */
++ vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
++ vbl_end = 0;
++ }
++
++ /* Test scanout position against vblank region. */
++ if ((*vpos < vbl_start) && (*vpos >= vbl_end))
++ in_vbl = false;
++
++ /* Check if inside vblank area and apply corrective offsets:
++ * vpos will then be >=0 in video scanout area, but negative
++ * within vblank area, counting down the number of lines until
++ * start of scanout.
++ */
++
++ /* Inside "upper part" of vblank area? Apply corrective offset if so: */
++ if (in_vbl && (*vpos >= vbl_start)) {
++ vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
++ *vpos = *vpos - vtotal;
++ }
++
++ /* Correct for shifted end of vbl at vbl_end. */
++ *vpos = *vpos - vbl_end;
++
++ /* In vblank? */
++ if (in_vbl)
++ ret |= DRM_SCANOUTPOS_IN_VBLANK;
++
++ /* Is vpos outside nominal vblank area, but less than
++ * 1/100 of a frame height away from start of vblank?
++ * If so, assume this isn't a massively delayed vblank
++ * interrupt, but a vblank interrupt that fired a few
++ * microseconds before true start of vblank. Compensate
++ * by adding a full frame duration to the final timestamp.
++ * Happens, e.g., on ATI R500, R600.
++ *
++ * We only do this if DRM_CALLED_FROM_VBLIRQ.
++ */
++ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
++ vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
++ vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
++
++ if (vbl_start - *vpos < vtotal / 100) {
++ *vpos -= vtotal;
++
++ /* Signal this correction as "applied". */
++ ret |= 0x8;
++ }
++ }
++
++ return ret;
++}
++
++int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
++{
++ if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
++ return AMDGPU_CRTC_IRQ_NONE;
++
++ switch (crtc) {
++ case 0:
++ return AMDGPU_CRTC_IRQ_VBLANK1;
++ case 1:
++ return AMDGPU_CRTC_IRQ_VBLANK2;
++ case 2:
++ return AMDGPU_CRTC_IRQ_VBLANK3;
++ case 3:
++ return AMDGPU_CRTC_IRQ_VBLANK4;
++ case 4:
++ return AMDGPU_CRTC_IRQ_VBLANK5;
++ case 5:
++ return AMDGPU_CRTC_IRQ_VBLANK6;
++ default:
++ return AMDGPU_CRTC_IRQ_NONE;
++ }
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+new file mode 100644
+index 0000000..7b7f4ab
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+@@ -0,0 +1,955 @@
++/*
++ * Copyright 2011 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Alex Deucher
++ */
++
++#include "drmP.h"
++#include "amdgpu.h"
++#include "amdgpu_atombios.h"
++#include "amdgpu_i2c.h"
++#include "amdgpu_dpm.h"
++#include "atom.h"
++
++void amdgpu_dpm_print_class_info(u32 class, u32 class2)
++{
++ printk("\tui class: ");
++ switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
++ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
++ default:
++ printk("none\n");
++ break;
++ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
++ printk("battery\n");
++ break;
++ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
++ printk("balanced\n");
++ break;
++ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
++ printk("performance\n");
++ break;
++ }
++ printk("\tinternal class: ");
++ if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
++ (class2 == 0))
++ printk("none");
++ else {
++ if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
++ printk("boot ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
++ printk("thermal ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
++ printk("limited_pwr ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_REST)
++ printk("rest ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
++ printk("forced ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
++ printk("3d_perf ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
++ printk("ovrdrv ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
++ printk("uvd ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
++ printk("3d_low ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
++ printk("acpi ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
++ printk("uvd_hd2 ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
++ printk("uvd_hd ");
++ if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
++ printk("uvd_sd ");
++ if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
++ printk("limited_pwr2 ");
++ if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
++ printk("ulv ");
++ if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
++ printk("uvd_mvc ");
++ }
++ printk("\n");
++}
++
++void amdgpu_dpm_print_cap_info(u32 caps)
++{
++ printk("\tcaps: ");
++ if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
++ printk("single_disp ");
++ if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
++ printk("video ");
++ if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
++ printk("no_dc ");
++ printk("\n");
++}
++
++void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
++ struct amdgpu_ps *rps)
++{
++ printk("\tstatus: ");
++ if (rps == adev->pm.dpm.current_ps)
++ printk("c ");
++ if (rps == adev->pm.dpm.requested_ps)
++ printk("r ");
++ if (rps == adev->pm.dpm.boot_ps)
++ printk("b ");
++ printk("\n");
++}
++
++u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
++{
++ struct drm_device *dev = adev->ddev;
++ struct drm_crtc *crtc;
++ struct amdgpu_crtc *amdgpu_crtc;
++ u32 line_time_us, vblank_lines;
++ u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
++
++ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ amdgpu_crtc = to_amdgpu_crtc(crtc);
++ if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
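++				/* hw_mode.clock is in kHz, so htotal * 1000 /
++				 * clock yields the line time in microseconds.
++				 */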
++ line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
++ amdgpu_crtc->hw_mode.clock;
++ vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
++ amdgpu_crtc->hw_mode.crtc_vdisplay +
++ (amdgpu_crtc->v_border * 2);
++ vblank_time_us = vblank_lines * line_time_us;
++ break;
++ }
++ }
++ }
++
++ return vblank_time_us;
++}
++
++u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
++{
++ struct drm_device *dev = adev->ddev;
++ struct drm_crtc *crtc;
++ struct amdgpu_crtc *amdgpu_crtc;
++ u32 vrefresh = 0;
++
++ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ amdgpu_crtc = to_amdgpu_crtc(crtc);
++ if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
++ vrefresh = amdgpu_crtc->hw_mode.vrefresh;
++ break;
++ }
++ }
++ }
++
++ return vrefresh;
++}
++
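++/* Factor the scaled interval i_c = (i * r_c) / 100 into p * 4^u:
++ * b_c counts the significant bits of i_c above p_b, u is half that
++ * count rounded up, and p is the remaining mantissa.
++ */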
++void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
++ u32 *p, u32 *u)
++{
++ u32 b_c = 0;
++ u32 i_c;
++ u32 tmp;
++
++ i_c = (i * r_c) / 100;
++ tmp = i_c >> p_b;
++
++ while (tmp) {
++ b_c++;
++ tmp >>= 1;
++ }
++
++ *u = (b_c + 1) / 2;
++ *p = i_c / (1 << (2 * (*u)));
++}
++
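++/* Derive an adjustment a from target t, hysteresis h and the fl/fh
++ * frequency ratio, split it into ah/al, and place the high and low
++ * thresholds at t - ah and t + al respectively.
++ */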
++int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
++{
++ u32 k, a, ah, al;
++ u32 t1;
++
++ if ((fl == 0) || (fh == 0) || (fl > fh))
++ return -EINVAL;
++
++ k = (100 * fh) / fl;
++ t1 = (t * (k - 100));
++ a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
++ a = (a + 5) / 10;
++ ah = ((a * t) + 5000) / 10000;
++ al = a - ah;
++
++ *th = t - ah;
++ *tl = t + al;
++
++ return 0;
++}
++
++bool amdgpu_is_uvd_state(u32 class, u32 class2)
++{
++ if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
++ return true;
++ if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
++ return true;
++ if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
++ return true;
++ if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
++ return true;
++ if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
++ return true;
++ return false;
++}
++
++bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
++{
++ switch (sensor) {
++ case THERMAL_TYPE_RV6XX:
++ case THERMAL_TYPE_RV770:
++ case THERMAL_TYPE_EVERGREEN:
++ case THERMAL_TYPE_SUMO:
++ case THERMAL_TYPE_NI:
++ case THERMAL_TYPE_SI:
++ case THERMAL_TYPE_CI:
++ case THERMAL_TYPE_KV:
++ return true;
++ case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
++ case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
++ return false; /* need special handling */
++ case THERMAL_TYPE_NONE:
++ case THERMAL_TYPE_EXTERNAL:
++ case THERMAL_TYPE_EXTERNAL_GPIO:
++ default:
++ return false;
++ }
++}
++
++union power_info {
++ struct _ATOM_POWERPLAY_INFO info;
++ struct _ATOM_POWERPLAY_INFO_V2 info_2;
++ struct _ATOM_POWERPLAY_INFO_V3 info_3;
++ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
++ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
++ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
++ struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
++ struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
++};
++
++union fan_info {
++ struct _ATOM_PPLIB_FANTABLE fan;
++ struct _ATOM_PPLIB_FANTABLE2 fan2;
++ struct _ATOM_PPLIB_FANTABLE3 fan3;
++};
++
++static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
++ ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
++{
++ u32 size = atom_table->ucNumEntries *
++ sizeof(struct amdgpu_clock_voltage_dependency_entry);
++ int i;
++ ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
++
++ amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
++ if (!amdgpu_table->entries)
++ return -ENOMEM;
++
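++	/* ATOM packs each clock as a 16-bit low word plus an 8-bit high
++	 * byte; reassemble the 24-bit value here.
++	 */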
++ entry = &atom_table->entries[0];
++ for (i = 0; i < atom_table->ucNumEntries; i++) {
++ amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
++ (entry->ucClockHigh << 16);
++ amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
++ entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
++ ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
++ }
++ amdgpu_table->count = atom_table->ucNumEntries;
++
++ return 0;
++}
++
++int amdgpu_get_platform_caps(struct amdgpu_device *adev)
++{
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ union power_info *power_info;
++ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
++ u16 data_offset;
++ u8 frev, crev;
++
++ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
++ &frev, &crev, &data_offset))
++ return -EINVAL;
++ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
++
++ adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
++ adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
++ adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
++
++ return 0;
++}
++
++/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
++#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
++#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
++#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
++#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
++#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
++#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
++#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
++#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
++
++int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
++{
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ union power_info *power_info;
++ union fan_info *fan_info;
++ ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
++ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
++ u16 data_offset;
++ u8 frev, crev;
++ int ret, i;
++
++ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
++ &frev, &crev, &data_offset))
++ return -EINVAL;
++ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
++
++ /* fan table */
++ if (le16_to_cpu(power_info->pplib.usTableSize) >=
++ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
++ if (power_info->pplib3.usFanTableOffset) {
++ fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib3.usFanTableOffset));
++ adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
++ adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
++ adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
++ adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
++ adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
++ adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
++ adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
++ if (fan_info->fan.ucFanTableFormat >= 2)
++ adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
++ else
++ adev->pm.dpm.fan.t_max = 10900;
++ adev->pm.dpm.fan.cycle_delay = 100000;
++ if (fan_info->fan.ucFanTableFormat >= 3) {
++ adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
++ adev->pm.dpm.fan.default_max_fan_pwm =
++ le16_to_cpu(fan_info->fan3.usFanPWMMax);
++ adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
++ adev->pm.dpm.fan.fan_output_sensitivity =
++ le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
++ }
++ adev->pm.dpm.fan.ucode_fan_control = true;
++ }
++ }
++
++	/* clock dependency tables, phase shedding tables */
++ if (le16_to_cpu(power_info->pplib.usTableSize) >=
++ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
++ if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
++ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
++ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
++ dep_table);
++ if (ret) {
++ amdgpu_free_extended_power_table(adev);
++ return ret;
++ }
++ }
++ if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
++ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
++ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
++ dep_table);
++ if (ret) {
++ amdgpu_free_extended_power_table(adev);
++ return ret;
++ }
++ }
++ if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
++ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
++ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
++ dep_table);
++ if (ret) {
++ amdgpu_free_extended_power_table(adev);
++ return ret;
++ }
++ }
++ if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
++ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
++ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
++ dep_table);
++ if (ret) {
++ amdgpu_free_extended_power_table(adev);
++ return ret;
++ }
++ }
++ if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
++ ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
++ (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
++ if (clk_v->ucNumEntries) {
++ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
++ le16_to_cpu(clk_v->entries[0].usSclkLow) |
++ (clk_v->entries[0].ucSclkHigh << 16);
++ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
++ le16_to_cpu(clk_v->entries[0].usMclkLow) |
++ (clk_v->entries[0].ucMclkHigh << 16);
++ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
++ le16_to_cpu(clk_v->entries[0].usVddc);
++ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
++ le16_to_cpu(clk_v->entries[0].usVddci);
++ }
++ }
++ if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
++ ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
++ (ATOM_PPLIB_PhaseSheddingLimits_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
++ ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
++
++ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
++ kzalloc(psl->ucNumEntries *
++ sizeof(struct amdgpu_phase_shedding_limits_entry),
++ GFP_KERNEL);
++ if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
++ amdgpu_free_extended_power_table(adev);
++ return -ENOMEM;
++ }
++
++ entry = &psl->entries[0];
++ for (i = 0; i < psl->ucNumEntries; i++) {
++ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
++ le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
++ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
++ le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
++ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
++ le16_to_cpu(entry->usVoltage);
++ entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
++ ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
++ }
++ adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
++ psl->ucNumEntries;
++ }
++ }
++
++ /* cac data */
++ if (le16_to_cpu(power_info->pplib.usTableSize) >=
++ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
++ adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
++ adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
++ adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
++ adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
++ if (adev->pm.dpm.tdp_od_limit)
++ adev->pm.dpm.power_control = true;
++ else
++ adev->pm.dpm.power_control = false;
++ adev->pm.dpm.tdp_adjustment = 0;
++ adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
++ adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
++ adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
++ if (power_info->pplib5.usCACLeakageTableOffset) {
++ ATOM_PPLIB_CAC_Leakage_Table *cac_table =
++ (ATOM_PPLIB_CAC_Leakage_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
++ ATOM_PPLIB_CAC_Leakage_Record *entry;
++ u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
++ adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
++ if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
++ amdgpu_free_extended_power_table(adev);
++ return -ENOMEM;
++ }
++ entry = &cac_table->entries[0];
++ for (i = 0; i < cac_table->ucNumEntries; i++) {
++ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
++ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
++ le16_to_cpu(entry->usVddc1);
++ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
++ le16_to_cpu(entry->usVddc2);
++ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
++ le16_to_cpu(entry->usVddc3);
++ } else {
++ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
++ le16_to_cpu(entry->usVddc);
++ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
++ le32_to_cpu(entry->ulLeakageValue);
++ }
++ entry = (ATOM_PPLIB_CAC_Leakage_Record *)
++ ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
++ }
++ adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
++ }
++ }
++
++ /* ext tables */
++ if (le16_to_cpu(power_info->pplib.usTableSize) >=
++ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
++ ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
++ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
++ ext_hdr->usVCETableOffset) {
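++			/* the "+ 1" offsets below skip the one-byte table
++			 * revision field preceding each array (an assumption,
++			 * mirroring the matching radeon pplib parser).
++			 */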
++ VCEClockInfoArray *array = (VCEClockInfoArray *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
++ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
++ (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
++ 1 + array->ucNumEntries * sizeof(VCEClockInfo));
++ ATOM_PPLIB_VCE_State_Table *states =
++ (ATOM_PPLIB_VCE_State_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
++ 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
++ 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
++ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
++ ATOM_PPLIB_VCE_State_Record *state_entry;
++ VCEClockInfo *vce_clk;
++ u32 size = limits->numEntries *
++ sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
++ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
++ kzalloc(size, GFP_KERNEL);
++ if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
++ amdgpu_free_extended_power_table(adev);
++ return -ENOMEM;
++ }
++ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
++ limits->numEntries;
++ entry = &limits->entries[0];
++ state_entry = &states->entries[0];
++ for (i = 0; i < limits->numEntries; i++) {
++ vce_clk = (VCEClockInfo *)
++ ((u8 *)&array->entries[0] +
++ (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
++ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
++ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
++ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
++ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
++ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
++ le16_to_cpu(entry->usVoltage);
++ entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
++ ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
++ }
++ for (i = 0; i < states->numEntries; i++) {
++ if (i >= AMDGPU_MAX_VCE_LEVELS)
++ break;
++ vce_clk = (VCEClockInfo *)
++ ((u8 *)&array->entries[0] +
++ (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
++ adev->pm.dpm.vce_states[i].evclk =
++ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
++ adev->pm.dpm.vce_states[i].ecclk =
++ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
++ adev->pm.dpm.vce_states[i].clk_idx =
++ state_entry->ucClockInfoIndex & 0x3f;
++ adev->pm.dpm.vce_states[i].pstate =
++ (state_entry->ucClockInfoIndex & 0xc0) >> 6;
++ state_entry = (ATOM_PPLIB_VCE_State_Record *)
++ ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
++ }
++ }
++ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
++ ext_hdr->usUVDTableOffset) {
++ UVDClockInfoArray *array = (UVDClockInfoArray *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
++ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
++ (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
++ 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
++ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
++ u32 size = limits->numEntries *
++ sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
++ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
++ kzalloc(size, GFP_KERNEL);
++ if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
++ amdgpu_free_extended_power_table(adev);
++ return -ENOMEM;
++ }
++ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
++ limits->numEntries;
++ entry = &limits->entries[0];
++ for (i = 0; i < limits->numEntries; i++) {
++ UVDClockInfo *uvd_clk = (UVDClockInfo *)
++ ((u8 *)&array->entries[0] +
++ (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
++ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
++ le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
++ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
++ le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
++ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
++ le16_to_cpu(entry->usVoltage);
++ entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
++ ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
++ }
++ }
++ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
++ ext_hdr->usSAMUTableOffset) {
++ ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
++ (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
++ ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
++ u32 size = limits->numEntries *
++ sizeof(struct amdgpu_clock_voltage_dependency_entry);
++ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
++ kzalloc(size, GFP_KERNEL);
++ if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
++ amdgpu_free_extended_power_table(adev);
++ return -ENOMEM;
++ }
++ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
++ limits->numEntries;
++ entry = &limits->entries[0];
++ for (i = 0; i < limits->numEntries; i++) {
++ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
++ le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
++ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
++ le16_to_cpu(entry->usVoltage);
++ entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
++ ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
++ }
++ }
++ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
++ ext_hdr->usPPMTableOffset) {
++ ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usPPMTableOffset));
++ adev->pm.dpm.dyn_state.ppm_table =
++ kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
++ if (!adev->pm.dpm.dyn_state.ppm_table) {
++ amdgpu_free_extended_power_table(adev);
++ return -ENOMEM;
++ }
++ adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
++ adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
++ le16_to_cpu(ppm->usCpuCoreNumber);
++ adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
++ le32_to_cpu(ppm->ulPlatformTDP);
++ adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
++ le32_to_cpu(ppm->ulSmallACPlatformTDP);
++ adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
++ le32_to_cpu(ppm->ulPlatformTDC);
++ adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
++ le32_to_cpu(ppm->ulSmallACPlatformTDC);
++ adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
++ le32_to_cpu(ppm->ulApuTDP);
++ adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
++ le32_to_cpu(ppm->ulDGpuTDP);
++ adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
++ le32_to_cpu(ppm->ulDGpuUlvPower);
++ adev->pm.dpm.dyn_state.ppm_table->tj_max =
++ le32_to_cpu(ppm->ulTjmax);
++ }
++ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
++ ext_hdr->usACPTableOffset) {
++ ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
++ (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
++ ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
++ u32 size = limits->numEntries *
++ sizeof(struct amdgpu_clock_voltage_dependency_entry);
++ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
++ kzalloc(size, GFP_KERNEL);
++ if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
++ amdgpu_free_extended_power_table(adev);
++ return -ENOMEM;
++ }
++ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
++ limits->numEntries;
++ entry = &limits->entries[0];
++ for (i = 0; i < limits->numEntries; i++) {
++ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
++ le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
++ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
++ le16_to_cpu(entry->usVoltage);
++ entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
++ ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
++ }
++ }
++ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
++ ext_hdr->usPowerTuneTableOffset) {
++ u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
++ ATOM_PowerTune_Table *pt;
++ adev->pm.dpm.dyn_state.cac_tdp_table =
++ kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
++ if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
++ amdgpu_free_extended_power_table(adev);
++ return -ENOMEM;
++ }
++ if (rev > 0) {
++ ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
++ adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
++ ppt->usMaximumPowerDeliveryLimit;
++ pt = &ppt->power_tune_table;
++ } else {
++ ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
++ adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
++ pt = &ppt->power_tune_table;
++ }
++ adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
++ adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
++ le16_to_cpu(pt->usConfigurableTDP);
++ adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
++ adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
++ le16_to_cpu(pt->usBatteryPowerLimit);
++ adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
++ le16_to_cpu(pt->usSmallPowerLimit);
++ adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
++ le16_to_cpu(pt->usLowCACLeakage);
++ adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
++ le16_to_cpu(pt->usHighCACLeakage);
++ }
++ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
++ ext_hdr->usSclkVddgfxTableOffset) {
++ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
++ (mode_info->atom_context->bios + data_offset +
++ le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
++ ret = amdgpu_parse_clk_voltage_dep_table(
++ &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
++ dep_table);
++ if (ret) {
++ kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
++ return ret;
++ }
++ }
++ }
++
++ return 0;
++}
++
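++/**
++ * amdgpu_free_extended_power_table - free the extended dpm tables
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Frees all of the dpm tables allocated by
++ * amdgpu_parse_extended_power_table().  kfree() ignores NULL
++ * pointers, so this is safe to call even if parsing bailed out
++ * before every table was allocated.
++ */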
++void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
++{
++ struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
++
++ kfree(dyn_state->vddc_dependency_on_sclk.entries);
++ kfree(dyn_state->vddci_dependency_on_mclk.entries);
++ kfree(dyn_state->vddc_dependency_on_mclk.entries);
++ kfree(dyn_state->mvdd_dependency_on_mclk.entries);
++ kfree(dyn_state->cac_leakage_table.entries);
++ kfree(dyn_state->phase_shedding_limits_table.entries);
++ kfree(dyn_state->ppm_table);
++ kfree(dyn_state->cac_tdp_table);
++ kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
++ kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
++ kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
++ kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
++ kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
++}
++
++static const char *pp_lib_thermal_controller_names[] = {
++ "NONE",
++ "lm63",
++ "adm1032",
++ "adm1030",
++ "max6649",
++ "lm64",
++ "f75375",
++ "RV6xx",
++ "RV770",
++ "adt7473",
++ "NONE",
++ "External GPIO",
++ "Evergreen",
++ "emc2103",
++ "Sumo",
++ "Northern Islands",
++ "Southern Islands",
++ "lm96163",
++ "Sea Islands",
++ "Kaveri/Kabini",
++};
++
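++/**
++ * amdgpu_add_thermal_controller - set up the thermal controller
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Parses the thermal controller entry of the PowerPlay table and
++ * records the fan parameters and internal sensor type.  For boards
++ * with an external controller, a matching i2c device is registered
++ * on the bus described by the vbios.
++ */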
++void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
++{
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ ATOM_PPLIB_POWERPLAYTABLE *power_table;
++ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
++ ATOM_PPLIB_THERMALCONTROLLER *controller;
++ struct amdgpu_i2c_bus_rec i2c_bus;
++ u16 data_offset;
++ u8 frev, crev;
++
++ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
++ &frev, &crev, &data_offset))
++ return;
++ power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
++ (mode_info->atom_context->bios + data_offset);
++ controller = &power_table->sThermalController;
++
++ /* add the i2c bus for thermal/fan chip */
++ if (controller->ucType > 0) {
++ if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
++ adev->pm.no_fan = true;
++ adev->pm.fan_pulses_per_revolution =
++ controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
++ if (adev->pm.fan_pulses_per_revolution) {
++ adev->pm.fan_min_rpm = controller->ucFanMinRPM;
++ adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
++ }
++ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
++ DRM_INFO("Internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
++ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
++ DRM_INFO("Internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
++ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
++ DRM_INFO("Internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
++ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
++ DRM_INFO("Internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
++ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
++ DRM_INFO("Internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_NI;
++ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
++ DRM_INFO("Internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_SI;
++ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
++ DRM_INFO("Internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_CI;
++ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
++ DRM_INFO("Internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_KV;
++ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
++ DRM_INFO("External GPIO thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
++ } else if (controller->ucType ==
++ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
++ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
++ } else if (controller->ucType ==
++ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
++ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
++ } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
++ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
++ pp_lib_thermal_controller_names[controller->ucType],
++ controller->ucI2cAddress >> 1,
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
++ i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
++ adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
++ if (adev->pm.i2c_bus) {
++ struct i2c_board_info info = { };
++ const char *name = pp_lib_thermal_controller_names[controller->ucType];
++ info.addr = controller->ucI2cAddress >> 1;
++ strlcpy(info.type, name, sizeof(info.type));
++ i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
++ }
++ } else {
++ DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
++ controller->ucType,
++ controller->ucI2cAddress >> 1,
++ (controller->ucFanParameters &
++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++ }
++ }
++}
++
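++/**
++ * amdgpu_get_pcie_gen_support - select a usable PCIE gen
++ *
++ * @adev: amdgpu_device pointer
++ * @sys_mask: link speeds supported by the system (DRM_PCIE_SPEED_*)
++ * @asic_gen: PCIE gen already configured on the asic, if any
++ * @default_gen: PCIE gen requested by the caller
++ *
++ * Returns @asic_gen if one is already configured.  Otherwise returns
++ * @default_gen when @sys_mask shows the system supports that speed,
++ * falling back to gen1.
++ */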
++enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
++ u32 sys_mask,
++ enum amdgpu_pcie_gen asic_gen,
++ enum amdgpu_pcie_gen default_gen)
++{
++ switch (asic_gen) {
++ case AMDGPU_PCIE_GEN1:
++ return AMDGPU_PCIE_GEN1;
++ case AMDGPU_PCIE_GEN2:
++ return AMDGPU_PCIE_GEN2;
++ case AMDGPU_PCIE_GEN3:
++ return AMDGPU_PCIE_GEN3;
++ default:
++ if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
++ return AMDGPU_PCIE_GEN3;
++ else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
++ return AMDGPU_PCIE_GEN2;
++ else
++ return AMDGPU_PCIE_GEN1;
++ }
++ return AMDGPU_PCIE_GEN1;
++}
++
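++/**
++ * amdgpu_get_pcie_lane_support - validate a PCIE lane count
++ *
++ * @adev: amdgpu_device pointer
++ * @asic_lanes: lane count reported by the asic
++ * @default_lanes: fallback lane count
++ *
++ * Returns @asic_lanes if it is a valid link width (1, 2, 4, 8, 12
++ * or 16 lanes), otherwise @default_lanes.
++ */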
++u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
++ u16 asic_lanes,
++ u16 default_lanes)
++{
++ switch (asic_lanes) {
++ case 0:
++ default:
++ return default_lanes;
++ case 1:
++ return 1;
++ case 2:
++ return 2;
++ case 4:
++ return 4;
++ case 8:
++ return 8;
++ case 12:
++ return 12;
++ case 16:
++ return 16;
++ }
++}
++
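++/**
++ * amdgpu_encode_pci_lane_width - encode a lane count for the vbios
++ *
++ * @lanes: number of PCIE lanes (0-16)
++ *
++ * Maps a lane count to the encoding used in the power tables, e.g.
++ * 1 lane -> 1, 4 lanes -> 3, 16 lanes -> 6.  Invalid lane counts
++ * encode as 0.
++ */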
++u8 amdgpu_encode_pci_lane_width(u32 lanes)
++{
++ u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
++
++ if (lanes > 16)
++ return 0;
++
++ return encoded_lanes[lanes];
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+new file mode 100644
+index 0000000..3738a96
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+@@ -0,0 +1,85 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef __AMDGPU_DPM_H__
++#define __AMDGPU_DPM_H__
++
++#define R600_SSTU_DFLT 0
++#define R600_SST_DFLT 0x00C8
++
++/* XXX are these ok? */
++#define R600_TEMP_RANGE_MIN (90 * 1000)
++#define R600_TEMP_RANGE_MAX (120 * 1000)
++
++#define FDO_PWM_MODE_STATIC 1
++#define FDO_PWM_MODE_STATIC_RPM 5
++
++enum amdgpu_td {
++ AMDGPU_TD_AUTO,
++ AMDGPU_TD_UP,
++ AMDGPU_TD_DOWN,
++};
++
++enum amdgpu_display_watermark {
++ AMDGPU_DISPLAY_WATERMARK_LOW = 0,
++ AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
++};
++
++enum amdgpu_display_gap {
++ AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
++ AMDGPU_PM_DISPLAY_GAP_VBLANK = 1,
++ AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2,
++ AMDGPU_PM_DISPLAY_GAP_IGNORE = 3,
++};
++
++void amdgpu_dpm_print_class_info(u32 class, u32 class2);
++void amdgpu_dpm_print_cap_info(u32 caps);
++void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
++ struct amdgpu_ps *rps);
++u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
++u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
++bool amdgpu_is_uvd_state(u32 class, u32 class2);
++void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
++ u32 *p, u32 *u);
++int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
++
++bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
++
++int amdgpu_get_platform_caps(struct amdgpu_device *adev);
++
++int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
++void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
++
++void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
++
++enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
++ u32 sys_mask,
++ enum amdgpu_pcie_gen asic_gen,
++ enum amdgpu_pcie_gen default_gen);
++
++u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
++ u16 asic_lanes,
++ u16 default_lanes);
++u8 amdgpu_encode_pci_lane_width(u32 lanes);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+new file mode 100644
+index 0000000..d1af448
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -0,0 +1,439 @@
++/**
++ * \file amdgpu_drv.c
++ * AMD Amdgpu driver
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include <drm/drm_gem.h>
++#include "amdgpu_drv.h"
++
++#include <drm/drm_pciids.h>
++#include <linux/console.h>
++#include <linux/module.h>
++#include <linux/pm_runtime.h>
++#include <linux/vga_switcheroo.h>
++#include "drm_crtc_helper.h"
++
++#include "amdgpu.h"
++#include "amdgpu_irq.h"
++
++/*
++ * KMS wrapper.
++ * - 3.0.0 - initial driver
++ */
++#define KMS_DRIVER_MAJOR 3
++#define KMS_DRIVER_MINOR 0
++#define KMS_DRIVER_PATCHLEVEL 0
++
++int amdgpu_vram_limit = 0;
++int amdgpu_gart_size = -1; /* auto */
++int amdgpu_benchmarking = 0;
++int amdgpu_testing = 0;
++int amdgpu_audio = -1;
++int amdgpu_disp_priority = 0;
++int amdgpu_hw_i2c = 0;
++int amdgpu_pcie_gen2 = -1;
++int amdgpu_msi = -1;
++int amdgpu_lockup_timeout = 10000;
++int amdgpu_dpm = -1;
++int amdgpu_smc_load_fw = 1;
++int amdgpu_aspm = -1;
++int amdgpu_runtime_pm = -1;
++int amdgpu_hard_reset = 0;
++unsigned amdgpu_ip_block_mask = 0xffffffff;
++int amdgpu_bapm = -1;
++int amdgpu_deep_color = 0;
++int amdgpu_vm_size = 8;
++int amdgpu_vm_block_size = -1;
++int amdgpu_exp_hw_support = 0;
++
++MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
++module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
++
++MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
++module_param_named(gartsize, amdgpu_gart_size, int, 0600);
++
++MODULE_PARM_DESC(benchmark, "Run benchmark");
++module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
++
++MODULE_PARM_DESC(test, "Run tests");
++module_param_named(test, amdgpu_testing, int, 0444);
++
++MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
++module_param_named(audio, amdgpu_audio, int, 0444);
++
++MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
++module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
++
++MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
++module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
++
++MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
++module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
++
++MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
++module_param_named(msi, amdgpu_msi, int, 0444);
++
++MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
++module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
++
++MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
++module_param_named(dpm, amdgpu_dpm, int, 0444);
++
++MODULE_PARM_DESC(smc_load_fw, "SMC firmware loading (1 = enable, 0 = disable)");
++module_param_named(smc_load_fw, amdgpu_smc_load_fw, int, 0444);
++
++MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
++module_param_named(aspm, amdgpu_aspm, int, 0444);
++
++MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
++module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
++
++MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
++module_param_named(hard_reset, amdgpu_hard_reset, int, 0444);
++
++MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
++module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
++
++MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
++module_param_named(bapm, amdgpu_bapm, int, 0444);
++
++MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
++module_param_named(deep_color, amdgpu_deep_color, int, 0444);
++
++MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
++module_param_named(vm_size, amdgpu_vm_size, int, 0444);
++
++MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
++module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
++
++MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
++module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
++
++static struct pci_device_id pciidlist[] = {
++
++ {0, 0, 0}
++};
++
++MODULE_DEVICE_TABLE(pci, pciidlist);
++
++static struct drm_driver kms_driver;
++
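++/**
++ * amdgpu_kick_out_firmware_fb - remove firmware framebuffer drivers
++ *
++ * @pdev: the amdgpu PCI device
++ *
++ * Evicts generic framebuffer drivers (efifb, vesafb, offb, ...) that
++ * may have claimed the device's VRAM aperture before amdgpu takes
++ * over scanout.
++ */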
++static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
++{
++ struct apertures_struct *ap;
++ bool primary = false;
++
++ ap = alloc_apertures(1);
++ if (!ap)
++ return -ENOMEM;
++
++ ap->ranges[0].base = pci_resource_start(pdev, 0);
++ ap->ranges[0].size = pci_resource_len(pdev, 0);
++
++#ifdef CONFIG_X86
++ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
++#endif
++ remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
++ kfree(ap);
++
++ return 0;
++}
++
++static int amdgpu_pci_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ unsigned long flags = ent->driver_data;
++ int ret;
++
++ if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
++ DRM_INFO("This hardware requires experimental hardware support.\n"
++ "See modparam exp_hw_support\n");
++ return -ENODEV;
++ }
++
++ /* Get rid of things like offb */
++ ret = amdgpu_kick_out_firmware_fb(pdev);
++ if (ret)
++ return ret;
++
++ return drm_get_pci_dev(pdev, ent, &kms_driver);
++}
++
++static void
++amdgpu_pci_remove(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++
++ drm_put_dev(dev);
++}
++
++static int amdgpu_pmops_suspend(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ return amdgpu_suspend_kms(drm_dev, true, true);
++}
++
++static int amdgpu_pmops_resume(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ return amdgpu_resume_kms(drm_dev, true, true);
++}
++
++static int amdgpu_pmops_freeze(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ return amdgpu_suspend_kms(drm_dev, false, true);
++}
++
++static int amdgpu_pmops_thaw(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ return amdgpu_resume_kms(drm_dev, false, true);
++}
++
++static int amdgpu_pmops_runtime_suspend(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ int ret;
++
++ if (!amdgpu_device_is_px(drm_dev)) {
++ pm_runtime_forbid(dev);
++ return -EBUSY;
++ }
++
++ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
++ drm_kms_helper_poll_disable(drm_dev);
++ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
++
++ ret = amdgpu_suspend_kms(drm_dev, false, false);
++ pci_save_state(pdev);
++ pci_disable_device(pdev);
++ pci_ignore_hotplug(pdev);
++ pci_set_power_state(pdev, PCI_D3cold);
++ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
++
++ return 0;
++}
++
++static int amdgpu_pmops_runtime_resume(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ int ret;
++
++ if (!amdgpu_device_is_px(drm_dev))
++ return -EINVAL;
++
++ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
++
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++ ret = pci_enable_device(pdev);
++ if (ret)
++ return ret;
++ pci_set_master(pdev);
++
++ ret = amdgpu_resume_kms(drm_dev, false, false);
++ drm_kms_helper_poll_enable(drm_dev);
++ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
++ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
++ return 0;
++}
++
++static int amdgpu_pmops_runtime_idle(struct device *dev)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ struct drm_crtc *crtc;
++
++ if (!amdgpu_device_is_px(drm_dev)) {
++ pm_runtime_forbid(dev);
++ return -EBUSY;
++ }
++
++ list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
++ if (crtc->enabled) {
++ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
++ return -EBUSY;
++ }
++ }
++
++ pm_runtime_mark_last_busy(dev);
++ pm_runtime_autosuspend(dev);
++ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
++ return 1;
++}
++
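++/**
++ * amdgpu_drm_ioctl - wrap drm_ioctl with runtime pm handling
++ *
++ * Takes a runtime pm reference for the duration of the ioctl so that
++ * a runtime-suspended (PX) device is woken up before it is touched,
++ * then marks it busy again so autosuspend is deferred.
++ */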
++long amdgpu_drm_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ struct drm_file *file_priv = filp->private_data;
++ struct drm_device *dev;
++ long ret;
++ dev = file_priv->minor->dev;
++ ret = pm_runtime_get_sync(dev->dev);
++ if (ret < 0)
++ return ret;
++
++ ret = drm_ioctl(filp, cmd, arg);
++
++ pm_runtime_mark_last_busy(dev->dev);
++ pm_runtime_put_autosuspend(dev->dev);
++ return ret;
++}
++
++static const struct dev_pm_ops amdgpu_pm_ops = {
++ .suspend = amdgpu_pmops_suspend,
++ .resume = amdgpu_pmops_resume,
++ .freeze = amdgpu_pmops_freeze,
++ .thaw = amdgpu_pmops_thaw,
++ .poweroff = amdgpu_pmops_freeze,
++ .restore = amdgpu_pmops_resume,
++ .runtime_suspend = amdgpu_pmops_runtime_suspend,
++ .runtime_resume = amdgpu_pmops_runtime_resume,
++ .runtime_idle = amdgpu_pmops_runtime_idle,
++};
++
++static const struct file_operations amdgpu_driver_kms_fops = {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ .unlocked_ioctl = amdgpu_drm_ioctl,
++ .mmap = amdgpu_mmap,
++ .poll = drm_poll,
++ .read = drm_read,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = amdgpu_kms_compat_ioctl,
++#endif
++};
++
++static struct drm_driver kms_driver = {
++ .driver_features =
++ DRIVER_USE_AGP |
++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
++ DRIVER_PRIME | DRIVER_RENDER,
++ .dev_priv_size = 0,
++ .load = amdgpu_driver_load_kms,
++ .open = amdgpu_driver_open_kms,
++ .preclose = amdgpu_driver_preclose_kms,
++ .postclose = amdgpu_driver_postclose_kms,
++ .lastclose = amdgpu_driver_lastclose_kms,
++ .set_busid = drm_pci_set_busid,
++ .unload = amdgpu_driver_unload_kms,
++ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
++ .enable_vblank = amdgpu_enable_vblank_kms,
++ .disable_vblank = amdgpu_disable_vblank_kms,
++ .get_vblank_timestamp = amdgpu_get_vblank_timestamp_kms,
++ .get_scanout_position = amdgpu_get_crtc_scanoutpos,
++#if defined(CONFIG_DEBUG_FS)
++ .debugfs_init = amdgpu_debugfs_init,
++ .debugfs_cleanup = amdgpu_debugfs_cleanup,
++#endif
++ .irq_preinstall = amdgpu_irq_preinstall,
++ .irq_postinstall = amdgpu_irq_postinstall,
++ .irq_uninstall = amdgpu_irq_uninstall,
++ .irq_handler = amdgpu_irq_handler,
++ .ioctls = amdgpu_ioctls_kms,
++ .gem_free_object = amdgpu_gem_object_free,
++ .gem_open_object = amdgpu_gem_object_open,
++ .gem_close_object = amdgpu_gem_object_close,
++ .dumb_create = amdgpu_mode_dumb_create,
++ .dumb_map_offset = amdgpu_mode_dumb_mmap,
++ .dumb_destroy = drm_gem_dumb_destroy,
++ .fops = &amdgpu_driver_kms_fops,
++
++ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
++ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
++ .gem_prime_export = amdgpu_gem_prime_export,
++ .gem_prime_import = drm_gem_prime_import,
++ .gem_prime_pin = amdgpu_gem_prime_pin,
++ .gem_prime_unpin = amdgpu_gem_prime_unpin,
++ .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
++ .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
++ .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
++ .gem_prime_vmap = amdgpu_gem_prime_vmap,
++ .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
++
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = DRIVER_DATE,
++ .major = KMS_DRIVER_MAJOR,
++ .minor = KMS_DRIVER_MINOR,
++ .patchlevel = KMS_DRIVER_PATCHLEVEL,
++};
++
++static struct drm_driver *driver;
++static struct pci_driver *pdriver;
++
++static struct pci_driver amdgpu_kms_pci_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pciidlist,
++ .probe = amdgpu_pci_probe,
++ .remove = amdgpu_pci_remove,
++ .driver.pm = &amdgpu_pm_ops,
++};
++
++static int __init amdgpu_init(void)
++{
++#ifdef CONFIG_VGA_CONSOLE
++ if (vgacon_text_force()) {
++ DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
++ return -EINVAL;
++ }
++#endif
++ DRM_INFO("amdgpu kernel modesetting enabled.\n");
++ driver = &kms_driver;
++ pdriver = &amdgpu_kms_pci_driver;
++ driver->driver_features |= DRIVER_MODESET;
++ driver->num_ioctls = amdgpu_max_kms_ioctl;
++ amdgpu_register_atpx_handler();
++
++ /* let modprobe override vga console setting */
++ return drm_pci_init(driver, pdriver);
++}
++
++static void __exit amdgpu_exit(void)
++{
++ drm_pci_exit(driver, pdriver);
++ amdgpu_unregister_atpx_handler();
++}
++
++module_init(amdgpu_init);
++module_exit(amdgpu_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+new file mode 100644
+index 0000000..cceeb33
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+@@ -0,0 +1,48 @@
++/* amdgpu_drv.h -- Private header for amdgpu driver -*- linux-c -*-
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_DRV_H__
++#define __AMDGPU_DRV_H__
++
++#include <linux/firmware.h>
++#include <linux/platform_device.h>
++
++#include "amdgpu_family.h"
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR "AMD linux driver team"
++
++#define DRIVER_NAME "amdgpu"
++#define DRIVER_DESC "AMD GPU"
++#define DRIVER_DATE "20150101"
++
++long amdgpu_drm_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+new file mode 100644
+index 0000000..94138ab
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+@@ -0,0 +1,245 @@
++/*
++ * Copyright 2007-8 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ */
++#include <drm/drmP.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_connectors.h"
++#include "atom.h"
++#include "atombios_encoders.h"
++
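++/**
++ * amdgpu_link_encoder_connector - attach encoders to connectors
++ *
++ * Walks the mode_config lists and attaches each encoder to every
++ * connector that shares a device bitmask with it.  LCD encoders also
++ * get their backlight controls initialized here.
++ */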
++void
++amdgpu_link_encoder_connector(struct drm_device *dev)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ struct drm_connector *connector;
++ struct amdgpu_connector *amdgpu_connector;
++ struct drm_encoder *encoder;
++ struct amdgpu_encoder *amdgpu_encoder;
++
++ /* walk the list and link encoders to connectors */
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ amdgpu_connector = to_amdgpu_connector(connector);
++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++ if (amdgpu_encoder->devices & amdgpu_connector->devices) {
++ drm_mode_connector_attach_encoder(connector, encoder);
++ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++ amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
++ adev->mode_info.bl_encoder = amdgpu_encoder;
++ }
++ }
++ }
++ }
++}
++
++void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_connector *connector;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ if (connector->encoder == encoder) {
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
++ DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
++ amdgpu_encoder->active_device, amdgpu_encoder->devices,
++ amdgpu_connector->devices, encoder->encoder_type);
++ }
++ }
++}
++
++struct drm_connector *
++amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_connector *connector;
++ struct amdgpu_connector *amdgpu_connector;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ amdgpu_connector = to_amdgpu_connector(connector);
++ if (amdgpu_encoder->active_device & amdgpu_connector->devices)
++ return connector;
++ }
++ return NULL;
++}
++
++struct drm_connector *
++amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_connector *connector;
++ struct amdgpu_connector *amdgpu_connector;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ amdgpu_connector = to_amdgpu_connector(connector);
++ if (amdgpu_encoder->devices & amdgpu_connector->devices)
++ return connector;
++ }
++ return NULL;
++}
++
++struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_encoder *other_encoder;
++ struct amdgpu_encoder *other_amdgpu_encoder;
++
++ if (amdgpu_encoder->is_ext_encoder)
++ return NULL;
++
++ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
++ if (other_encoder == encoder)
++ continue;
++ other_amdgpu_encoder = to_amdgpu_encoder(other_encoder);
++ if (other_amdgpu_encoder->is_ext_encoder &&
++ (amdgpu_encoder->devices & other_amdgpu_encoder->devices))
++ return other_encoder;
++ }
++ return NULL;
++}
++
++u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
++{
++ struct drm_encoder *other_encoder = amdgpu_get_external_encoder(encoder);
++
++ if (other_encoder) {
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(other_encoder);
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_TRAVIS:
++ case ENCODER_OBJECT_ID_NUTMEG:
++ return amdgpu_encoder->encoder_id;
++ default:
++ return ENCODER_OBJECT_ID_NONE;
++ }
++ }
++ return ENCODER_OBJECT_ID_NONE;
++}
++
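++/**
++ * amdgpu_panel_mode_fixup - force a mode to the panel's native timing
++ *
++ * Fixed-mode panels can only scan out their native timing, so rewrite
++ * @adjusted_mode with the native mode's clock, flags and blanking
++ * intervals while leaving the user-requested mode untouched.
++ */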
++void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
++ unsigned hblank = native_mode->htotal - native_mode->hdisplay;
++ unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
++ unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
++ unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
++ unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
++ unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
++
++ adjusted_mode->clock = native_mode->clock;
++ adjusted_mode->flags = native_mode->flags;
++
++ adjusted_mode->hdisplay = native_mode->hdisplay;
++ adjusted_mode->vdisplay = native_mode->vdisplay;
++
++ adjusted_mode->htotal = native_mode->hdisplay + hblank;
++ adjusted_mode->hsync_start = native_mode->hdisplay + hover;
++ adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
++
++ adjusted_mode->vtotal = native_mode->vdisplay + vblank;
++ adjusted_mode->vsync_start = native_mode->vdisplay + vover;
++ adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
++
++ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
++
++ adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
++ adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
++
++ adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
++ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
++ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
++
++ adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
++ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
++ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
++}
++
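++/**
++ * amdgpu_dig_monitor_is_duallink - check whether dual link is needed
++ *
++ * Single-link TMDS tops out at 165 MHz; sinks that advertise HDMI
++ * (1.3+) can run a single link up to 340 MHz.  DP and eDP sinks never
++ * need dual link.
++ */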
++bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
++ u32 pixel_clock)
++{
++ struct drm_connector *connector;
++ struct amdgpu_connector *amdgpu_connector;
++ struct amdgpu_connector_atom_dig *dig_connector;
++
++ connector = amdgpu_get_connector_for_encoder(encoder);
++ /* if we don't have an active device yet, just use one of
++ * the connectors tied to the encoder.
++ */
++ if (!connector)
++ connector = amdgpu_get_connector_for_encoder_init(encoder);
++ amdgpu_connector = to_amdgpu_connector(connector);
++
++ switch (connector->connector_type) {
++ case DRM_MODE_CONNECTOR_DVII:
++ case DRM_MODE_CONNECTOR_HDMIB:
++ if (amdgpu_connector->use_digital) {
++			/* HDMI 1.3 supports up to 340 MHz over single link */
++ if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
++ if (pixel_clock > 340000)
++ return true;
++ else
++ return false;
++ } else {
++ if (pixel_clock > 165000)
++ return true;
++ else
++ return false;
++ }
++ } else
++ return false;
++ case DRM_MODE_CONNECTOR_DVID:
++ case DRM_MODE_CONNECTOR_HDMIA:
++ case DRM_MODE_CONNECTOR_DisplayPort:
++ dig_connector = amdgpu_connector->con_priv;
++ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
++ return false;
++ else {
++			/* HDMI 1.3 supports up to 340 MHz over single link */
++ if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
++ if (pixel_clock > 340000)
++ return true;
++ else
++ return false;
++ } else {
++ if (pixel_clock > 165000)
++ return true;
++ else
++ return false;
++ }
++ }
++ default:
++ return false;
++ }
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+new file mode 100644
+index 0000000..2b1735d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -0,0 +1,432 @@
++/*
++ * Copyright © 2007 David Airlie
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * David Airlie
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/fb.h>
++
++#include <drm/drmP.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++
++#include <drm/drm_fb_helper.h>
++
++#include <linux/vga_switcheroo.h>
++
++/* object hierarchy -
++ * this contains a helper plus an amdgpu framebuffer;
++ * the helper contains a pointer to the amdgpu framebuffer baseclass.
++ */
++struct amdgpu_fbdev {
++ struct drm_fb_helper helper;
++ struct amdgpu_framebuffer rfb;
++ struct list_head fbdev_list;
++ struct amdgpu_device *adev;
++};
++
++static struct fb_ops amdgpufb_ops = {
++ .owner = THIS_MODULE,
++ .fb_check_var = drm_fb_helper_check_var,
++ .fb_set_par = drm_fb_helper_set_par,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_pan_display = drm_fb_helper_pan_display,
++ .fb_blank = drm_fb_helper_blank,
++ .fb_setcmap = drm_fb_helper_setcmap,
++ .fb_debug_enter = drm_fb_helper_debug_enter,
++ .fb_debug_leave = drm_fb_helper_debug_leave,
++};
++
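++/**
++ * amdgpu_align_pitch - align a scanout pitch to the hw requirement
++ *
++ * Pads @width up to the display controller's pitch alignment for the
++ * given bpp (e.g. 32bpp scanlines align to 64 pixels, so a 1000 pixel
++ * wide surface is padded to 1024).
++ */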
++int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
++{
++ int aligned = width;
++ int pitch_mask = 0;
++
++ switch (bpp / 8) {
++ case 1:
++ pitch_mask = 255;
++ break;
++ case 2:
++ pitch_mask = 127;
++ break;
++ case 3:
++ case 4:
++ pitch_mask = 63;
++ break;
++ }
++
++ aligned += pitch_mask;
++ aligned &= ~pitch_mask;
++ return aligned;
++}
++
++static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
++{
++ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
++ int ret;
++
++ ret = amdgpu_bo_reserve(rbo, false);
++ if (likely(ret == 0)) {
++ amdgpu_bo_kunmap(rbo);
++ amdgpu_bo_unpin(rbo);
++ amdgpu_bo_unreserve(rbo);
++ }
++ drm_gem_object_unreference_unlocked(gobj);
++}
++
++static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
++ struct drm_mode_fb_cmd2 *mode_cmd,
++ struct drm_gem_object **gobj_p)
++{
++ struct amdgpu_device *adev = rfbdev->adev;
++ struct drm_gem_object *gobj = NULL;
++ struct amdgpu_bo *rbo = NULL;
++ bool fb_tiled = false; /* useful for testing */
++ u32 tiling_flags = 0;
++ int ret;
++ int aligned_size, size;
++ int height = mode_cmd->height;
++ u32 bpp, depth;
++
++ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
++
++ /* need to align pitch with crtc limits */
++ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
++ fb_tiled) * ((bpp + 1) / 8);
++
++ height = ALIGN(mode_cmd->height, 8);
++ size = mode_cmd->pitches[0] * height;
++ aligned_size = ALIGN(size, PAGE_SIZE);
++ ret = amdgpu_gem_object_create(adev, aligned_size, 0,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ 0, true,
++ &gobj);
++ if (ret) {
++ printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
++ aligned_size);
++ return -ENOMEM;
++ }
++ rbo = gem_to_amdgpu_bo(gobj);
++
++ if (fb_tiled)
++ tiling_flags = AMDGPU_TILING_MACRO;
++
++#ifdef __BIG_ENDIAN
++ switch (bpp) {
++ case 32:
++ tiling_flags |= AMDGPU_TILING_SWAP_32BIT;
++ break;
++ case 16:
++ tiling_flags |= AMDGPU_TILING_SWAP_16BIT;
++ default:
++ break;
++ }
++#endif
++
++ ret = amdgpu_bo_reserve(rbo, false);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ if (tiling_flags) {
++ ret = amdgpu_bo_set_tiling_flags(rbo,
++ tiling_flags | AMDGPU_TILING_SURFACE);
++ if (ret)
++ dev_err(adev->dev, "FB failed to set tiling flags\n");
++ }
++
++ ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL);
++ if (ret) {
++ amdgpu_bo_unreserve(rbo);
++ goto out_unref;
++ }
++ ret = amdgpu_bo_kmap(rbo, NULL);
++ amdgpu_bo_unreserve(rbo);
++ if (ret) {
++ goto out_unref;
++ }
++
++ *gobj_p = gobj;
++ return 0;
++out_unref:
++ amdgpufb_destroy_pinned_object(gobj);
++ *gobj_p = NULL;
++ return ret;
++}
++
++static int amdgpufb_create(struct drm_fb_helper *helper,
++ struct drm_fb_helper_surface_size *sizes)
++{
++ struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
++ struct amdgpu_device *adev = rfbdev->adev;
++ struct fb_info *info;
++ struct drm_framebuffer *fb = NULL;
++ struct drm_mode_fb_cmd2 mode_cmd;
++ struct drm_gem_object *gobj = NULL;
++ struct amdgpu_bo *rbo = NULL;
++ struct device *device = &adev->pdev->dev;
++ int ret;
++ unsigned long tmp;
++
++ mode_cmd.width = sizes->surface_width;
++ mode_cmd.height = sizes->surface_height;
++
++ if (sizes->surface_bpp == 24)
++ sizes->surface_bpp = 32;
++
++ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
++ sizes->surface_depth);
++
++ ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
++ if (ret) {
++ DRM_ERROR("failed to create fbcon object %d\n", ret);
++ return ret;
++ }
++
++ rbo = gem_to_amdgpu_bo(gobj);
++
++ /* okay we have an object now allocate the framebuffer */
++ info = framebuffer_alloc(0, device);
++ if (info == NULL) {
++ ret = -ENOMEM;
++ goto out_unref;
++ }
++
++ info->par = rfbdev;
++
++ ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
++ if (ret) {
++ DRM_ERROR("failed to initialize framebuffer %d\n", ret);
++ goto out_unref;
++ }
++
++ fb = &rfbdev->rfb.base;
++
++ /* setup helper */
++ rfbdev->helper.fb = fb;
++ rfbdev->helper.fbdev = info;
++
++ memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
++
++ strcpy(info->fix.id, "amdgpudrmfb");
++
++ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
++
++ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
++ info->fbops = &amdgpufb_ops;
++
++ tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
++ info->fix.smem_start = adev->mc.aper_base + tmp;
++ info->fix.smem_len = amdgpu_bo_size(rbo);
++ info->screen_base = rbo->kptr;
++ info->screen_size = amdgpu_bo_size(rbo);
++
++ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
++
++ /* setup aperture base/size for vesafb takeover */
++ info->apertures = alloc_apertures(1);
++ if (!info->apertures) {
++ ret = -ENOMEM;
++ goto out_unref;
++ }
++ info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
++ info->apertures->ranges[0].size = adev->mc.aper_size;
++
++ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
++
++ if (info->screen_base == NULL) {
++ ret = -ENOSPC;
++ goto out_unref;
++ }
++
++ ret = fb_alloc_cmap(&info->cmap, 256, 0);
++ if (ret) {
++ ret = -ENOMEM;
++ goto out_unref;
++ }
++
++ DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
++	DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)adev->mc.aper_base);
++ DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
++ DRM_INFO("fb depth is %d\n", fb->depth);
++ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
++
++ vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
++ return 0;
++
++out_unref:
++ if (fb && ret) {
++ drm_gem_object_unreference(gobj);
++ drm_framebuffer_unregister_private(fb);
++ drm_framebuffer_cleanup(fb);
++ kfree(fb);
++ }
++ return ret;
++}
++
++void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
++{
++ if (adev->mode_info.rfbdev)
++ drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper);
++}
++
++static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
++{
++ struct fb_info *info;
++ struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
++
++ if (rfbdev->helper.fbdev) {
++ info = rfbdev->helper.fbdev;
++
++ unregister_framebuffer(info);
++ if (info->cmap.len)
++ fb_dealloc_cmap(&info->cmap);
++ framebuffer_release(info);
++ }
++
++ if (rfb->obj) {
++ amdgpufb_destroy_pinned_object(rfb->obj);
++ rfb->obj = NULL;
++ }
++ drm_fb_helper_fini(&rfbdev->helper);
++ drm_framebuffer_unregister_private(&rfb->base);
++ drm_framebuffer_cleanup(&rfb->base);
++
++ return 0;
++}
++
++/** Sets the color ramps on behalf of fbcon */
++static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
++ u16 blue, int regno)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++
++ amdgpu_crtc->lut_r[regno] = red >> 6;
++ amdgpu_crtc->lut_g[regno] = green >> 6;
++ amdgpu_crtc->lut_b[regno] = blue >> 6;
++}
++
++/** Gets the color ramps on behalf of fbcon */
++static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
++ u16 *blue, int regno)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++
++ *red = amdgpu_crtc->lut_r[regno] << 6;
++ *green = amdgpu_crtc->lut_g[regno] << 6;
++ *blue = amdgpu_crtc->lut_b[regno] << 6;
++}
++
++static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
++ .gamma_set = amdgpu_crtc_fb_gamma_set,
++ .gamma_get = amdgpu_crtc_fb_gamma_get,
++ .fb_probe = amdgpufb_create,
++};
++
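++/**
++ * amdgpu_fbdev_init - create the fbdev emulation
++ *
++ * Allocates the fbdev helper and registers the initial fbcon
++ * configuration.  Cards with 32MB of VRAM or less fall back to an
++ * 8 bpp console to save memory.
++ */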
++int amdgpu_fbdev_init(struct amdgpu_device *adev)
++{
++ struct amdgpu_fbdev *rfbdev;
++ int bpp_sel = 32;
++ int ret;
++
++ /* don't init fbdev on hw without DCE */
++ if (!adev->mode_info.mode_config_initialized)
++ return 0;
++
++ /* select 8 bpp console on low vram cards */
++ if (adev->mc.real_vram_size <= (32*1024*1024))
++ bpp_sel = 8;
++
++ rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
++ if (!rfbdev)
++ return -ENOMEM;
++
++ rfbdev->adev = adev;
++ adev->mode_info.rfbdev = rfbdev;
++
++ drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
++ &amdgpu_fb_helper_funcs);
++
++ ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
++ adev->mode_info.num_crtc,
++ AMDGPUFB_CONN_LIMIT);
++ if (ret) {
++ kfree(rfbdev);
++ return ret;
++ }
++
++ drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
++
++ /* disable all the possible outputs/crtcs before entering KMS mode */
++ drm_helper_disable_unused_functions(adev->ddev);
++
++ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
++ return 0;
++}
++
++void amdgpu_fbdev_fini(struct amdgpu_device *adev)
++{
++ if (!adev->mode_info.rfbdev)
++ return;
++
++ amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
++ kfree(adev->mode_info.rfbdev);
++ adev->mode_info.rfbdev = NULL;
++}
++
++void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
++{
++ if (adev->mode_info.rfbdev)
++ fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
++}
++
++int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
++{
++ struct amdgpu_bo *robj;
++ int size = 0;
++
++ if (!adev->mode_info.rfbdev)
++ return 0;
++
++ robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
++ size += amdgpu_bo_size(robj);
++ return size;
++}
++
++bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
++{
++ if (!adev->mode_info.rfbdev)
++ return false;
++ if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
++ return true;
++ return false;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+new file mode 100644
+index 0000000..fc63855e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -0,0 +1,1139 @@
++/*
++ * Copyright 2009 Jerome Glisse.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Jerome Glisse <glisse@freedesktop.org>
++ * Dave Airlie
++ */
++#include <linux/seq_file.h>
++#include <linux/atomic.h>
++#include <linux/wait.h>
++#include <linux/kref.h>
++#include <linux/slab.h>
++#include <linux/firmware.h>
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "amdgpu_trace.h"
++
++/*
++ * Fences
++ * Fences mark an event in the GPU's pipeline and are used
++ * for GPU/CPU synchronization. When the fence is written,
++ * it is expected that all buffers associated with that fence
++ * are no longer in use by the associated ring on the GPU and
++ * that the relevant GPU caches have been flushed.
++ */
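++
++/*
++ * Illustrative sketch, not part of the original patch: the typical
++ * fence lifecycle built from the helpers below, assuming a valid
++ * ring and owner and that the caller holds the locks the helpers
++ * expect (e.g. the ring emission lock for amdgpu_fence_emit()):
++ *
++ *	struct amdgpu_fence *fence;
++ *	int r = amdgpu_fence_emit(ring, owner, &fence);
++ *	if (!r) {
++ *		r = amdgpu_fence_wait(fence, false);
++ *		amdgpu_fence_unref(&fence);
++ *	}
++ */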
++
++/**
++ * amdgpu_fence_write - write a fence value
++ *
++ * @ring: ring the fence is associated with
++ * @seq: sequence number to write
++ *
++ * Writes a fence value to memory (all asics).
++ */
++static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
++{
++ struct amdgpu_fence_driver *drv = &ring->fence_drv;
++
++ if (drv->cpu_addr)
++ *drv->cpu_addr = cpu_to_le32(seq);
++}
++
++/**
++ * amdgpu_fence_read - read a fence value
++ *
++ * @ring: ring the fence is associated with
++ *
++ * Reads a fence value from memory (all asics).
++ * Returns the value of the fence read from memory.
++ */
++static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
++{
++ struct amdgpu_fence_driver *drv = &ring->fence_drv;
++ u32 seq = 0;
++
++ if (drv->cpu_addr)
++ seq = le32_to_cpu(*drv->cpu_addr);
++ else
++ seq = lower_32_bits(atomic64_read(&drv->last_seq));
++
++ return seq;
++}
++
++/**
++ * amdgpu_fence_schedule_check - schedule lockup check
++ *
++ * @ring: pointer to struct amdgpu_ring
++ *
++ * Queues a delayed work item to check for lockups.
++ */
++static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
++{
++ /*
++ * Do not reset the timer here with mod_delayed_work,
++ * this can livelock in an interaction with TTM delayed destroy.
++ */
++ queue_delayed_work(system_power_efficient_wq,
++ &ring->fence_drv.lockup_work,
++ AMDGPU_FENCE_JIFFIES_TIMEOUT);
++}
++
++/**
++ * amdgpu_fence_emit - emit a fence on the requested ring
++ *
++ * @ring: ring the fence is associated with
++ * @owner: creator of the fence
++ * @fence: amdgpu fence object
++ *
++ * Emits a fence command on the requested ring (all asics).
++ * Returns 0 on success, -ENOMEM on failure.
++ */
++int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
++ struct amdgpu_fence **fence)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ /* we are protected by the ring emission mutex */
++ *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
++ if ((*fence) == NULL) {
++ return -ENOMEM;
++ }
++ (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
++ (*fence)->ring = ring;
++ (*fence)->owner = owner;
++ fence_init(&(*fence)->base, &amdgpu_fence_ops,
++ &adev->fence_queue.lock, adev->fence_context + ring->idx,
++ (*fence)->seq);
++ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, false);
++ trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
++ return 0;
++}
++
++/**
++ * amdgpu_fence_check_signaled - callback from fence_queue
++ *
++ * This function is called with the fence_queue lock held, which is
++ * also used for fence locking itself, so the unlocked variants are
++ * used for fence_signal and remove_wait_queue.
++ */
++static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
++{
++ struct amdgpu_fence *fence;
++ struct amdgpu_device *adev;
++ u64 seq;
++ int ret;
++
++ fence = container_of(wait, struct amdgpu_fence, fence_wake);
++ adev = fence->ring->adev;
++
++ /*
++ * We cannot use amdgpu_fence_process here because we're already
++ * in the waitqueue, in a call from wake_up_all.
++ */
++ seq = atomic64_read(&fence->ring->fence_drv.last_seq);
++ if (seq >= fence->seq) {
++ ret = fence_signal_locked(&fence->base);
++ if (!ret)
++ FENCE_TRACE(&fence->base, "signaled from irq context\n");
++ else
++ FENCE_TRACE(&fence->base, "was already signaled\n");
++
++ amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
++ fence->ring->fence_drv.irq_type);
++ __remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
++ fence_put(&fence->base);
++ } else
++ FENCE_TRACE(&fence->base, "pending\n");
++ return 0;
++}
++
++/**
++ * amdgpu_fence_activity - check for fence activity
++ *
++ * @ring: pointer to struct amdgpu_ring
++ *
++ * Checks the current fence value and calculates the last
++ * signaled fence value. Returns true if activity occurred
++ * on the ring, in which case the fence_queue should be woken up.
++ */
++static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
++{
++ uint64_t seq, last_seq, last_emitted;
++ unsigned count_loop = 0;
++ bool wake = false;
++
++ /* Note there is a scenario here for an infinite loop but it's
++ * very unlikely to happen. For it to happen, the current polling
++ * process needs to be interrupted by another process, and that
++ * other process needs to update the last_seq between the atomic
++ * read and xchg of the current process.
++ *
++ * Moreover, for this to become an infinite loop, new fences need
++ * to be signaled continuously, i.e. amdgpu_fence_read needs to
++ * return a different value each time for both the currently
++ * polling process and the other process that xchgs the last_seq
++ * between the atomic read and xchg of the current process. And the
++ * value the other process sets as the last seq must be higher than
++ * the seq value we just read. Which means the current process needs
++ * to be interrupted after amdgpu_fence_read and before the
++ * atomic xchg.
++ *
++ * To be even more safe we count the number of times we loop and
++ * bail out after 10 loops, just accepting the fact that we might
++ * have temporarily set the last_seq not to the true last
++ * seq but to an older one.
++ */
++ last_seq = atomic64_read(&ring->fence_drv.last_seq);
++ do {
++ last_emitted = ring->fence_drv.sync_seq[ring->idx];
++ seq = amdgpu_fence_read(ring);
++ seq |= last_seq & 0xffffffff00000000LL;
++ if (seq < last_seq) {
++ seq &= 0xffffffff;
++ seq |= last_emitted & 0xffffffff00000000LL;
++ }
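++ /* Worked example (illustrative, not from the original patch):
++ * with last_seq = 0x00000001fffffff0 the 32-bit hw counter can
++ * wrap so that amdgpu_fence_read() returns 0x00000005. Extending
++ * it with the upper bits of last_seq gives 0x0000000100000005,
++ * which is < last_seq, so the upper bits are taken from
++ * last_emitted (e.g. 0x0000000200000010) instead, yielding
++ * seq = 0x0000000200000005.
++ */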
++
++ if (seq <= last_seq || seq > last_emitted) {
++ break;
++ }
++ /* If we loop again we don't want to return without
++ * checking if a fence is signaled, as it means that the
++ * seq we just read is different from the previous one.
++ */
++ wake = true;
++ last_seq = seq;
++ if ((count_loop++) > 10) {
++ /* We looped too many times; leave with the
++ * fact that we might have set an older fence
++ * seq than the current real last seq as signaled
++ * by the hw.
++ */
++ break;
++ }
++ } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
++
++ if (seq < last_emitted)
++ amdgpu_fence_schedule_check(ring);
++
++ return wake;
++}
++
++/**
++ * amdgpu_fence_check_lockup - check for hardware lockup
++ *
++ * @work: delayed work item
++ *
++ * Checks for fence activity and, if there is none, probes
++ * the hardware to see whether a lockup occurred.
++ */
++static void amdgpu_fence_check_lockup(struct work_struct *work)
++{
++ struct amdgpu_fence_driver *fence_drv;
++ struct amdgpu_ring *ring;
++
++ fence_drv = container_of(work, struct amdgpu_fence_driver,
++ lockup_work.work);
++ ring = fence_drv->ring;
++
++ if (!down_read_trylock(&ring->adev->exclusive_lock)) {
++ /* just reschedule the check if a reset is going on */
++ amdgpu_fence_schedule_check(ring);
++ return;
++ }
++
++ if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
++ fence_drv->delayed_irq = false;
++ amdgpu_irq_update(ring->adev, fence_drv->irq_src,
++ fence_drv->irq_type);
++ }
++
++ if (amdgpu_fence_activity(ring))
++ wake_up_all(&ring->adev->fence_queue);
++ else if (amdgpu_ring_is_lockup(ring)) {
++ /* good news we believe it's a lockup */
++ dev_warn(ring->adev->dev, "GPU lockup (current fence id "
++ "0x%016llx last fence id 0x%016llx on ring %d)\n",
++ (uint64_t)atomic64_read(&fence_drv->last_seq),
++ fence_drv->sync_seq[ring->idx], ring->idx);
++
++ /* remember that we need a reset */
++ ring->adev->needs_reset = true;
++ wake_up_all(&ring->adev->fence_queue);
++ }
++ up_read(&ring->adev->exclusive_lock);
++}
++
++/**
++ * amdgpu_fence_process - process a fence
++ *
++ * @ring: ring the fence is associated with
++ *
++ * Checks the current fence value and wakes the fence queue
++ * if the sequence number has increased (all asics).
++ */
++void amdgpu_fence_process(struct amdgpu_ring *ring)
++{
++ uint64_t seq, last_seq, last_emitted;
++ unsigned count_loop = 0;
++ bool wake = false;
++
++ /* Note there is a scenario here for an infinite loop but it's
++ * very unlikely to happen. For it to happen, the current polling
++ * process needs to be interrupted by another process, and that
++ * other process needs to update the last_seq between the atomic
++ * read and xchg of the current process.
++ *
++ * Moreover, for this to become an infinite loop, new fences need
++ * to be signaled continuously, i.e. amdgpu_fence_read needs to
++ * return a different value each time for both the currently
++ * polling process and the other process that xchgs the last_seq
++ * between the atomic read and xchg of the current process. And the
++ * value the other process sets as the last seq must be higher than
++ * the seq value we just read. Which means the current process needs
++ * to be interrupted after amdgpu_fence_read and before the
++ * atomic xchg.
++ *
++ * To be even more safe we count the number of times we loop and
++ * bail out after 10 loops, just accepting the fact that we might
++ * have temporarily set the last_seq not to the true last
++ * seq but to an older one.
++ */
++ last_seq = atomic64_read(&ring->fence_drv.last_seq);
++ do {
++ last_emitted = ring->fence_drv.sync_seq[ring->idx];
++ seq = amdgpu_fence_read(ring);
++ seq |= last_seq & 0xffffffff00000000LL;
++ if (seq < last_seq) {
++ seq &= 0xffffffff;
++ seq |= last_emitted & 0xffffffff00000000LL;
++ }
++
++ if (seq <= last_seq || seq > last_emitted) {
++ break;
++ }
++ /* If we loop again we don't want to return without
++ * checking if a fence is signaled, as it means that the
++ * seq we just read is different from the previous one.
++ */
++ wake = true;
++ last_seq = seq;
++ if ((count_loop++) > 10) {
++ /* We looped too many times; leave with the
++ * fact that we might have set an older fence
++ * seq than the current real last seq as signaled
++ * by the hw.
++ */
++ break;
++ }
++ } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
++
++ if (wake)
++ wake_up_all(&ring->adev->fence_queue);
++}
++
++/**
++ * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
++ *
++ * @ring: ring the fence is associated with
++ * @seq: sequence number
++ *
++ * Check if the last signaled fence sequence number is >= the requested
++ * sequence number (all asics).
++ * Returns true if the fence has signaled (current fence value
++ * is >= the requested value) or false if it has not (current fence
++ * value is < the requested value). Helper function for
++ * amdgpu_fence_signaled().
++ */
++static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
++{
++ if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
++ return true;
++
++ /* poll new last sequence at least once */
++ amdgpu_fence_process(ring);
++ if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
++ return true;
++
++ return false;
++}
++
++static bool amdgpu_fence_is_signaled(struct fence *f)
++{
++ struct amdgpu_fence *fence = to_amdgpu_fence(f);
++ struct amdgpu_ring *ring = fence->ring;
++ struct amdgpu_device *adev = ring->adev;
++
++ if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
++ return true;
++
++ if (down_read_trylock(&adev->exclusive_lock)) {
++ amdgpu_fence_process(ring);
++ up_read(&adev->exclusive_lock);
++
++ if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
++ return true;
++ }
++ return false;
++}
++
++/**
++ * amdgpu_fence_enable_signaling - enable signaling on a fence
++ * @fence: fence
++ *
++ * This function is called with fence_queue lock held, and adds a callback
++ * to fence_queue that checks if this fence is signaled, and if so it
++ * signals the fence and removes itself.
++ */
++static bool amdgpu_fence_enable_signaling(struct fence *f)
++{
++ struct amdgpu_fence *fence = to_amdgpu_fence(f);
++ struct amdgpu_ring *ring = fence->ring;
++ struct amdgpu_device *adev = ring->adev;
++
++ if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
++ return false;
++
++ if (down_read_trylock(&adev->exclusive_lock)) {
++ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
++ ring->fence_drv.irq_type);
++ if (amdgpu_fence_activity(ring))
++ wake_up_all_locked(&adev->fence_queue);
++
++ /* did fence get signaled after we enabled the sw irq? */
++ if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
++ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
++ ring->fence_drv.irq_type);
++ up_read(&adev->exclusive_lock);
++ return false;
++ }
++
++ up_read(&adev->exclusive_lock);
++ } else {
++ /* we're probably in a lockup, let's not fiddle too much */
++ if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
++ ring->fence_drv.irq_type))
++ ring->fence_drv.delayed_irq = true;
++ amdgpu_fence_schedule_check(ring);
++ }
++
++ fence->fence_wake.flags = 0;
++ fence->fence_wake.private = NULL;
++ fence->fence_wake.func = amdgpu_fence_check_signaled;
++ __add_wait_queue(&adev->fence_queue, &fence->fence_wake);
++ fence_get(f);
++ FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
++ return true;
++}
++
++/**
++ * amdgpu_fence_signaled - check if a fence has signaled
++ *
++ * @fence: amdgpu fence object
++ *
++ * Check if the requested fence has signaled (all asics).
++ * Returns true if the fence has signaled or false if it has not.
++ */
++bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
++{
++ if (!fence)
++ return true;
++
++ if (fence->seq == AMDGPU_FENCE_SIGNALED_SEQ)
++ return true;
++
++ if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
++ fence->seq = AMDGPU_FENCE_SIGNALED_SEQ;
++ if (!fence_signal(&fence->base))
++ FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
++ return true;
++ }
++
++ return false;
++}
++
++/**
++ * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
++ *
++ * @adev: amdgpu device pointer
++ * @seq: sequence numbers
++ *
++ * Check if the last signaled fence sequence number is >= the requested
++ * sequence number (all asics).
++ * Returns true if any has signaled (current value is >= requested value)
++ * or false if it has not. Helper function for amdgpu_fence_wait_seq.
++ */
++static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
++{
++ unsigned i;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ if (!adev->rings[i] || !seq[i])
++ continue;
++
++ if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
++ return true;
++ }
++
++ return false;
++}
++
++/**
++ * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
++ *
++ * @adev: amdgpu device pointer
++ * @target_seq: sequence number(s) we want to wait for
++ * @intr: use interruptible sleep
++ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
++ *
++ * Wait for the requested sequence number(s) to be written by any ring
++ * (all asics). The sequence number array is indexed by ring id.
++ * @intr selects whether to use interruptible (true) or non-interruptible
++ * (false) sleep when waiting for the sequence number. Helper function
++ * for amdgpu_fence_wait_*().
++ * Returns the remaining time if the sequence number has passed, 0 when
++ * the wait timed out, or an error for all other cases.
++ * -EDEADLK is returned when a GPU lockup has been detected.
++ */
++long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
++ bool intr, long timeout)
++{
++ uint64_t last_seq[AMDGPU_MAX_RINGS];
++ bool signaled;
++ int i, r;
++
++ while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
++
++ /* Save current sequence values, used to check for GPU lockups */
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_ring *ring = adev->rings[i];
++
++ if (!ring || !target_seq[i])
++ continue;
++
++ last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
++ trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
++ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
++ ring->fence_drv.irq_type);
++ }
++
++ if (intr) {
++ r = wait_event_interruptible_timeout(adev->fence_queue, (
++ (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
++ || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
++ } else {
++ r = wait_event_timeout(adev->fence_queue, (
++ (signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
++ || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
++ }
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_ring *ring = adev->rings[i];
++
++ if (!ring || !target_seq[i])
++ continue;
++
++ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
++ ring->fence_drv.irq_type);
++ trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
++ }
++
++ if (unlikely(r < 0))
++ return r;
++
++ if (unlikely(!signaled)) {
++
++ if (adev->needs_reset)
++ return -EDEADLK;
++
++ /* we were interrupted for some reason and fence
++ * isn't signaled yet, resume waiting */
++ if (r)
++ continue;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_ring *ring = adev->rings[i];
++
++ if (!ring || !target_seq[i])
++ continue;
++
++ if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
++ break;
++ }
++
++ if (i != AMDGPU_MAX_RINGS)
++ continue;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ if (!adev->rings[i] || !target_seq[i])
++ continue;
++
++ if (amdgpu_ring_is_lockup(adev->rings[i]))
++ break;
++ }
++
++ if (i < AMDGPU_MAX_RINGS) {
++ /* good news we believe it's a lockup */
++ dev_warn(adev->dev, "GPU lockup (waiting for "
++ "0x%016llx last fence id 0x%016llx on"
++ " ring %d)\n",
++ target_seq[i], last_seq[i], i);
++
++ /* remember that we need a reset */
++ adev->needs_reset = true;
++ wake_up_all(&adev->fence_queue);
++ return -EDEADLK;
++ }
++
++ if (timeout < MAX_SCHEDULE_TIMEOUT) {
++ timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
++ if (timeout <= 0) {
++ return 0;
++ }
++ }
++ }
++ }
++ return timeout;
++}
++
++/**
++ * amdgpu_fence_wait - wait for a fence to signal
++ *
++ * @fence: amdgpu fence object
++ * @intr: use interruptible sleep
++ *
++ * Wait for the requested fence to signal (all asics).
++ * @intr selects whether to use interruptible (true) or non-interruptible
++ * (false) sleep when waiting for the fence.
++ * Returns 0 if the fence has passed, error for all other cases.
++ */
++int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
++{
++ uint64_t seq[AMDGPU_MAX_RINGS] = {};
++ long r;
++
++ seq[fence->ring->idx] = fence->seq;
++ if (seq[fence->ring->idx] == AMDGPU_FENCE_SIGNALED_SEQ)
++ return 0;
++
++ r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
++ if (r < 0) {
++ return r;
++ }
++
++ fence->seq = AMDGPU_FENCE_SIGNALED_SEQ;
++ r = fence_signal(&fence->base);
++ if (!r)
++ FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
++ return 0;
++}
++
++/**
++ * amdgpu_fence_wait_any - wait for a fence to signal on any ring
++ *
++ * @adev: amdgpu device pointer
++ * @fences: amdgpu fence object(s)
++ * @intr: use interruptible sleep
++ *
++ * Wait for any requested fence to signal (all asics). The fence
++ * array is indexed by ring id. @intr selects whether to use
++ * interruptible (true) or non-interruptible (false) sleep when
++ * waiting for the fences. Used by the suballocator.
++ * Returns 0 if any fence has passed, error for all other cases.
++ */
++int amdgpu_fence_wait_any(struct amdgpu_device *adev,
++ struct amdgpu_fence **fences,
++ bool intr)
++{
++ uint64_t seq[AMDGPU_MAX_RINGS];
++ unsigned i, num_rings = 0;
++ long r;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ seq[i] = 0;
++
++ if (!fences[i]) {
++ continue;
++ }
++
++ seq[i] = fences[i]->seq;
++ ++num_rings;
++
++ /* test if something was already signaled */
++ if (seq[i] == AMDGPU_FENCE_SIGNALED_SEQ)
++ return 0;
++ }
++
++ /* nothing to wait for? */
++ if (num_rings == 0)
++ return -ENOENT;
++
++ r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
++ if (r < 0) {
++ return r;
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_fence_wait_next - wait for the next fence to signal
++ *
++ * @adev: amdgpu device pointer
++ * @ring: ring index the fence is associated with
++ *
++ * Wait for the next fence on the requested ring to signal (all asics).
++ * Returns 0 if the next fence has passed, error for all other cases.
++ * Caller must hold ring lock.
++ */
++int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
++{
++ uint64_t seq[AMDGPU_MAX_RINGS] = {};
++ long r;
++
++ seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
++ if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
++ /* nothing to wait for, last_seq is
++ already the last emitted fence */
++ return -ENOENT;
++ }
++ r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
++ if (r < 0)
++ return r;
++ return 0;
++}
++
++/**
++ * amdgpu_fence_wait_empty - wait for all fences to signal
++ *
++ * @adev: amdgpu device pointer
++ * @ring: ring index the fence is associated with
++ *
++ * Wait for all fences on the requested ring to signal (all asics).
++ * Returns 0 if the fences have passed, error for all other cases.
++ * Caller must hold ring lock.
++ */
++int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint64_t seq[AMDGPU_MAX_RINGS] = {};
++ long r;
++
++ seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
++ if (!seq[ring->idx])
++ return 0;
++
++ r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
++ if (r < 0) {
++ if (r == -EDEADLK)
++ return -EDEADLK;
++
++ dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
++ ring->idx, r);
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_fence_ref - take a ref on a fence
++ *
++ * @fence: amdgpu fence object
++ *
++ * Take a reference on a fence (all asics).
++ * Returns the fence.
++ */
++struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
++{
++ fence_get(&fence->base);
++ return fence;
++}
++
++/**
++ * amdgpu_fence_unref - remove a ref on a fence
++ *
++ * @fence: amdgpu fence object
++ *
++ * Remove a reference on a fence (all asics).
++ */
++void amdgpu_fence_unref(struct amdgpu_fence **fence)
++{
++ struct amdgpu_fence *tmp = *fence;
++
++ *fence = NULL;
++ if (tmp)
++ fence_put(&tmp->base);
++}
++
++/**
++ * amdgpu_fence_count_emitted - get the count of emitted fences
++ *
++ * @ring: ring the fence is associated with
++ *
++ * Get the number of fences emitted on the requested ring (all asics).
++ * Returns the number of emitted fences on the ring. Used by the
++ * dynpm code to track ring activity.
++ */
++unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
++{
++ uint64_t emitted;
++
++ /* We are not protected by ring lock when reading the last sequence
++ * but it's ok to report slightly wrong fence count here.
++ */
++ amdgpu_fence_process(ring);
++ emitted = ring->fence_drv.sync_seq[ring->idx]
++ - atomic64_read(&ring->fence_drv.last_seq);
++ /* to avoid 32-bit wrap-around */
++ if (emitted > 0x10000000)
++ emitted = 0x10000000;
++
++ return (unsigned)emitted;
++}
++
++/**
++ * amdgpu_fence_need_sync - do we need a semaphore
++ *
++ * @fence: amdgpu fence object
++ * @dst_ring: which ring to check against
++ *
++ * Check if the fence needs to be synced against another ring
++ * (all asics). If so, we need to emit a semaphore.
++ * Returns true if we need to sync with another ring, false if
++ * not.
++ */
++bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
++ struct amdgpu_ring *dst_ring)
++{
++ struct amdgpu_fence_driver *fdrv;
++
++ if (!fence)
++ return false;
++
++ if (fence->ring == dst_ring)
++ return false;
++
++ /* we are protected by the ring mutex */
++ fdrv = &dst_ring->fence_drv;
++ if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
++ return false;
++
++ return true;
++}
++
++/**
++ * amdgpu_fence_note_sync - record the sync point
++ *
++ * @fence: amdgpu fence object
++ * @dst_ring: which ring to check against
++ *
++ * Note the sequence number at which point the fence will
++ * be synced with the requested ring (all asics).
++ */
++void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
++ struct amdgpu_ring *dst_ring)
++{
++ struct amdgpu_fence_driver *dst, *src;
++ unsigned i;
++
++ if (!fence)
++ return;
++
++ if (fence->ring == dst_ring)
++ return;
++
++ /* we are protected by the ring mutex */
++ src = &fence->ring->fence_drv;
++ dst = &dst_ring->fence_drv;
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ if (i == dst_ring->idx)
++ continue;
++
++ dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
++ }
++}
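++
++/*
++ * Illustrative pairing of the two helpers above (a sketch, not part
++ * of the original patch): a caller deciding whether to emit a
++ * semaphore before submitting work on dst_ring would do roughly:
++ *
++ *	if (amdgpu_fence_need_sync(fence, dst_ring)) {
++ *		... emit a semaphore wait on dst_ring ...
++ *		amdgpu_fence_note_sync(fence, dst_ring);
++ *	}
++ */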
++
++/**
++ * amdgpu_fence_driver_start_ring - make the fence driver
++ * ready for use on the requested ring.
++ *
++ * @ring: ring to start the fence driver on
++ * @irq_src: interrupt source to use for this ring
++ * @irq_type: interrupt type to use for this ring
++ *
++ * Make the fence driver ready for processing (all asics).
++ * Not all asics have all rings, so each asic will only
++ * start the fence driver on the rings it has.
++ * Returns 0 for success, errors for failure.
++ */
++int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
++ struct amdgpu_irq_src *irq_src,
++ unsigned irq_type)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint64_t index;
++
++ if (ring != &adev->uvd.ring) {
++ ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
++ ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
++ } else {
++ /* put fence directly behind firmware */
++ index = ALIGN(adev->uvd.fw->size, 8);
++ ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
++ ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
++ }
++ amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
++ ring->fence_drv.initialized = true;
++ ring->fence_drv.irq_src = irq_src;
++ ring->fence_drv.irq_type = irq_type;
++ dev_info(adev->dev, "fence driver on ring %d uses gpu addr 0x%016llx, "
++ "cpu addr 0x%p\n", ring->idx,
++ ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
++ return 0;
++}
++
++/**
++ * amdgpu_fence_driver_init_ring - init the fence driver
++ * for the requested ring.
++ *
++ * @ring: ring to init the fence driver on
++ *
++ * Init the fence driver for the requested ring (all asics).
++ * Helper function for amdgpu_fence_driver_init().
++ */
++void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
++{
++ int i;
++
++ ring->fence_drv.cpu_addr = NULL;
++ ring->fence_drv.gpu_addr = 0;
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
++ ring->fence_drv.sync_seq[i] = 0;
++
++ atomic64_set(&ring->fence_drv.last_seq, 0);
++ ring->fence_drv.initialized = false;
++
++ INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
++ amdgpu_fence_check_lockup);
++ ring->fence_drv.ring = ring;
++}
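++
++/*
++ * Illustrative init ordering (a sketch, not part of the original
++ * patch): amdgpu_fence_driver_init_ring() is expected to run when a
++ * ring is set up in software, and amdgpu_fence_driver_start_ring()
++ * later supplies the fence address and interrupt source once the
++ * hardware is ready:
++ *
++ *	amdgpu_fence_driver_init_ring(ring);
++ *	...
++ *	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
++ */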
++
++/**
++ * amdgpu_fence_driver_init - init the fence driver
++ * for all possible rings.
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Init the fence driver for all possible rings (all asics).
++ * Not all asics have all rings, so each asic will only
++ * start the fence driver on the rings it has using
++ * amdgpu_fence_driver_start_ring().
++ * Returns 0 for success.
++ */
++int amdgpu_fence_driver_init(struct amdgpu_device *adev)
++{
++ init_waitqueue_head(&adev->fence_queue);
++ if (amdgpu_debugfs_fence_init(adev))
++ dev_err(adev->dev, "fence debugfs file creation failed\n");
++
++ return 0;
++}
++
++/**
++ * amdgpu_fence_driver_fini - tear down the fence driver
++ * for all possible rings.
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Tear down the fence driver for all possible rings (all asics).
++ */
++void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
++{
++ int i, r;
++
++ mutex_lock(&adev->ring_lock);
++ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
++ struct amdgpu_ring *ring = adev->rings[i];
++ if (!ring || !ring->fence_drv.initialized)
++ continue;
++ r = amdgpu_fence_wait_empty(ring);
++ if (r) {
++ /* no need to trigger GPU reset as we are unloading */
++ amdgpu_fence_driver_force_completion(adev);
++ }
++ wake_up_all(&adev->fence_queue);
++ ring->fence_drv.initialized = false;
++ }
++ mutex_unlock(&adev->ring_lock);
++}
++
++/**
++ * amdgpu_fence_driver_force_completion - force all fence waiters to complete
++ *
++ * @adev: amdgpu device pointer
++ *
++ * In case of GPU reset failure, make sure no process keeps waiting on
++ * a fence that will never complete.
++ */
++void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
++{
++ int i;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
++ struct amdgpu_ring *ring = adev->rings[i];
++ if (!ring || !ring->fence_drv.initialized)
++ continue;
++
++ amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
++ }
++}
++
++
++/*
++ * Fence debugfs
++ */
++#if defined(CONFIG_DEBUG_FS)
++static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *)m->private;
++ struct drm_device *dev = node->minor->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ int i, j;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_ring *ring = adev->rings[i];
++ if (!ring || !ring->fence_drv.initialized)
++ continue;
++
++ amdgpu_fence_process(ring);
++
++ seq_printf(m, "--- ring %d ---\n", i);
++ seq_printf(m, "Last signaled fence 0x%016llx\n",
++ (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
++ seq_printf(m, "Last emitted 0x%016llx\n",
++ ring->fence_drv.sync_seq[i]);
++
++ for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
++ struct amdgpu_ring *other = adev->rings[j];
++ if (i != j && other && other->fence_drv.initialized)
++ seq_printf(m, "Last sync to ring %d 0x%016llx\n",
++ j, ring->fence_drv.sync_seq[j]);
++ }
++ }
++ return 0;
++}
++
++static struct drm_info_list amdgpu_debugfs_fence_list[] = {
++ {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
++};
++#endif
++
++int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
++{
++#if defined(CONFIG_DEBUG_FS)
++ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
++#else
++ return 0;
++#endif
++}
++
++static const char *amdgpu_fence_get_driver_name(struct fence *fence)
++{
++ return "amdgpu";
++}
++
++static const char *amdgpu_fence_get_timeline_name(struct fence *f)
++{
++ struct amdgpu_fence *fence = to_amdgpu_fence(f);
++ return (const char *)fence->ring->name;
++}
++
++static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
++{
++ return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
++}
++
++struct amdgpu_wait_cb {
++ struct fence_cb base;
++ struct task_struct *task;
++};
++
++static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
++{
++ struct amdgpu_wait_cb *wait =
++ container_of(cb, struct amdgpu_wait_cb, base);
++ wake_up_process(wait->task);
++}
++
++static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
++ signed long t)
++{
++ struct amdgpu_fence *fence = to_amdgpu_fence(f);
++ struct amdgpu_device *adev = fence->ring->adev;
++ struct amdgpu_wait_cb cb;
++
++ cb.task = current;
++
++ if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
++ return t;
++
++ while (t > 0) {
++ if (intr)
++ set_current_state(TASK_INTERRUPTIBLE);
++ else
++ set_current_state(TASK_UNINTERRUPTIBLE);
++
++ /*
++ * amdgpu_test_signaled must be called after
++ * set_current_state to prevent a race with wake_up_process
++ */
++ if (amdgpu_test_signaled(fence))
++ break;
++
++ if (adev->needs_reset) {
++ t = -EDEADLK;
++ break;
++ }
++
++ t = schedule_timeout(t);
++
++ if (t > 0 && intr && signal_pending(current))
++ t = -ERESTARTSYS;
++ }
++
++ __set_current_state(TASK_RUNNING);
++ fence_remove_callback(f, &cb.base);
++
++ return t;
++}
++
++const struct fence_ops amdgpu_fence_ops = {
++ .get_driver_name = amdgpu_fence_get_driver_name,
++ .get_timeline_name = amdgpu_fence_get_timeline_name,
++ .enable_signaling = amdgpu_fence_enable_signaling,
++ .signaled = amdgpu_fence_is_signaled,
++ .wait = amdgpu_fence_default_wait,
++ .release = NULL,
++};
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+new file mode 100644
+index 0000000..e02db0b
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -0,0 +1,371 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++
++/*
++ * GART
++ * The GART (Graphics Aperture Remapping Table) is an aperture
++ * in the GPU's address space. System pages can be mapped into
++ * the aperture and look like contiguous pages from the GPU's
++ * perspective. A page table maps the pages in the aperture
++ * to the actual backing pages in system memory.
++ *
++ * Radeon GPUs support both an internal GART, as described above,
++ * and AGP. AGP works similarly, but the GART table is configured
++ * and maintained by the northbridge rather than the driver.
++ * Radeon hw has a separate AGP aperture that is programmed to
++ * point to the AGP aperture provided by the northbridge and the
++ * requests are passed through to the northbridge aperture.
++ * Both AGP and internal GART can be used at the same time, however
++ * that is not currently supported by the driver.
++ *
++ * This file handles the common internal GART management.
++ */
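++
++/*
++ * Illustrative example (not part of the original patch): with 4 KiB
++ * CPU and GPU pages, a GART offset of 8 MiB maps to GPU page table
++ * slot t = offset / AMDGPU_GPU_PAGE_SIZE = 2048 and CPU page index
++ * p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE) = 2048; this is the
++ * arithmetic amdgpu_gart_bind() and amdgpu_gart_unbind() use below.
++ */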
++
++/*
++ * Common GART table functions.
++ */
++/**
++ * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Allocate system memory for GART page table
++ * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
++ * gart table to be in system memory.
++ * Returns 0 for success, -ENOMEM for failure.
++ */
++int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
++{
++ void *ptr;
++
++ ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
++ &adev->gart.table_addr);
++ if (ptr == NULL) {
++ return -ENOMEM;
++ }
++#ifdef CONFIG_X86
++ if (0) {
++ set_memory_uc((unsigned long)ptr,
++ adev->gart.table_size >> PAGE_SHIFT);
++ }
++#endif
++ adev->gart.ptr = ptr;
++ memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
++ return 0;
++}
++
++/**
++ * amdgpu_gart_table_ram_free - free system ram for gart page table
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Free system memory for GART page table
++ * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
++ * gart table to be in system memory.
++ */
++void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
++{
++ if (adev->gart.ptr == NULL) {
++ return;
++ }
++#ifdef CONFIG_X86
++ if (0) {
++ set_memory_wb((unsigned long)adev->gart.ptr,
++ adev->gart.table_size >> PAGE_SHIFT);
++ }
++#endif
++ pci_free_consistent(adev->pdev, adev->gart.table_size,
++ (void *)adev->gart.ptr,
++ adev->gart.table_addr);
++ adev->gart.ptr = NULL;
++ adev->gart.table_addr = 0;
++}
++
++/**
++ * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Allocate video memory for GART page table
++ * (pcie r4xx, r5xx+). These asics require the
++ * gart table to be in video memory.
++ * Returns 0 for success, error for failure.
++ */
++int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->gart.robj == NULL) {
++ r = amdgpu_bo_create(adev, adev->gart.table_size,
++ PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
++ NULL, &adev->gart.robj);
++ if (r) {
++ return r;
++ }
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_gart_table_vram_pin - pin gart page table in vram
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Pin the GART page table in vram so it will not be moved
++ * by the memory manager (pcie r4xx, r5xx+). These asics require the
++ * gart table to be in video memory.
++ * Returns 0 for success, error for failure.
++ */
++int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
++{
++ uint64_t gpu_addr;
++ int r;
++
++ r = amdgpu_bo_reserve(adev->gart.robj, false);
++ if (unlikely(r != 0))
++ return r;
++ r = amdgpu_bo_pin(adev->gart.robj,
++ AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
++ if (r) {
++ amdgpu_bo_unreserve(adev->gart.robj);
++ return r;
++ }
++ r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
++ if (r)
++ amdgpu_bo_unpin(adev->gart.robj);
++ amdgpu_bo_unreserve(adev->gart.robj);
++ adev->gart.table_addr = gpu_addr;
++ return r;
++}
++
++/**
++ * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
++ * These asics require the gart table to be in video memory.
++ */
++void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->gart.robj == NULL) {
++ return;
++ }
++ r = amdgpu_bo_reserve(adev->gart.robj, false);
++ if (likely(r == 0)) {
++ amdgpu_bo_kunmap(adev->gart.robj);
++ amdgpu_bo_unpin(adev->gart.robj);
++ amdgpu_bo_unreserve(adev->gart.robj);
++ adev->gart.ptr = NULL;
++ }
++}
++
++/**
++ * amdgpu_gart_table_vram_free - free gart page table vram
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Free the video memory used for the GART page table
++ * (pcie r4xx, r5xx+). These asics require the gart table to
++ * be in video memory.
++ */
++void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
++{
++ if (adev->gart.robj == NULL) {
++ return;
++ }
++ amdgpu_bo_unref(&adev->gart.robj);
++}
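++
++/*
++ * Illustrative GART table lifecycle (a sketch, not part of the
++ * original patch) using the helpers above:
++ *
++ *	r = amdgpu_gart_table_vram_alloc(adev);
++ *	if (!r)
++ *		r = amdgpu_gart_table_vram_pin(adev);
++ *	...
++ *	amdgpu_gart_table_vram_unpin(adev);
++ *	amdgpu_gart_table_vram_free(adev);
++ */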
++
++/*
++ * Common gart functions.
++ */
++/**
++ * amdgpu_gart_unbind - unbind pages from the gart page table
++ *
++ * @adev: amdgpu_device pointer
++ * @offset: offset into the GPU's gart aperture
++ * @pages: number of pages to unbind
++ *
++ * Unbinds the requested pages from the gart page table and
++ * replaces them with the dummy page (all asics).
++ */
++void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
++ int pages)
++{
++ unsigned t;
++ unsigned p;
++ int i, j;
++ u64 page_base;
++ uint32_t flags = AMDGPU_PTE_SYSTEM;
++
++ if (!adev->gart.ready) {
++ WARN(1, "trying to unbind memory from uninitialized GART !\n");
++ return;
++ }
++
++ t = offset / AMDGPU_GPU_PAGE_SIZE;
++ p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++ for (i = 0; i < pages; i++, p++) {
++ if (adev->gart.pages[p]) {
++ adev->gart.pages[p] = NULL;
++ adev->gart.pages_addr[p] = adev->dummy_page.addr;
++ page_base = adev->gart.pages_addr[p];
++ if (!adev->gart.ptr)
++ continue;
++
++ for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
++ amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
++ t, page_base, flags);
++ page_base += AMDGPU_GPU_PAGE_SIZE;
++ }
++ }
++ }
++ mb();
++ amdgpu_gart_flush_gpu_tlb(adev, 0);
++}
++
++/**
++ * amdgpu_gart_bind - bind pages into the gart page table
++ *
++ * @adev: amdgpu_device pointer
++ * @offset: offset into the GPU's gart aperture
++ * @pages: number of pages to bind
++ * @pagelist: pages to bind
++ * @dma_addr: DMA addresses of pages
++ *
++ * Binds the requested pages to the gart page table
++ * (all asics).
++ * Returns 0 for success, -EINVAL for failure.
++ */
++int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
++ int pages, struct page **pagelist, dma_addr_t *dma_addr,
++ uint32_t flags)
++{
++ unsigned t;
++ unsigned p;
++ uint64_t page_base;
++ int i, j;
++
++ if (!adev->gart.ready) {
++ WARN(1, "trying to bind memory to uninitialized GART !\n");
++ return -EINVAL;
++ }
++
++ t = offset / AMDGPU_GPU_PAGE_SIZE;
++ p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
++
++ for (i = 0; i < pages; i++, p++) {
++ adev->gart.pages_addr[p] = dma_addr[i];
++ adev->gart.pages[p] = pagelist[i];
++ if (adev->gart.ptr) {
++ page_base = adev->gart.pages_addr[p];
++ for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
++ amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
++ page_base += AMDGPU_GPU_PAGE_SIZE;
++ }
++ }
++ }
++ mb();
++ amdgpu_gart_flush_gpu_tlb(adev, 0);
++ return 0;
++}
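++
++/*
++ * Illustrative use of the bind helper above (a sketch, not part of
++ * the original patch; the exact caller and field names are
++ * assumptions): a TTM backend binding a buffer's pages at a GTT
++ * offset would do roughly:
++ *
++ *	r = amdgpu_gart_bind(adev, gtt_offset, ttm->num_pages,
++ *			     ttm->pages, dma_addresses, flags);
++ */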
++
++/**
++ * amdgpu_gart_init - init the driver info for managing the gart
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Allocate the dummy page and init the gart driver info (all asics).
++ * Returns 0 for success, error for failure.
++ */
++int amdgpu_gart_init(struct amdgpu_device *adev)
++{
++ int r, i;
++
++ if (adev->gart.pages) {
++ return 0;
++ }
++ /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
++ if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
++ DRM_ERROR("Page size is smaller than GPU page size!\n");
++ return -EINVAL;
++ }
++ r = amdgpu_dummy_page_init(adev);
++ if (r)
++ return r;
++ /* Compute table size */
++ adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
++ adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
++ DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
++ adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
++ /* Allocate pages table */
++ adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
++ if (adev->gart.pages == NULL) {
++ amdgpu_gart_fini(adev);
++ return -ENOMEM;
++ }
++ adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
++ adev->gart.num_cpu_pages);
++ if (adev->gart.pages_addr == NULL) {
++ amdgpu_gart_fini(adev);
++ return -ENOMEM;
++ }
++ /* set GART entry to point to the dummy page by default */
++ for (i = 0; i < adev->gart.num_cpu_pages; i++) {
++ adev->gart.pages_addr[i] = adev->dummy_page.addr;
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_gart_fini - tear down the driver info for managing the gart
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Tear down the gart driver info and free the dummy page (all asics).
++ */
++void amdgpu_gart_fini(struct amdgpu_device *adev)
++{
++ if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) {
++ /* unbind pages */
++ amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
++ }
++ adev->gart.ready = false;
++ vfree(adev->gart.pages);
++ vfree(adev->gart.pages_addr);
++ adev->gart.pages = NULL;
++ adev->gart.pages_addr = NULL;
++
++ amdgpu_dummy_page_fini(adev);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+new file mode 100644
+index 0000000..c3f4e85
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+@@ -0,0 +1,72 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_GDS_H__
++#define __AMDGPU_GDS_H__
++
++/* Because TTM requires that allocated buffers be PAGE_SIZE aligned,
++ * we report the GDS/GWS/OA sizes as PAGE_SIZE aligned as well.
++ */
++#define AMDGPU_GDS_SHIFT 2
++#define AMDGPU_GWS_SHIFT PAGE_SHIFT
++#define AMDGPU_OA_SHIFT PAGE_SHIFT
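++
++/*
++ * Illustrative example (not part of the original patch): a GEM
++ * allocation of bo_size N in the GDS domain becomes
++ * N << AMDGPU_GDS_SHIFT bytes (4 bytes per GDS unit) and is then
++ * rounded up to PAGE_SIZE, as done in amdgpu_gem_create_ioctl().
++ */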
++
++#define AMDGPU_PL_GDS TTM_PL_PRIV0
++#define AMDGPU_PL_GWS TTM_PL_PRIV1
++#define AMDGPU_PL_OA TTM_PL_PRIV2
++
++#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0
++#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1
++#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2
++
++struct amdgpu_ring;
++struct amdgpu_bo;
++
++struct amdgpu_gds_asic_info {
++ uint32_t total_size;
++ uint32_t gfx_partition_size;
++ uint32_t cs_partition_size;
++};
++
++struct amdgpu_gds {
++ struct amdgpu_gds_asic_info mem;
++ struct amdgpu_gds_asic_info gws;
++ struct amdgpu_gds_asic_info oa;
++ /* At present, the GDS, GWS and OA resources for gfx (graphics)
++ * are always pre-allocated and available for graphics operation.
++ * Such resources are shared between all gfx clients.
++ * TODO: move this operation to user space
++ */
++ struct amdgpu_bo *gds_gfx_bo;
++ struct amdgpu_bo *gws_gfx_bo;
++ struct amdgpu_bo *oa_gfx_bo;
++};
++
++struct amdgpu_gds_reg_offset {
++ uint32_t mem_base;
++ uint32_t mem_size;
++ uint32_t gws;
++ uint32_t oa;
++};
++
++#endif /* __AMDGPU_GDS_H__ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+new file mode 100644
+index 0000000..5fd0bc7
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -0,0 +1,735 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#include <linux/ktime.h>
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++
++void amdgpu_gem_object_free(struct drm_gem_object *gobj)
++{
++ struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
++
++ if (robj) {
++ if (robj->gem_base.import_attach)
++ drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
++ amdgpu_bo_unref(&robj);
++ }
++}
++
++int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
++ int alignment, u32 initial_domain,
++ u64 flags, bool kernel,
++ struct drm_gem_object **obj)
++{
++ struct amdgpu_bo *robj;
++ unsigned long max_size;
++ int r;
++
++ *obj = NULL;
++ /* At least align on page size */
++ if (alignment < PAGE_SIZE) {
++ alignment = PAGE_SIZE;
++ }
++
++ if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
++ /* Maximum bo size is the unpinned gtt size since we use the gtt to
++ * handle vram to system pool migrations.
++ */
++ max_size = adev->mc.gtt_size - adev->gart_pin_size;
++ if (size > max_size) {
++ DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
++ size >> 20, max_size >> 20);
++ return -ENOMEM;
++ }
++ }
++retry:
++ r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
++ if (r) {
++ if (r != -ERESTARTSYS) {
++ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
++ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
++ goto retry;
++ }
++ DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
++ size, initial_domain, alignment, r);
++ }
++ return r;
++ }
++ *obj = &robj->gem_base;
++ robj->pid = task_pid_nr(current);
++
++ mutex_lock(&adev->gem.mutex);
++ list_add_tail(&robj->list, &adev->gem.objects);
++ mutex_unlock(&adev->gem.mutex);
++
++ return 0;
++}
++
++int amdgpu_gem_init(struct amdgpu_device *adev)
++{
++ INIT_LIST_HEAD(&adev->gem.objects);
++ return 0;
++}
++
++void amdgpu_gem_fini(struct amdgpu_device *adev)
++{
++ amdgpu_bo_force_delete(adev);
++}
++
++/*
++ * Called from drm_gem_handle_create, which appears in both the new and
++ * open ioctl cases.
++ */
++int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
++{
++ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
++ struct amdgpu_device *adev = rbo->adev;
++ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
++ struct amdgpu_vm *vm = &fpriv->vm;
++ struct amdgpu_bo_va *bo_va;
++ int r;
++
++ r = amdgpu_bo_reserve(rbo, false);
++ if (r) {
++ return r;
++ }
++
++ bo_va = amdgpu_vm_bo_find(vm, rbo);
++ if (!bo_va) {
++ bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
++ } else {
++ ++bo_va->ref_count;
++ }
++ amdgpu_bo_unreserve(rbo);
++
++ return 0;
++}
++
++void amdgpu_gem_object_close(struct drm_gem_object *obj,
++ struct drm_file *file_priv)
++{
++ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
++ struct amdgpu_device *adev = rbo->adev;
++ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
++ struct amdgpu_vm *vm = &fpriv->vm;
++ struct amdgpu_bo_va *bo_va;
++ int r;
++
++ r = amdgpu_bo_reserve(rbo, true);
++ if (r) {
++ dev_err(adev->dev, "leaking bo va because "
++ "we fail to reserve bo (%d)\n", r);
++ return;
++ }
++ bo_va = amdgpu_vm_bo_find(vm, rbo);
++ if (bo_va) {
++ if (--bo_va->ref_count == 0) {
++ amdgpu_vm_bo_rmv(adev, bo_va);
++ }
++ }
++ amdgpu_bo_unreserve(rbo);
++}
++
++static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
++{
++ if (r == -EDEADLK) {
++ r = amdgpu_gpu_reset(adev);
++ if (!r)
++ r = -EAGAIN;
++ }
++ return r;
++}
++
++/*
++ * GEM ioctls.
++ */
++int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ union drm_amdgpu_gem_create *args = data;
++ uint64_t size = args->in.bo_size;
++ struct drm_gem_object *gobj;
++ uint32_t handle;
++ bool kernel = false;
++ int r;
++
++ down_read(&adev->exclusive_lock);
++ /* create a gem object to contain this object in */
++ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
++ AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
++ kernel = true;
++ if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
++ size = size << AMDGPU_GDS_SHIFT;
++ else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
++ size = size << AMDGPU_GWS_SHIFT;
++ else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
++ size = size << AMDGPU_OA_SHIFT;
++ else {
++ r = -EINVAL;
++ goto error_unlock;
++ }
++ }
++ size = roundup(size, PAGE_SIZE);
++
++ r = amdgpu_gem_object_create(adev, size, args->in.alignment,
++ (u32)(0xffffffff & args->in.domains),
++ args->in.domain_flags,
++ kernel, &gobj);
++ if (r)
++ goto error_unlock;
++
++ r = drm_gem_handle_create(filp, gobj, &handle);
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_unreference_unlocked(gobj);
++ if (r)
++ goto error_unlock;
++
++ memset(args, 0, sizeof(*args));
++ args->out.handle = handle;
++ up_read(&adev->exclusive_lock);
++ return 0;
++
++error_unlock:
++ up_read(&adev->exclusive_lock);
++ r = amdgpu_gem_handle_lockup(adev, r);
++ return r;
++}
++
++int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ struct drm_amdgpu_gem_userptr *args = data;
++ struct drm_gem_object *gobj;
++ struct amdgpu_bo *bo;
++ uint32_t handle;
++ int r;
++
++ if (offset_in_page(args->addr | args->size))
++ return -EINVAL;
++
++ /* reject unknown flag values */
++ if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
++ AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
++ AMDGPU_GEM_USERPTR_REGISTER))
++ return -EINVAL;
++
++ if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
++ !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
++
++ /* if we want to write to it we must require anonymous
++ memory and install an MMU notifier */
++ return -EACCES;
++ }
++
++ down_read(&adev->exclusive_lock);
++
++ /* create a gem object to contain this object in */
++ r = amdgpu_gem_object_create(adev, args->size, 0,
++ AMDGPU_GEM_DOMAIN_CPU, 0,
++ 0, &gobj);
++ if (r)
++ goto handle_lockup;
++
++ bo = gem_to_amdgpu_bo(gobj);
++ r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
++ if (r)
++ goto release_object;
++
++ if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
++ r = amdgpu_mn_register(bo, args->addr);
++ if (r)
++ goto release_object;
++ }
++
++ if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
++ down_read(&current->mm->mmap_sem);
++ r = amdgpu_bo_reserve(bo, true);
++ if (r) {
++ up_read(&current->mm->mmap_sem);
++ goto release_object;
++ }
++
++ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
++ amdgpu_bo_unreserve(bo);
++ up_read(&current->mm->mmap_sem);
++ if (r)
++ goto release_object;
++ }
++
++ r = drm_gem_handle_create(filp, gobj, &handle);
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_unreference_unlocked(gobj);
++ if (r)
++ goto handle_lockup;
++
++ args->handle = handle;
++ up_read(&adev->exclusive_lock);
++ return 0;
++
++release_object:
++ drm_gem_object_unreference_unlocked(gobj);
++
++handle_lockup:
++ up_read(&adev->exclusive_lock);
++ r = amdgpu_gem_handle_lockup(adev, r);
++
++ return r;
++}
++
++int amdgpu_mode_dumb_mmap(struct drm_file *filp,
++ struct drm_device *dev,
++ uint32_t handle, uint64_t *offset_p)
++{
++ struct drm_gem_object *gobj;
++ struct amdgpu_bo *robj;
++
++ gobj = drm_gem_object_lookup(dev, filp, handle);
++ if (gobj == NULL) {
++ return -ENOENT;
++ }
++ robj = gem_to_amdgpu_bo(gobj);
++ if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
++ drm_gem_object_unreference_unlocked(gobj);
++ return -EPERM;
++ }
++ *offset_p = amdgpu_bo_mmap_offset(robj);
++ drm_gem_object_unreference_unlocked(gobj);
++ return 0;
++}
++
++int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ union drm_amdgpu_gem_mmap *args = data;
++ uint32_t handle = args->in.handle;
++ memset(args, 0, sizeof(*args));
++ return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
++}
++
++/**
++ * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
++ *
++ * @timeout_ns: timeout in ns
++ *
++ * Calculate the timeout in jiffies from an absolute timeout in ns.
++ */
++unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
++{
++ unsigned long timeout_jiffies;
++ ktime_t timeout;
++
++ /* clamp timeout if it's too large */
++ if (((int64_t)timeout_ns) < 0)
++ return MAX_SCHEDULE_TIMEOUT;
++
++ timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
++ if (ktime_to_ns(timeout) < 0)
++ return 0;
++
++ timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
++ /* clamp timeout to avoid unsigned -> signed overflow */
++ if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
++ return MAX_SCHEDULE_TIMEOUT - 1;
++
++ return timeout_jiffies;
++}
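++
++/*
++ * Worked example (illustrative, not part of the original patch):
++ * with HZ = 1000, an absolute timeout 500 ms in the future yields
++ * roughly nsecs_to_jiffies(500000000) = 500 jiffies, while an
++ * absolute timeout already in the past yields 0 (do not wait).
++ */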
++
++int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ union drm_amdgpu_gem_wait_idle *args = data;
++ struct drm_gem_object *gobj;
++ struct amdgpu_bo *robj;
++ uint32_t handle = args->in.handle;
++ unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
++ int r = 0;
++ long ret;
++
++ gobj = drm_gem_object_lookup(dev, filp, handle);
++ if (gobj == NULL) {
++ return -ENOENT;
++ }
++ robj = gem_to_amdgpu_bo(gobj);
++ if (timeout == 0)
++ ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
++ else
++ ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
++
++ /* ret == 0 means not signaled,
++ * ret > 0 means signaled
++ * ret < 0 means interrupted before timeout
++ */
++ if (ret >= 0) {
++ memset(args, 0, sizeof(*args));
++ args->out.status = (ret == 0);
++ } else
++ r = ret;
++
++ drm_gem_object_unreference_unlocked(gobj);
++ r = amdgpu_gem_handle_lockup(adev, r);
++ return r;
++}
++
++int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ struct drm_amdgpu_gem_metadata *args = data;
++ struct drm_gem_object *gobj;
++ struct amdgpu_bo *robj;
++ int r = -1;
++
++	DRM_DEBUG("%d\n", args->handle);
++ gobj = drm_gem_object_lookup(dev, filp, args->handle);
++ if (gobj == NULL)
++ return -ENOENT;
++ robj = gem_to_amdgpu_bo(gobj);
++
++ r = amdgpu_bo_reserve(robj, false);
++ if (unlikely(r != 0))
++ goto out;
++
++ if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
++ amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
++ r = amdgpu_bo_get_metadata(robj, args->data.data,
++ sizeof(args->data.data),
++ &args->data.data_size_bytes,
++ &args->data.flags);
++ } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
++ r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
++ if (!r)
++ r = amdgpu_bo_set_metadata(robj, args->data.data,
++ args->data.data_size_bytes,
++ args->data.flags);
++ }
++
++ amdgpu_bo_unreserve(robj);
++out:
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++}
++
++/**
++ * amdgpu_gem_va_update_vm -update the bo_va in its VM
++ *
++ * @adev: amdgpu_device pointer
++ * @bo_va: bo_va to update
++ *
++ * Update the bo_va directly after setting its address. Errors are not
++ * vital here, so they are not reported back to userspace.
++ */
++static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
++ struct amdgpu_bo_va *bo_va)
++{
++ struct ttm_validate_buffer tv, *entry;
++ struct amdgpu_bo_list_entry *vm_bos;
++ struct ww_acquire_ctx ticket;
++ struct list_head list;
++ unsigned domain;
++ int r;
++
++ INIT_LIST_HEAD(&list);
++
++ tv.bo = &bo_va->bo->tbo;
++ tv.shared = true;
++ list_add(&tv.head, &list);
++
++ vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
++ if (!vm_bos)
++ return;
++
++ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
++ if (r)
++ goto error_free;
++
++ list_for_each_entry(entry, &list, head) {
++ domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
++		/* if anything is swapped out don't swap it in here,
++		 * just abort and wait for the next CS */
++ if (domain == AMDGPU_GEM_DOMAIN_CPU)
++ goto error_unreserve;
++ }
++
++ mutex_lock(&bo_va->vm->mutex);
++ r = amdgpu_vm_clear_freed(adev, bo_va->vm);
++ if (r)
++ goto error_unlock;
++
++ r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
++
++error_unlock:
++ mutex_unlock(&bo_va->vm->mutex);
++
++error_unreserve:
++ ttm_eu_backoff_reservation(&ticket, &list);
++
++error_free:
++ drm_free_large(vm_bos);
++
++ if (r)
++ DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
++}
++
++int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ union drm_amdgpu_gem_va *args = data;
++ struct drm_gem_object *gobj;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_fpriv *fpriv = filp->driver_priv;
++ struct amdgpu_bo *rbo;
++ struct amdgpu_bo_va *bo_va;
++ uint32_t invalid_flags, va_flags = 0;
++ int r = 0;
++
++ if (!adev->vm_manager.enabled) {
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_ERROR;
++ return -ENOTTY;
++ }
++
++ if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) {
++ dev_err(&dev->pdev->dev,
++ "va_address 0x%lX is in reserved area 0x%X\n",
++ (unsigned long)args->in.va_address,
++ AMDGPU_VA_RESERVED_SIZE);
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_ERROR;
++ return -EINVAL;
++ }
++
++ invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
++ AMDGPU_VM_PAGE_EXECUTABLE);
++ if ((args->in.flags & invalid_flags)) {
++ dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
++ args->in.flags, invalid_flags);
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_ERROR;
++ return -EINVAL;
++ }
++
++ switch (args->in.operation) {
++ case AMDGPU_VA_OP_MAP:
++ case AMDGPU_VA_OP_UNMAP:
++ break;
++ default:
++ dev_err(&dev->pdev->dev, "unsupported operation %d\n",
++ args->in.operation);
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_ERROR;
++ return -EINVAL;
++ }
++
++ gobj = drm_gem_object_lookup(dev, filp, args->in.handle);
++ if (gobj == NULL) {
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_ERROR;
++ return -ENOENT;
++ }
++ rbo = gem_to_amdgpu_bo(gobj);
++ r = amdgpu_bo_reserve(rbo, false);
++ if (r) {
++ if (r != -ERESTARTSYS) {
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_ERROR;
++ }
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++ }
++ bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
++ if (!bo_va) {
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_ERROR;
++ drm_gem_object_unreference_unlocked(gobj);
++ return -ENOENT;
++ }
++
++ switch (args->in.operation) {
++ case AMDGPU_VA_OP_MAP:
++ if (args->in.flags & AMDGPU_VM_PAGE_READABLE)
++ va_flags |= AMDGPU_PTE_READABLE;
++ if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE)
++ va_flags |= AMDGPU_PTE_WRITEABLE;
++ if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE)
++ va_flags |= AMDGPU_PTE_EXECUTABLE;
++ r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address, 0,
++ amdgpu_bo_size(bo_va->bo), va_flags);
++ break;
++ case AMDGPU_VA_OP_UNMAP:
++ r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address);
++ break;
++ default:
++ break;
++ }
++
++ if (!r) {
++ amdgpu_gem_va_update_vm(adev, bo_va);
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_OK;
++ } else {
++ memset(args, 0, sizeof(*args));
++ args->out.result = AMDGPU_VA_RESULT_ERROR;
++ }
++
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++}
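++
++/*
++ * Illustrative sketch (editor's note, not part of the original patch):
++ * a hypothetical map request using the fields validated above. The
++ * handle and virtual address are placeholders; the address must lie
++ * above AMDGPU_VA_RESERVED_SIZE.
++ *
++ *	args.in.handle     = bo_handle;
++ *	args.in.operation  = AMDGPU_VA_OP_MAP;
++ *	args.in.flags      = AMDGPU_VM_PAGE_READABLE |
++ *			     AMDGPU_VM_PAGE_WRITEABLE;
++ *	args.in.va_address = virtual_address;
++ *	... on success, args.out.result == AMDGPU_VA_RESULT_OK ...
++ */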
++
++int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *filp)
++{
++ struct drm_amdgpu_gem_op *args = data;
++ struct drm_gem_object *gobj;
++ struct amdgpu_bo *robj;
++ int r;
++
++ gobj = drm_gem_object_lookup(dev, filp, args->handle);
++ if (gobj == NULL) {
++ return -ENOENT;
++ }
++ robj = gem_to_amdgpu_bo(gobj);
++
++ r = amdgpu_bo_reserve(robj, false);
++ if (unlikely(r))
++ goto out;
++
++ switch (args->op) {
++ case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
++ struct drm_amdgpu_gem_create_in info;
++ void __user *out = (void __user *)(long)args->value;
++
++ info.bo_size = robj->gem_base.size;
++ info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
++ info.domains = robj->initial_domain;
++ info.domain_flags = robj->flags;
++ if (copy_to_user(out, &info, sizeof(info)))
++ r = -EFAULT;
++ break;
++ }
++ case AMDGPU_GEM_OP_SET_INITIAL_DOMAIN:
++ if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
++ r = -EPERM;
++ break;
++ }
++ robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
++ AMDGPU_GEM_DOMAIN_GTT |
++ AMDGPU_GEM_DOMAIN_CPU);
++ break;
++ default:
++ r = -EINVAL;
++ }
++
++ amdgpu_bo_unreserve(robj);
++out:
++ drm_gem_object_unreference_unlocked(gobj);
++ return r;
++}
++
++int amdgpu_mode_dumb_create(struct drm_file *file_priv,
++ struct drm_device *dev,
++ struct drm_mode_create_dumb *args)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ struct drm_gem_object *gobj;
++ uint32_t handle;
++ int r;
++
++	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) *
++		      ((args->bpp + 1) / 8);
++ args->size = args->pitch * args->height;
++ args->size = ALIGN(args->size, PAGE_SIZE);
++
++ r = amdgpu_gem_object_create(adev, args->size, 0,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ 0, ttm_bo_type_device,
++ &gobj);
++ if (r)
++ return -ENOMEM;
++
++ r = drm_gem_handle_create(file_priv, gobj, &handle);
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_unreference_unlocked(gobj);
++ if (r) {
++ return r;
++ }
++ args->handle = handle;
++ return 0;
++}
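++
++/*
++ * Illustrative arithmetic (editor's note, not part of the original
++ * patch): for a 1920x1080, 32bpp dumb buffer, and assuming
++ * amdgpu_align_pitch() leaves a 1920-pixel pitch unchanged:
++ *
++ *	pitch = 1920 * ((32 + 1) / 8) = 1920 * 4 = 7680 bytes
++ *	size  = ALIGN(7680 * 1080, PAGE_SIZE) = 8294400 bytes
++ *		(already a multiple of 4K pages)
++ */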
++
++#if defined(CONFIG_DEBUG_FS)
++static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *)m->private;
++ struct drm_device *dev = node->minor->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_bo *rbo;
++ unsigned i = 0;
++
++ mutex_lock(&adev->gem.mutex);
++ list_for_each_entry(rbo, &adev->gem.objects, list) {
++ unsigned domain;
++ const char *placement;
++
++ domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
++ switch (domain) {
++ case AMDGPU_GEM_DOMAIN_VRAM:
++ placement = "VRAM";
++ break;
++ case AMDGPU_GEM_DOMAIN_GTT:
++ placement = " GTT";
++ break;
++ case AMDGPU_GEM_DOMAIN_CPU:
++ default:
++ placement = " CPU";
++ break;
++ }
++ seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
++ i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
++ placement, (unsigned long)rbo->pid);
++ i++;
++ }
++ mutex_unlock(&adev->gem.mutex);
++ return 0;
++}
++
++static struct drm_info_list amdgpu_debugfs_gem_list[] = {
++ {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
++};
++#endif
++
++int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
++{
++#if defined(CONFIG_DEBUG_FS)
++ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
++#endif
++ return 0;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+new file mode 100644
+index 0000000..9f95da4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -0,0 +1,72 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include <drm/drmP.h>
++#include "amdgpu.h"
++
++/*
++ * GPU scratch register helper functions.
++ */
++/**
++ * amdgpu_gfx_scratch_get - Allocate a scratch register
++ *
++ * @adev: amdgpu_device pointer
++ * @reg: scratch register mmio offset
++ *
++ * Allocate a CP scratch register for use by the driver (all asics).
++ * Returns 0 on success or -EINVAL on failure.
++ */
++int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
++{
++ int i;
++
++ for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
++ if (adev->gfx.scratch.free[i]) {
++ adev->gfx.scratch.free[i] = false;
++ *reg = adev->gfx.scratch.reg[i];
++ return 0;
++ }
++ }
++ return -EINVAL;
++}
++
++/**
++ * amdgpu_gfx_scratch_free - Free a scratch register
++ *
++ * @adev: amdgpu_device pointer
++ * @reg: scratch register mmio offset
++ *
++ * Free a CP scratch register allocated for use by the driver (all asics)
++ */
++void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
++{
++ int i;
++
++ for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
++ if (adev->gfx.scratch.reg[i] == reg) {
++ adev->gfx.scratch.free[i] = true;
++ return;
++ }
++ }
++}
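++
++/*
++ * Illustrative sketch (editor's note, not part of the original patch):
++ * a typical ring-test style user of the scratch helpers. The WREG32()
++ * accessor and the ring polling logic are assumed from driver context.
++ *
++ *	uint32_t scratch;
++ *
++ *	if (amdgpu_gfx_scratch_get(adev, &scratch) == 0) {
++ *		WREG32(scratch, 0xCAFEDEAD);
++ *		... submit a packet that writes the register,
++ *		... then poll it for the new value ...
++ *		amdgpu_gfx_scratch_free(adev, scratch);
++ *	}
++ */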
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+new file mode 100644
+index 0000000..dc06cbd
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+@@ -0,0 +1,30 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_GFX_H__
++#define __AMDGPU_GFX_H__
++
++int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
++void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+new file mode 100644
+index 0000000..31a6763
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+@@ -0,0 +1,395 @@
++/*
++ * Copyright 2007-8 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ */
++#include <linux/export.h>
++
++#include <drm/drmP.h>
++#include <drm/drm_edid.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_i2c.h"
++#include "amdgpu_atombios.h"
++#include "atom.h"
++#include "atombios_dp.h"
++#include "atombios_i2c.h"
++
++/* bit banging i2c */
++static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
++{
++ struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
++ struct amdgpu_device *adev = i2c->dev->dev_private;
++ struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
++ uint32_t temp;
++
++ mutex_lock(&i2c->mutex);
++
++ /* switch the pads to ddc mode */
++ if (rec->hw_capable) {
++ temp = RREG32(rec->mask_clk_reg);
++ temp &= ~(1 << 16);
++ WREG32(rec->mask_clk_reg, temp);
++ }
++
++ /* clear the output pin values */
++ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
++ WREG32(rec->a_clk_reg, temp);
++
++ temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
++ WREG32(rec->a_data_reg, temp);
++
++ /* set the pins to input */
++ temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
++ WREG32(rec->en_clk_reg, temp);
++
++ temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
++ WREG32(rec->en_data_reg, temp);
++
++ /* mask the gpio pins for software use */
++ temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
++ WREG32(rec->mask_clk_reg, temp);
++ temp = RREG32(rec->mask_clk_reg);
++
++ temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
++ WREG32(rec->mask_data_reg, temp);
++ temp = RREG32(rec->mask_data_reg);
++
++ return 0;
++}
++
++static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
++{
++ struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
++ struct amdgpu_device *adev = i2c->dev->dev_private;
++ struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
++ uint32_t temp;
++
++ /* unmask the gpio pins for software use */
++ temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
++ WREG32(rec->mask_clk_reg, temp);
++ temp = RREG32(rec->mask_clk_reg);
++
++ temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
++ WREG32(rec->mask_data_reg, temp);
++ temp = RREG32(rec->mask_data_reg);
++
++ mutex_unlock(&i2c->mutex);
++}
++
++static int amdgpu_i2c_get_clock(void *i2c_priv)
++{
++ struct amdgpu_i2c_chan *i2c = i2c_priv;
++ struct amdgpu_device *adev = i2c->dev->dev_private;
++ struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
++ uint32_t val;
++
++ /* read the value off the pin */
++ val = RREG32(rec->y_clk_reg);
++ val &= rec->y_clk_mask;
++
++ return (val != 0);
++}
++
++static int amdgpu_i2c_get_data(void *i2c_priv)
++{
++ struct amdgpu_i2c_chan *i2c = i2c_priv;
++ struct amdgpu_device *adev = i2c->dev->dev_private;
++ struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
++ uint32_t val;
++
++ /* read the value off the pin */
++ val = RREG32(rec->y_data_reg);
++ val &= rec->y_data_mask;
++
++ return (val != 0);
++}
++
++static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
++{
++ struct amdgpu_i2c_chan *i2c = i2c_priv;
++ struct amdgpu_device *adev = i2c->dev->dev_private;
++ struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
++ uint32_t val;
++
++ /* set pin direction */
++ val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
++ val |= clock ? 0 : rec->en_clk_mask;
++ WREG32(rec->en_clk_reg, val);
++}
++
++static void amdgpu_i2c_set_data(void *i2c_priv, int data)
++{
++ struct amdgpu_i2c_chan *i2c = i2c_priv;
++ struct amdgpu_device *adev = i2c->dev->dev_private;
++ struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
++ uint32_t val;
++
++ /* set pin direction */
++ val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
++ val |= data ? 0 : rec->en_data_mask;
++ WREG32(rec->en_data_reg, val);
++}
++
++static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
++ .master_xfer = amdgpu_atombios_i2c_xfer,
++ .functionality = amdgpu_atombios_i2c_func,
++};
++
++struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
++ struct amdgpu_i2c_bus_rec *rec,
++ const char *name)
++{
++ struct amdgpu_i2c_chan *i2c;
++ int ret;
++
++ /* don't add the mm_i2c bus unless hw_i2c is enabled */
++ if (rec->mm_i2c && (amdgpu_hw_i2c == 0))
++ return NULL;
++
++ i2c = kzalloc(sizeof(struct amdgpu_i2c_chan), GFP_KERNEL);
++ if (i2c == NULL)
++ return NULL;
++
++ i2c->rec = *rec;
++ i2c->adapter.owner = THIS_MODULE;
++ i2c->adapter.class = I2C_CLASS_DDC;
++ i2c->adapter.dev.parent = &dev->pdev->dev;
++ i2c->dev = dev;
++ i2c_set_adapdata(&i2c->adapter, i2c);
++ mutex_init(&i2c->mutex);
++ if (rec->hw_capable &&
++ amdgpu_hw_i2c) {
++ /* hw i2c using atom */
++ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
++ "AMDGPU i2c hw bus %s", name);
++ i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
++ ret = i2c_add_adapter(&i2c->adapter);
++ if (ret) {
++ DRM_ERROR("Failed to register hw i2c %s\n", name);
++ goto out_free;
++ }
++ } else {
++ /* set the amdgpu bit adapter */
++ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
++ "AMDGPU i2c bit bus %s", name);
++ i2c->adapter.algo_data = &i2c->bit;
++ i2c->bit.pre_xfer = amdgpu_i2c_pre_xfer;
++ i2c->bit.post_xfer = amdgpu_i2c_post_xfer;
++ i2c->bit.setsda = amdgpu_i2c_set_data;
++ i2c->bit.setscl = amdgpu_i2c_set_clock;
++ i2c->bit.getsda = amdgpu_i2c_get_data;
++ i2c->bit.getscl = amdgpu_i2c_get_clock;
++ i2c->bit.udelay = 10;
++ i2c->bit.timeout = usecs_to_jiffies(2200); /* from VESA */
++ i2c->bit.data = i2c;
++ ret = i2c_bit_add_bus(&i2c->adapter);
++ if (ret) {
++ DRM_ERROR("Failed to register bit i2c %s\n", name);
++ goto out_free;
++ }
++ }
++
++ return i2c;
++out_free:
++ kfree(i2c);
++ return NULL;
++}
++
++void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
++{
++ if (!i2c)
++ return;
++ i2c_del_adapter(&i2c->adapter);
++ kfree(i2c);
++}
++
++/* Add the default buses */
++void amdgpu_i2c_init(struct amdgpu_device *adev)
++{
++ if (amdgpu_hw_i2c)
++ DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
++
++ if (adev->is_atom_bios)
++ amdgpu_atombios_i2c_init(adev);
++}
++
++/* remove all the buses */
++void amdgpu_i2c_fini(struct amdgpu_device *adev)
++{
++ int i;
++
++ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
++ if (adev->i2c_bus[i]) {
++ amdgpu_i2c_destroy(adev->i2c_bus[i]);
++ adev->i2c_bus[i] = NULL;
++ }
++ }
++}
++
++/* Add additional buses */
++void amdgpu_i2c_add(struct amdgpu_device *adev,
++ struct amdgpu_i2c_bus_rec *rec,
++ const char *name)
++{
++ struct drm_device *dev = adev->ddev;
++ int i;
++
++ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
++ if (!adev->i2c_bus[i]) {
++ adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name);
++ return;
++ }
++ }
++}
++
++/* looks up bus based on id */
++struct amdgpu_i2c_chan *
++amdgpu_i2c_lookup(struct amdgpu_device *adev,
++ struct amdgpu_i2c_bus_rec *i2c_bus)
++{
++ int i;
++
++ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
++ if (adev->i2c_bus[i] &&
++ (adev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
++ return adev->i2c_bus[i];
++ }
++ }
++ return NULL;
++}
++
++static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
++ u8 slave_addr,
++ u8 addr,
++ u8 *val)
++{
++ u8 out_buf[2];
++ u8 in_buf[2];
++ struct i2c_msg msgs[] = {
++ {
++ .addr = slave_addr,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = slave_addr,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = in_buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = 0;
++
++ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
++ *val = in_buf[0];
++ DRM_DEBUG("val = 0x%02x\n", *val);
++ } else {
++ DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
++ addr, *val);
++ }
++}
++
++static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
++ u8 slave_addr,
++ u8 addr,
++ u8 val)
++{
++ uint8_t out_buf[2];
++ struct i2c_msg msg = {
++ .addr = slave_addr,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = val;
++
++ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
++ DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
++ addr, val);
++}
++
++/* ddc router switching */
++void
++amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
++{
++ u8 val;
++
++ if (!amdgpu_connector->router.ddc_valid)
++ return;
++
++ if (!amdgpu_connector->router_bus)
++ return;
++
++ amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
++ amdgpu_connector->router.i2c_addr,
++ 0x3, &val);
++ val &= ~amdgpu_connector->router.ddc_mux_control_pin;
++ amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
++ amdgpu_connector->router.i2c_addr,
++ 0x3, val);
++ amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
++ amdgpu_connector->router.i2c_addr,
++ 0x1, &val);
++ val &= ~amdgpu_connector->router.ddc_mux_control_pin;
++ val |= amdgpu_connector->router.ddc_mux_state;
++ amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
++ amdgpu_connector->router.i2c_addr,
++ 0x1, val);
++}
++
++/* clock/data router switching */
++void
++amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector)
++{
++ u8 val;
++
++ if (!amdgpu_connector->router.cd_valid)
++ return;
++
++ if (!amdgpu_connector->router_bus)
++ return;
++
++ amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
++ amdgpu_connector->router.i2c_addr,
++ 0x3, &val);
++ val &= ~amdgpu_connector->router.cd_mux_control_pin;
++ amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
++ amdgpu_connector->router.i2c_addr,
++ 0x3, val);
++ amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
++ amdgpu_connector->router.i2c_addr,
++ 0x1, &val);
++ val &= ~amdgpu_connector->router.cd_mux_control_pin;
++ val |= amdgpu_connector->router.cd_mux_state;
++ amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
++ amdgpu_connector->router.i2c_addr,
++ 0x1, val);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+new file mode 100644
+index 0000000..d81e19b
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+@@ -0,0 +1,44 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_I2C_H__
++#define __AMDGPU_I2C_H__
++
++struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
++ struct amdgpu_i2c_bus_rec *rec,
++ const char *name);
++void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
++void amdgpu_i2c_init(struct amdgpu_device *adev);
++void amdgpu_i2c_fini(struct amdgpu_device *adev);
++void amdgpu_i2c_add(struct amdgpu_device *adev,
++ struct amdgpu_i2c_bus_rec *rec,
++ const char *name);
++struct amdgpu_i2c_chan *
++amdgpu_i2c_lookup(struct amdgpu_device *adev,
++ struct amdgpu_i2c_bus_rec *i2c_bus);
++void
++amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector);
++void
++amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+new file mode 100644
+index 0000000..847cab2
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -0,0 +1,345 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ * Christian König
++ */
++#include <linux/seq_file.h>
++#include <linux/slab.h>
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "atom.h"
++
++/*
++ * IB
++ * IBs (Indirect Buffers) are areas of GPU-accessible memory where
++ * commands are stored. You can put a pointer to the IB in the
++ * command ring and the hw will fetch the commands from the IB
++ * and execute them. Generally userspace acceleration drivers
++ * produce command buffers which are sent to the kernel and
++ * put in IBs for execution by the requested ring.
++ */
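++
++/*
++ * Illustrative lifecycle (editor's note, not part of the original
++ * patch), using only the helpers defined in this file; error handling
++ * and the packet contents are elided, and the length_dw field is
++ * assumed from the amdgpu_ib definition:
++ *
++ *	struct amdgpu_ib ib;
++ *
++ *	if (amdgpu_ib_get(ring, NULL, 256, &ib) == 0) {
++ *		... write packets into ib.ptr, set ib.length_dw ...
++ *		amdgpu_ib_schedule(adev, 1, &ib, owner);
++ *		amdgpu_ib_free(adev, &ib);
++ *	}
++ */
++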
++static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
++
++/**
++ * amdgpu_ib_get - request an IB (Indirect Buffer)
++ *
++ * @ring: ring index the IB is associated with
++ * @size: requested IB size
++ * @ib: IB object returned
++ *
++ * Request an IB (all asics). IBs are allocated using the
++ * suballocator.
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
++ unsigned size, struct amdgpu_ib *ib)
++{
++ struct amdgpu_device *adev = ring->adev;
++ int r;
++
++ if (size) {
++ r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
++ &ib->sa_bo, size, 256);
++ if (r) {
++ dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
++ return r;
++ }
++
++ ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
++
++ if (!vm)
++ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
++ else
++ ib->gpu_addr = 0;
++
++ } else {
++ ib->sa_bo = NULL;
++ ib->ptr = NULL;
++ ib->gpu_addr = 0;
++ }
++
++ amdgpu_sync_create(&ib->sync);
++
++ ib->ring = ring;
++ ib->fence = NULL;
++ ib->user = NULL;
++ ib->vm = vm;
++ ib->is_const_ib = false;
++ ib->gds_base = 0;
++ ib->gds_size = 0;
++ ib->gws_base = 0;
++ ib->gws_size = 0;
++ ib->oa_base = 0;
++ ib->oa_size = 0;
++
++ return 0;
++}
++
++/**
++ * amdgpu_ib_free - free an IB (Indirect Buffer)
++ *
++ * @adev: amdgpu_device pointer
++ * @ib: IB object to free
++ *
++ * Free an IB (all asics).
++ */
++void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
++{
++ amdgpu_sync_free(adev, &ib->sync, ib->fence);
++ amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
++ amdgpu_fence_unref(&ib->fence);
++}
++
++/**
++ * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
++ *
++ * @adev: amdgpu_device pointer
++ * @num_ibs: number of IBs to schedule
++ * @ibs: IB objects to schedule
++ * @owner: owner for creating the fences
++ *
++ * Schedule an IB on the associated ring (all asics).
++ * Returns 0 on success, error on failure.
++ *
++ * On SI, there are two parallel engines fed from the primary ring,
++ * the CE (Constant Engine) and the DE (Drawing Engine). Since
++ * resource descriptors have moved to memory, the CE allows you to
++ * prime the caches while the DE is updating register state so that
++ * the resource descriptors will be already in cache when the draw is
++ * processed. To accomplish this, the userspace driver submits two
++ * IBs, one for the CE and one for the DE. If there is a CE IB (called
++ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
++ * to SI there was just a DE IB.
++ */
++int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
++ struct amdgpu_ib *ibs, void *owner)
++{
++ struct amdgpu_ring *ring;
++ struct amdgpu_vm *vm = ibs->vm;
++ struct amdgpu_ib *ib = &ibs[0];
++ unsigned i;
++ int r = 0;
++ bool flush_hdp = true;
++
++ if (num_ibs == 0)
++ return -EINVAL;
++
++ ring = ibs->ring;
++ if (!ring->ready) {
++ dev_err(adev->dev, "couldn't schedule ib\n");
++ return -EINVAL;
++ }
++
++ r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
++ if (r) {
++ dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
++ return r;
++ }
++
++ if (vm) {
++ /* grab a vm id if necessary */
++ struct amdgpu_fence *vm_id_fence = NULL;
++ vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
++ amdgpu_sync_fence(&ibs->sync, vm_id_fence);
++ }
++
++ r = amdgpu_sync_rings(&ibs->sync, ring);
++ if (r) {
++ amdgpu_ring_unlock_undo(ring);
++ dev_err(adev->dev, "failed to sync rings (%d)\n", r);
++ return r;
++ }
++
++ if (vm) {
++ /* do context switch */
++ amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
++ }
++
++ if (ring->funcs->emit_gds_switch && ib->vm && ib->gds_needed)
++ amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
++ ib->gds_base, ib->gds_size,
++ ib->gws_base, ib->gws_size,
++ ib->oa_base, ib->oa_size);
++
++ for (i = 0; i < num_ibs; ++i) {
++ ib = &ibs[i];
++
++ if (ib->ring != ring) {
++ amdgpu_ring_unlock_undo(ring);
++ return -EINVAL;
++ }
++ ib->flush_hdp_writefifo = flush_hdp;
++ flush_hdp = false;
++ amdgpu_ring_emit_ib(ring, ib);
++ }
++
++ r = amdgpu_fence_emit(ring, owner, &ib->fence);
++ if (r) {
++ dev_err(adev->dev, "failed to emit fence (%d)\n", r);
++ amdgpu_ring_unlock_undo(ring);
++ return r;
++ }
++
++ /* wrap the last IB with fence */
++ if (ib->user) {
++ uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
++ addr += ib->user->offset;
++ amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, true);
++ }
++
++ if (ib->vm)
++ amdgpu_vm_fence(adev, ib->vm, ib->fence);
++
++ amdgpu_ring_unlock_commit(ring);
++ return 0;
++}
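++
++/*
++ * Illustrative sketch (editor's note, not part of the original patch):
++ * the two-IB CE/DE submission described above. The CE IB is marked via
++ * the is_const_ib flag initialized in amdgpu_ib_get() and is placed
++ * first in the array so it hits the ring before the DE IB:
++ *
++ *	struct amdgpu_ib ibs[2];
++ *
++ *	... amdgpu_ib_get() and fill both IBs ...
++ *	ibs[0].is_const_ib = true;
++ *	r = amdgpu_ib_schedule(adev, 2, ibs, owner);
++ */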
++
++/**
++ * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Initialize the suballocator to manage a pool of memory
++ * for use as IBs (all asics).
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_ib_pool_init(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->ib_pool_ready) {
++ return 0;
++ }
++ r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
++ AMDGPU_IB_POOL_SIZE*64*1024,
++ AMDGPU_GPU_PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_GTT);
++ if (r) {
++ return r;
++ }
++
++ r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
++ if (r) {
++ return r;
++ }
++
++ adev->ib_pool_ready = true;
++ if (amdgpu_debugfs_sa_init(adev)) {
++ dev_err(adev->dev, "failed to register debugfs file for SA\n");
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Tear down the suballocator managing the pool of memory
++ * for use as IBs (all asics).
++ */
++void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
++{
++ if (adev->ib_pool_ready) {
++ amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
++ amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
++ adev->ib_pool_ready = false;
++ }
++}
++
++/**
++ * amdgpu_ib_ring_tests - test IBs on the rings
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Test an IB (Indirect Buffer) on each ring.
++ * If the test fails, disable the ring.
++ * Returns 0 on success, error if the primary GFX ring
++ * IB test fails.
++ */
++int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
++{
++ unsigned i;
++ int r;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_ring *ring = adev->rings[i];
++
++ if (!ring || !ring->ready)
++ continue;
++
++ r = amdgpu_ring_test_ib(ring);
++ if (r) {
++ ring->ready = false;
++ adev->needs_reset = false;
++
++ if (ring == &adev->gfx.gfx_ring[0]) {
++ /* oh, oh, that's really bad */
++ DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
++ adev->accel_working = false;
++ return r;
++
++ } else {
++ /* still not good, but we can live with it */
++ DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
++ }
++ }
++ }
++ return 0;
++}
++
++/*
++ * Debugfs info
++ */
++#if defined(CONFIG_DEBUG_FS)
++
++static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *) m->private;
++ struct drm_device *dev = node->minor->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++
++ amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
++
++ return 0;
++}
++
++static struct drm_info_list amdgpu_debugfs_sa_list[] = {
++ {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
++};
++
++#endif
++
++static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
++{
++#if defined(CONFIG_DEBUG_FS)
++ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
++#else
++ return 0;
++#endif
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+new file mode 100644
+index 0000000..db5422e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+@@ -0,0 +1,216 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "amdgpu_ih.h"
++
++/**
++ * amdgpu_ih_ring_alloc - allocate memory for the IH ring
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Allocate a ring buffer for the interrupt controller.
++ * Returns 0 for success, errors for failure.
++ */
++static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
++{
++ int r;
++
++ /* Allocate ring buffer */
++ if (adev->irq.ih.ring_obj == NULL) {
++ r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
++ PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_GTT, 0,
++ NULL, &adev->irq.ih.ring_obj);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
++ return r;
++ }
++ r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
++ if (unlikely(r != 0))
++ return r;
++ r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
++ AMDGPU_GEM_DOMAIN_GTT,
++ &adev->irq.ih.gpu_addr);
++ if (r) {
++ amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
++ DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
++ return r;
++ }
++ r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
++ (void **)&adev->irq.ih.ring);
++ amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
++ return r;
++ }
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_ih_ring_init - initialize the IH state
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Initializes the IH state and allocates a buffer
++ * for the IH ring buffer.
++ * Returns 0 for success, errors for failure.
++ */
++int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
++ bool use_bus_addr)
++{
++ u32 rb_bufsz;
++ int r;
++
++ /* Align ring size */
++ rb_bufsz = order_base_2(ring_size / 4);
++ ring_size = (1 << rb_bufsz) * 4;
++ adev->irq.ih.ring_size = ring_size;
++ adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
++ adev->irq.ih.rptr = 0;
++ adev->irq.ih.use_bus_addr = use_bus_addr;
++
++ if (adev->irq.ih.use_bus_addr) {
++ if (!adev->irq.ih.ring) {
++			/* allocate 8 extra bytes at the end of the
++			 * ring for the rptr/wptr shadows
++			 */
++ adev->irq.ih.ring = kzalloc(adev->irq.ih.ring_size + 8, GFP_KERNEL);
++ if (adev->irq.ih.ring == NULL)
++ return -ENOMEM;
++			adev->irq.ih.rb_dma_addr = pci_map_single(adev->pdev,
++							(void *)adev->irq.ih.ring,
++							adev->irq.ih.ring_size + 8,
++							PCI_DMA_BIDIRECTIONAL);
++ if (pci_dma_mapping_error(adev->pdev, adev->irq.ih.rb_dma_addr)) {
++ dev_err(&adev->pdev->dev, "Failed to DMA MAP the IH RB page\n");
++ kfree((void *)adev->irq.ih.ring);
++ return -ENOMEM;
++ }
++ adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
++ adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
++ }
++ return 0;
++ } else {
++ r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs);
++ if (r) {
++ dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
++ return r;
++ }
++
++ r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs);
++ if (r) {
++ amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
++ dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
++ return r;
++ }
++
++ return amdgpu_ih_ring_alloc(adev);
++ }
++}
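++
++/*
++ * Illustrative arithmetic (editor's note, not part of the original
++ * patch): the order_base_2() rounding above forces the ring to a
++ * power-of-two byte size. For a 64KB request:
++ *
++ *	rb_bufsz  = order_base_2(65536 / 4) = 14
++ *	ring_size = (1 << 14) * 4 = 65536 bytes
++ *	ptr_mask  = 65535
++ *
++ * A non-power-of-two request such as 96KB would round up to 128KB.
++ */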
++
++/**
++ * amdgpu_ih_ring_fini - tear down the IH state
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Tears down the IH state and frees buffer
++ * used for the IH ring buffer.
++ */
++void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->irq.ih.use_bus_addr) {
++ if (adev->irq.ih.ring) {
++			/* the allocation includes 8 extra bytes at the
++			 * end for the rptr/wptr shadows
++			 */
++ pci_unmap_single(adev->pdev, adev->irq.ih.rb_dma_addr,
++ adev->irq.ih.ring_size + 8, PCI_DMA_BIDIRECTIONAL);
++ kfree((void *)adev->irq.ih.ring);
++ adev->irq.ih.ring = NULL;
++ }
++ } else {
++ if (adev->irq.ih.ring_obj) {
++ r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
++ if (likely(r == 0)) {
++ amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
++ amdgpu_bo_unpin(adev->irq.ih.ring_obj);
++ amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
++ }
++ amdgpu_bo_unref(&adev->irq.ih.ring_obj);
++ adev->irq.ih.ring = NULL;
++ adev->irq.ih.ring_obj = NULL;
++ }
++ amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
++ amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
++ }
++}
++
++/**
++ * amdgpu_ih_process - interrupt handler
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Interrupt handler (VI); walks the IH ring.
++ * Returns irq process return code.
++ */
++int amdgpu_ih_process(struct amdgpu_device *adev)
++{
++ struct amdgpu_iv_entry entry;
++ u32 wptr;
++
++ if (!adev->irq.ih.enabled || adev->shutdown)
++ return IRQ_NONE;
++
++ wptr = amdgpu_ih_get_wptr(adev);
++
++restart_ih:
++ /* is somebody else already processing irqs? */
++ if (atomic_xchg(&adev->irq.ih.lock, 1))
++ return IRQ_NONE;
++
++ DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
++
++ /* Order reading of wptr vs. reading of IH ring data */
++ rmb();
++
++ while (adev->irq.ih.rptr != wptr) {
++ amdgpu_ih_decode_iv(adev, &entry);
++ adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
++
++ amdgpu_irq_dispatch(adev, &entry);
++ }
++ amdgpu_ih_set_rptr(adev);
++ atomic_set(&adev->irq.ih.lock, 0);
++
++ /* make sure wptr hasn't changed while processing */
++ wptr = amdgpu_ih_get_wptr(adev);
++ if (wptr != adev->irq.ih.rptr)
++ goto restart_ih;
++
++ return IRQ_HANDLED;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+new file mode 100644
+index 0000000..c62b09e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -0,0 +1,62 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_IH_H__
++#define __AMDGPU_IH_H__
++
++struct amdgpu_device;
++
++/*
++ * R6xx+ IH ring
++ */
++struct amdgpu_ih_ring {
++ struct amdgpu_bo *ring_obj;
++ volatile uint32_t *ring;
++ unsigned rptr;
++ unsigned ring_size;
++ uint64_t gpu_addr;
++ uint32_t ptr_mask;
++ atomic_t lock;
++ bool enabled;
++ unsigned wptr_offs;
++ unsigned rptr_offs;
++ u32 doorbell_index;
++ bool use_doorbell;
++ bool use_bus_addr;
++ dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
++};
++
++struct amdgpu_iv_entry {
++ unsigned src_id;
++ unsigned src_data;
++ unsigned ring_id;
++ unsigned vm_id;
++ unsigned pas_id;
++};
++
++int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
++ bool use_bus_addr);
++void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
++int amdgpu_ih_process(struct amdgpu_device *adev);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
+new file mode 100644
+index 0000000..2648291
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
+@@ -0,0 +1,47 @@
++/**
++ * \file amdgpu_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the AMDGPU DRM.
++ *
++ * \author Paul Mackerras <paulus@samba.org>
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu_drv.h"
++
++long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++ unsigned int nr = DRM_IOCTL_NR(cmd);
++ int ret;
++
++ if (nr < DRM_COMMAND_BASE)
++ return drm_compat_ioctl(filp, cmd, arg);
++
++ ret = amdgpu_drm_ioctl(filp, cmd, arg);
++
++ return ret;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+new file mode 100644
+index 0000000..2187960
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -0,0 +1,456 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#include <drm/drmP.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_ih.h"
++#include "atom.h"
++#include "amdgpu_connectors.h"
++
++#include <linux/pm_runtime.h>
++
++#define AMDGPU_WAIT_IDLE_TIMEOUT 200
++
++/*
++ * Handle hotplug events outside the interrupt handler proper.
++ */
++/**
++ * amdgpu_hotplug_work_func - display hotplug work handler
++ *
++ * @work: work struct
++ *
++ * This is the hot plug event work handler (all asics).
++ * The work gets scheduled from the irq handler if there
++ * was a hot plug interrupt. It walks the connector table
++ * and calls the hotplug handler for each one, then sends
++ * a drm hotplug event to alert userspace.
++ */
++static void amdgpu_hotplug_work_func(struct work_struct *work)
++{
++ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
++ hotplug_work);
++ struct drm_device *dev = adev->ddev;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++
++ if (mode_config->num_connector) {
++ list_for_each_entry(connector, &mode_config->connector_list, head)
++ amdgpu_connector_hotplug(connector);
++ }
++ /* Just fire off a uevent and let userspace tell us what to do */
++ drm_helper_hpd_irq_event(dev);
++}
++
++/**
++ * amdgpu_irq_reset_work_func - execute gpu reset
++ *
++ * @work: work struct
++ *
++ * Execute scheduled gpu reset (cayman+).
++ * This function is called when the irq handler
++ * thinks we need a gpu reset.
++ */
++static void amdgpu_irq_reset_work_func(struct work_struct *work)
++{
++ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
++ reset_work);
++
++ amdgpu_gpu_reset(adev);
++}
++
++/* Disable *all* interrupts */
++static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
++{
++ unsigned long irqflags;
++ unsigned i, j;
++ int r;
++
++ spin_lock_irqsave(&adev->irq.lock, irqflags);
++ for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
++ struct amdgpu_irq_src *src = adev->irq.sources[i];
++
++ if (!src || !src->funcs->set || !src->num_types)
++ continue;
++
++ for (j = 0; j < src->num_types; ++j) {
++ atomic_set(&src->enabled_types[j], 0);
++ r = src->funcs->set(adev, src, j,
++ AMDGPU_IRQ_STATE_DISABLE);
++ if (r)
++ DRM_ERROR("error disabling interrupt (%d)\n",
++ r);
++ }
++ }
++ spin_unlock_irqrestore(&adev->irq.lock, irqflags);
++}
++
++/**
++ * amdgpu_irq_preinstall - drm irq preinstall callback
++ *
++ * @dev: drm dev pointer
++ *
++ * Gets the hw ready to enable irqs (all asics).
++ * This function disables all interrupt sources on the GPU.
++ */
++void amdgpu_irq_preinstall(struct drm_device *dev)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++
++ /* Disable *all* interrupts */
++ amdgpu_irq_disable_all(adev);
++ /* Clear bits */
++ amdgpu_ih_process(adev);
++}
++
++/**
++ * amdgpu_irq_postinstall - drm irq postinstall callback
++ *
++ * @dev: drm dev pointer
++ *
++ * Handles setup required after irqs are enabled (all asics); currently
++ * this just sets the maximum vblank counter value.
++ * Returns 0 on success.
++ */
++int amdgpu_irq_postinstall(struct drm_device *dev)
++{
++ dev->max_vblank_count = 0x001fffff;
++ return 0;
++}
++
++/**
++ * amdgpu_irq_uninstall - drm irq uninstall callback
++ *
++ * @dev: drm dev pointer
++ *
++ * This function disables all interrupt sources on the GPU (all asics).
++ */
++void amdgpu_irq_uninstall(struct drm_device *dev)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++
++ if (adev == NULL) {
++ return;
++ }
++ amdgpu_irq_disable_all(adev);
++}
++
++/**
++ * amdgpu_irq_handler - irq handler
++ *
++ * @irq: interrupt number
++ * @arg: pointer to the drm_device
++ *
++ * This is the irq handler for the amdgpu driver (all asics).
++ */
++irqreturn_t amdgpu_irq_handler(int irq, void *arg)
++{
++ struct drm_device *dev = (struct drm_device *) arg;
++ struct amdgpu_device *adev = dev->dev_private;
++ irqreturn_t ret;
++
++ ret = amdgpu_ih_process(adev);
++ if (ret == IRQ_HANDLED)
++ pm_runtime_mark_last_busy(dev->dev);
++ return ret;
++}
++
++/**
++ * amdgpu_msi_ok - asic specific msi checks
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Handles asic specific MSI checks to determine if
++ * MSIs should be enabled on a particular chip (all asics).
++ * Returns true if MSIs should be enabled, false if MSIs
++ * should not be enabled.
++ */
++static bool amdgpu_msi_ok(struct amdgpu_device *adev)
++{
++ /* force MSI on */
++ if (amdgpu_msi == 1)
++ return true;
++ else if (amdgpu_msi == 0)
++ return false;
++
++ return true;
++}
++
++/**
++ * amdgpu_irq_init - init driver interrupt info
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
++ * Returns 0 for success, error for failure.
++ */
++int amdgpu_irq_init(struct amdgpu_device *adev)
++{
++ int r = 0;
++
++ spin_lock_init(&adev->irq.lock);
++ r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
++ if (r) {
++ return r;
++ }
++ /* enable msi */
++ adev->irq.msi_enabled = false;
++
++ if (amdgpu_msi_ok(adev)) {
++ int ret = pci_enable_msi(adev->pdev);
++ if (!ret) {
++ adev->irq.msi_enabled = true;
++ dev_info(adev->dev, "amdgpu: using MSI.\n");
++ }
++ }
++
++ INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
++ INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
++
++ adev->irq.installed = true;
++ r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
++ if (r) {
++ adev->irq.installed = false;
++ flush_work(&adev->hotplug_work);
++ return r;
++ }
++
++ DRM_INFO("amdgpu: irq initialized.\n");
++ return 0;
++}
++
++/**
++ * amdgpu_irq_fini - tear down driver interrupt info
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
++ */
++void amdgpu_irq_fini(struct amdgpu_device *adev)
++{
++ unsigned i;
++
++ drm_vblank_cleanup(adev->ddev);
++ if (adev->irq.installed) {
++ drm_irq_uninstall(adev->ddev);
++ adev->irq.installed = false;
++ if (adev->irq.msi_enabled)
++ pci_disable_msi(adev->pdev);
++ flush_work(&adev->hotplug_work);
++ }
++
++ for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
++ struct amdgpu_irq_src *src = adev->irq.sources[i];
++
++ if (!src)
++ continue;
++
++ kfree(src->enabled_types);
++ src->enabled_types = NULL;
++ }
++}
++
++/**
++ * amdgpu_irq_add_id - register irq source
++ *
++ * @adev: amdgpu device pointer
++ * @src_id: source id for this source
++ * @source: irq source
++ *
++ * Registers an irq source for a given src_id.
++ * Returns 0 on success, or a negative error code on failure.
++ */
++int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
++ struct amdgpu_irq_src *source)
++{
++ if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
++ return -EINVAL;
++
++ if (adev->irq.sources[src_id] != NULL)
++ return -EINVAL;
++
++ if (!source->funcs)
++ return -EINVAL;
++
++ if (source->num_types && !source->enabled_types) {
++ atomic_t *types;
++
++ types = kcalloc(source->num_types, sizeof(atomic_t),
++ GFP_KERNEL);
++ if (!types)
++ return -ENOMEM;
++
++ source->enabled_types = types;
++ }
++
++ adev->irq.sources[src_id] = source;
++ return 0;
++}
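++
++/*
++ * Illustrative sketch (editor's note, not part of the original patch):
++ * an IP block registers its source with a funcs table supplying the
++ * .set and .process callbacks used by amdgpu_irq_update() and
++ * amdgpu_irq_dispatch(). All names here are placeholders, and the
++ * funcs struct name is assumed from the fields used above.
++ *
++ *	static const struct amdgpu_irq_src_funcs my_irq_funcs = {
++ *		.set = my_irq_set,
++ *		.process = my_irq_process,
++ *	};
++ *
++ *	my_irq_src.num_types = 1;
++ *	my_irq_src.funcs = &my_irq_funcs;
++ *	r = amdgpu_irq_add_id(adev, MY_SRC_ID, &my_irq_src);
++ */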
++
++/**
++ * amdgpu_irq_dispatch - dispatch irq to IP blocks
++ *
++ * @adev: amdgpu device pointer
++ * @entry: interrupt vector
++ *
++ * Dispatches the irq to the different IP blocks
++ */
++void amdgpu_irq_dispatch(struct amdgpu_device *adev,
++ struct amdgpu_iv_entry *entry)
++{
++ unsigned src_id = entry->src_id;
++ struct amdgpu_irq_src *src;
++ int r;
++
++ if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
++ DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
++ return;
++ }
++
++ src = adev->irq.sources[src_id];
++ if (!src) {
++ DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
++ return;
++ }
++
++ r = src->funcs->process(adev, src, entry);
++ if (r)
++ DRM_ERROR("error processing interrupt (%d)\n", r);
++}
++
++/**
++ * amdgpu_irq_update - update hw interrupt state
++ *
++ * @adev: amdgpu device pointer
++ * @src: interrupt src you want to enable
++ * @type: type of interrupt you want to update
++ *
++ * Updates the interrupt state for a specific src (all asics).
++ */
++int amdgpu_irq_update(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *src, unsigned type)
++{
++ unsigned long irqflags;
++ enum amdgpu_interrupt_state state;
++ int r;
++
++ spin_lock_irqsave(&adev->irq.lock, irqflags);
++
++ /* determine the state after taking the lock, otherwise we
++ might disable an interrupt that was just enabled */
++ if (amdgpu_irq_enabled(adev, src, type))
++ state = AMDGPU_IRQ_STATE_ENABLE;
++ else
++ state = AMDGPU_IRQ_STATE_DISABLE;
++
++ r = src->funcs->set(adev, src, type, state);
++ spin_unlock_irqrestore(&adev->irq.lock, irqflags);
++ return r;
++}
++
++/**
++ * amdgpu_irq_get - enable interrupt
++ *
++ * @adev: amdgpu device pointer
++ * @src: interrupt src you want to enable
++ * @type: type of interrupt you want to enable
++ *
++ * Enables the interrupt type for a specific src (all asics).
++ */
++int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
++ unsigned type)
++{
++ if (!adev->ddev->irq_enabled)
++ return -ENOENT;
++
++ if (type >= src->num_types)
++ return -EINVAL;
++
++ if (!src->enabled_types || !src->funcs->set)
++ return -EINVAL;
++
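++ /* only the first user (0 -> 1) has to program the hardware */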
++ if (atomic_inc_return(&src->enabled_types[type]) == 1)
++ return amdgpu_irq_update(adev, src, type);
++
++ return 0;
++}
++
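++/**
++ * amdgpu_irq_get_delayed - take an interrupt reference without hw access
++ *
++ * @adev: amdgpu device pointer
++ * @src: interrupt src you want to enable
++ * @type: type of interrupt you want to enable
++ *
++ * Bumps the per-type enable count only; returns true if this was the
++ * first reference, in which case the caller must update the hardware
++ * state itself.
++ */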
++bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *src,
++ unsigned type)
++{
++ if ((type >= src->num_types) || !src->enabled_types)
++ return false;
++ return atomic_inc_return(&src->enabled_types[type]) == 1;
++}
++
++/**
++ * amdgpu_irq_put - disable interrupt
++ *
++ * @adev: amdgpu device pointer
++ * @src: interrupt src you want to disable
++ * @type: type of interrupt you want to disable
++ *
++ * Disables the interrupt type for a specific src (all asics).
++ */
++int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
++ unsigned type)
++{
++ if (!adev->ddev->irq_enabled)
++ return -ENOENT;
++
++ if (type >= src->num_types)
++ return -EINVAL;
++
++ if (!src->enabled_types || !src->funcs->set)
++ return -EINVAL;
++
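++ /* only the last user (1 -> 0) has to disable it in hardware */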
++ if (atomic_dec_and_test(&src->enabled_types[type]))
++ return amdgpu_irq_update(adev, src, type);
++
++ return 0;
++}
++
++/**
++ * amdgpu_irq_enabled - test if irq is enabled or not
++ *
++ * @adev: amdgpu device pointer
++ * @src: interrupt src you want to test
++ * @type: type of interrupt you want to test
++ *
++ * Tests if the given interrupt source is enabled or not
++ */
++bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
++ unsigned type)
++{
++ if (!adev->ddev->irq_enabled)
++ return false;
++
++ if (type >= src->num_types)
++ return false;
++
++ if (!src->enabled_types || !src->funcs->set)
++ return false;
++
++ return !!atomic_read(&src->enabled_types[type]);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+new file mode 100644
+index 0000000..8299795
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+@@ -0,0 +1,92 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_IRQ_H__
++#define __AMDGPU_IRQ_H__
++
++#include "amdgpu_ih.h"
++
++#define AMDGPU_MAX_IRQ_SRC_ID 0x100
++
++struct amdgpu_device;
++struct amdgpu_iv_entry;
++
++enum amdgpu_interrupt_state {
++ AMDGPU_IRQ_STATE_DISABLE,
++ AMDGPU_IRQ_STATE_ENABLE,
++};
++
++struct amdgpu_irq_src {
++ unsigned num_types;
++ atomic_t *enabled_types;
++ const struct amdgpu_irq_src_funcs *funcs;
++};
++
++/* provided by interrupt generating IP blocks */
++struct amdgpu_irq_src_funcs {
++ int (*set)(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
++ unsigned type, enum amdgpu_interrupt_state state);
++
++ int (*process)(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *source,
++ struct amdgpu_iv_entry *entry);
++};
++
++struct amdgpu_irq {
++ bool installed;
++ spinlock_t lock;
++ /* interrupt sources */
++ struct amdgpu_irq_src *sources[AMDGPU_MAX_IRQ_SRC_ID];
++
++ /* status, etc. */
++ bool msi_enabled; /* msi enabled */
++
++ /* interrupt ring */
++ struct amdgpu_ih_ring ih;
++ const struct amdgpu_ih_funcs *ih_funcs;
++};
++
++void amdgpu_irq_preinstall(struct drm_device *dev);
++int amdgpu_irq_postinstall(struct drm_device *dev);
++void amdgpu_irq_uninstall(struct drm_device *dev);
++irqreturn_t amdgpu_irq_handler(int irq, void *arg);
++
++int amdgpu_irq_init(struct amdgpu_device *adev);
++void amdgpu_irq_fini(struct amdgpu_device *adev);
++int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
++ struct amdgpu_irq_src *source);
++void amdgpu_irq_dispatch(struct amdgpu_device *adev,
++ struct amdgpu_iv_entry *entry);
++int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
++ unsigned type);
++int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
++ unsigned type);
++bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
++ struct amdgpu_irq_src *src,
++ unsigned type);
++int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
++ unsigned type);
++bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
++ unsigned type);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+new file mode 100644
+index 0000000..c271da3
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -0,0 +1,674 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include <drm/amdgpu_drm.h>
++#include "amdgpu_uvd.h"
++#include "amdgpu_vce.h"
++
++#include <linux/vga_switcheroo.h>
++#include <linux/slab.h>
++#include <linux/pm_runtime.h>
++
++#if defined(CONFIG_VGA_SWITCHEROO)
++bool amdgpu_has_atpx(void);
++#else
++static inline bool amdgpu_has_atpx(void) { return false; }
++#endif
++
++/**
++ * amdgpu_driver_unload_kms - Main unload function for KMS.
++ *
++ * @dev: drm dev pointer
++ *
++ * This is the main unload function for KMS (all asics).
++ * Returns 0 on success.
++ */
++int amdgpu_driver_unload_kms(struct drm_device *dev)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++
++ if (adev == NULL)
++ return 0;
++
++ if (adev->rmmio == NULL)
++ goto done_free;
++
++ pm_runtime_get_sync(dev->dev);
++
++ amdgpu_acpi_fini(adev);
++
++ amdgpu_device_fini(adev);
++
++done_free:
++ kfree(adev);
++ dev->dev_private = NULL;
++ return 0;
++}
++
++/**
++ * amdgpu_driver_load_kms - Main load function for KMS.
++ *
++ * @dev: drm dev pointer
++ * @flags: device flags
++ *
++ * This is the main load function for KMS (all asics).
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
++{
++ struct amdgpu_device *adev;
++ int r, acpi_status;
++
++ adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
++ if (adev == NULL) {
++ return -ENOMEM;
++ }
++ dev->dev_private = (void *)adev;
++
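++ /* PX runtime pm is only used on ATPX-capable dGPUs, never on APUs */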
++ if ((amdgpu_runtime_pm != 0) &&
++ amdgpu_has_atpx() &&
++ ((flags & AMDGPU_IS_APU) == 0))
++ flags |= AMDGPU_IS_PX;
++
++ /* amdgpu_device_init should report only fatal errors
++ * (e.g. memory allocation, iomapping or memory manager
++ * initialization failure); it must properly initialize
++ * the GPU MC controller and permit VRAM allocation
++ */
++ r = amdgpu_device_init(adev, dev, dev->pdev, flags);
++ if (r) {
++ dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
++ goto out;
++ }
++
++ /* Call ACPI methods: they require modeset init,
++ * but failure is not fatal
++ */
++ if (!r) {
++ acpi_status = amdgpu_acpi_init(adev);
++ if (acpi_status)
++ dev_dbg(&dev->pdev->dev,
++ "Error during ACPI methods call\n");
++ }
++
++ if (amdgpu_device_is_px(dev)) {
++ pm_runtime_use_autosuspend(dev->dev);
++ pm_runtime_set_autosuspend_delay(dev->dev, 5000);
++ pm_runtime_set_active(dev->dev);
++ pm_runtime_allow(dev->dev);
++ pm_runtime_mark_last_busy(dev->dev);
++ pm_runtime_put_autosuspend(dev->dev);
++ }
++
++out:
++ if (r)
++ amdgpu_driver_unload_kms(dev);
++
++ return r;
++}
++
++/*
++ * Userspace get information ioctl
++ */
++/**
++ * amdgpu_info_ioctl - answer a device specific request.
++ *
++ * @dev: drm dev pointer
++ * @data: request object
++ * @filp: drm filp
++ *
++ * This function is used to pass device specific parameters to the userspace
++ * drivers. Examples include: pci device id, pipeline params, tiling params,
++ * etc. (all asics).
++ * Returns 0 on success, -EINVAL on failure.
++ */
++static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ struct drm_amdgpu_info *info = data;
++ struct amdgpu_mode_info *minfo = &adev->mode_info;
++ void __user *out = (void __user *)(long)info->return_pointer;
++ uint32_t size = info->return_size;
++ struct drm_crtc *crtc;
++ uint32_t ui32 = 0;
++ uint64_t ui64 = 0;
++ int i, found;
++
++ if (!info->return_size || !info->return_pointer)
++ return -EINVAL;
++
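++ /* every query copies at most return_size bytes back to userspace */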
++ switch (info->query) {
++ case AMDGPU_INFO_ACCEL_WORKING:
++ ui32 = adev->accel_working;
++ return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
++ case AMDGPU_INFO_CRTC_FROM_ID:
++ for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
++ crtc = (struct drm_crtc *)minfo->crtcs[i];
++ if (crtc && crtc->base.id == info->mode_crtc.id) {
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ ui32 = amdgpu_crtc->crtc_id;
++ found = 1;
++ break;
++ }
++ }
++ if (!found) {
++ DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
++ return -EINVAL;
++ }
++ return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
++ case AMDGPU_INFO_HW_IP_INFO: {
++ struct drm_amdgpu_info_hw_ip ip = {};
++ enum amdgpu_ip_block_type type;
++ uint32_t ring_mask = 0;
++
++ if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
++ return -EINVAL;
++
++ switch (info->query_hw_ip.type) {
++ case AMDGPU_HW_IP_GFX:
++ type = AMDGPU_IP_BLOCK_TYPE_GFX;
++ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
++ ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
++ break;
++ case AMDGPU_HW_IP_COMPUTE:
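++ /* compute rings are part of the GFX IP block */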
++ type = AMDGPU_IP_BLOCK_TYPE_GFX;
++ for (i = 0; i < adev->gfx.num_compute_rings; i++)
++ ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
++ break;
++ case AMDGPU_HW_IP_DMA:
++ type = AMDGPU_IP_BLOCK_TYPE_SDMA;
++ ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
++ ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
++ break;
++ case AMDGPU_HW_IP_UVD:
++ type = AMDGPU_IP_BLOCK_TYPE_UVD;
++ ring_mask = adev->uvd.ring.ready ? 1 : 0;
++ break;
++ case AMDGPU_HW_IP_VCE:
++ type = AMDGPU_IP_BLOCK_TYPE_VCE;
++ for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
++ ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (adev->ip_blocks[i].type == type &&
++ adev->ip_block_enabled[i]) {
++ ip.hw_ip_version_major = adev->ip_blocks[i].major;
++ ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
++ ip.capabilities_flags = 0;
++ ip.available_rings = ring_mask;
++ break;
++ }
++ }
++ return copy_to_user(out, &ip,
++ min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
++ }
++ case AMDGPU_INFO_HW_IP_COUNT: {
++ enum amdgpu_ip_block_type type;
++ uint32_t count = 0;
++
++ switch (info->query_hw_ip.type) {
++ case AMDGPU_HW_IP_GFX:
++ type = AMDGPU_IP_BLOCK_TYPE_GFX;
++ break;
++ case AMDGPU_HW_IP_COMPUTE:
++ type = AMDGPU_IP_BLOCK_TYPE_GFX;
++ break;
++ case AMDGPU_HW_IP_DMA:
++ type = AMDGPU_IP_BLOCK_TYPE_SDMA;
++ break;
++ case AMDGPU_HW_IP_UVD:
++ type = AMDGPU_IP_BLOCK_TYPE_UVD;
++ break;
++ case AMDGPU_HW_IP_VCE:
++ type = AMDGPU_IP_BLOCK_TYPE_VCE;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ for (i = 0; i < adev->num_ip_blocks; i++)
++ if (adev->ip_blocks[i].type == type &&
++ adev->ip_block_enabled[i] &&
++ count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
++ count++;
++
++ return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
++ }
++ case AMDGPU_INFO_TIMESTAMP:
++ ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
++ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
++ case AMDGPU_INFO_FW_VERSION: {
++ struct drm_amdgpu_info_firmware fw_info;
++
++ /* We only support one instance of each IP block right now. */
++ if (info->query_fw.ip_instance != 0)
++ return -EINVAL;
++
++ switch (info->query_fw.fw_type) {
++ case AMDGPU_INFO_FW_VCE:
++ fw_info.ver = adev->vce.fw_version;
++ fw_info.feature = adev->vce.fb_version;
++ break;
++ case AMDGPU_INFO_FW_UVD:
++ fw_info.ver = 0;
++ fw_info.feature = 0;
++ break;
++ case AMDGPU_INFO_FW_GMC:
++ fw_info.ver = adev->mc.fw_version;
++ fw_info.feature = 0;
++ break;
++ case AMDGPU_INFO_FW_GFX_ME:
++ fw_info.ver = adev->gfx.me_fw_version;
++ fw_info.feature = 0;
++ break;
++ case AMDGPU_INFO_FW_GFX_PFP:
++ fw_info.ver = adev->gfx.pfp_fw_version;
++ fw_info.feature = 0;
++ break;
++ case AMDGPU_INFO_FW_GFX_CE:
++ fw_info.ver = adev->gfx.ce_fw_version;
++ fw_info.feature = 0;
++ break;
++ case AMDGPU_INFO_FW_GFX_RLC:
++ fw_info.ver = adev->gfx.rlc_fw_version;
++ fw_info.feature = 0;
++ break;
++ case AMDGPU_INFO_FW_GFX_MEC:
++ if (info->query_fw.index == 0)
++ fw_info.ver = adev->gfx.mec_fw_version;
++ else if (info->query_fw.index == 1)
++ fw_info.ver = adev->gfx.mec2_fw_version;
++ else
++ return -EINVAL;
++ fw_info.feature = 0;
++ break;
++ case AMDGPU_INFO_FW_SMC:
++ fw_info.ver = adev->pm.fw_version;
++ fw_info.feature = 0;
++ break;
++ case AMDGPU_INFO_FW_SDMA:
++ if (info->query_fw.index >= 2)
++ return -EINVAL;
++ fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
++ fw_info.feature = 0;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return copy_to_user(out, &fw_info,
++ min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
++ }
++ case AMDGPU_INFO_NUM_BYTES_MOVED:
++ ui64 = atomic64_read(&adev->num_bytes_moved);
++ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
++ case AMDGPU_INFO_VRAM_USAGE:
++ ui64 = atomic64_read(&adev->vram_usage);
++ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
++ case AMDGPU_INFO_VIS_VRAM_USAGE:
++ ui64 = atomic64_read(&adev->vram_vis_usage);
++ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
++ case AMDGPU_INFO_GTT_USAGE:
++ ui64 = atomic64_read(&adev->gtt_usage);
++ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
++ case AMDGPU_INFO_GDS_CONFIG: {
++ struct drm_amdgpu_info_gds gds_info;
++
++ gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
++ gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
++ gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
++ gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
++ gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
++ gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
++ gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
++ return copy_to_user(out, &gds_info,
++ min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
++ }
++ case AMDGPU_INFO_VRAM_GTT: {
++ struct drm_amdgpu_info_vram_gtt vram_gtt;
++
++ vram_gtt.vram_size = adev->mc.real_vram_size;
++ vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
++ vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
++ vram_gtt.gtt_size = adev->mc.gtt_size;
++ vram_gtt.gtt_size -= adev->gart_pin_size;
++ return copy_to_user(out, &vram_gtt,
++ min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
++ }
++ case AMDGPU_INFO_READ_MMR_REG: {
++ unsigned n, alloc_size = info->read_mmr_reg.count * 4;
++ uint32_t *regs;
++ unsigned se_num = (info->read_mmr_reg.instance >>
++ AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
++ AMDGPU_INFO_MMR_SE_INDEX_MASK;
++ unsigned sh_num = (info->read_mmr_reg.instance >>
++ AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
++ AMDGPU_INFO_MMR_SH_INDEX_MASK;
++
++ /* set full masks if userspace set all bits
++ * in the bitfields */
++ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
++ se_num = 0xffffffff;
++ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
++ sh_num = 0xffffffff;
++
++ regs = kmalloc(alloc_size, GFP_KERNEL);
++ if (!regs)
++ return -ENOMEM;
++
++ for (i = 0; i < info->read_mmr_reg.count; i++)
++ if (amdgpu_asic_read_register(adev, se_num, sh_num,
++ info->read_mmr_reg.dword_offset + i,
++ &regs[i])) {
++ DRM_DEBUG_KMS("unallowed offset %#x\n",
++ info->read_mmr_reg.dword_offset + i);
++ kfree(regs);
++ return -EFAULT;
++ }
++ n = copy_to_user(out, regs, min(size, alloc_size));
++ kfree(regs);
++ return n ? -EFAULT : 0;
++ }
++ case AMDGPU_INFO_DEV_INFO: {
++ struct drm_amdgpu_info_device dev_info;
++ struct amdgpu_cu_info cu_info;
++
++ dev_info.device_id = dev->pdev->device;
++ dev_info.chip_rev = adev->rev_id;
++ dev_info.external_rev = adev->external_rev_id;
++ dev_info.pci_rev = dev->pdev->revision;
++ dev_info.family = adev->family;
++ dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
++ dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
++ /* return all clocks in KHz */
++ dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
++ if (adev->pm.dpm_enabled)
++ dev_info.max_engine_clock =
++ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
++ else
++ dev_info.max_engine_clock = adev->pm.default_sclk * 10;
++ dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
++ dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
++ adev->gfx.config.max_shader_engines;
++ dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
++ dev_info._pad = 0;
++ dev_info.ids_flags = 0;
++ if (adev->flags & AMDGPU_IS_APU)
++ dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
++ dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
++ dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
++ dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
++ AMDGPU_GPU_PAGE_SIZE;
++ dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
++
++ amdgpu_asic_get_cu_info(adev, &cu_info);
++ dev_info.cu_active_number = cu_info.number;
++ dev_info.cu_ao_mask = cu_info.ao_cu_mask;
++ memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
++
++ return copy_to_user(out, &dev_info,
++ min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
++ }
++ default:
++ DRM_DEBUG_KMS("Invalid request %d\n", info->query);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++
++/*
++ * Outdated mess for old drm with Xorg being in charge (void function now).
++ */
++/**
++ * amdgpu_driver_lastclose_kms - drm callback for last close
++ *
++ * @dev: drm dev pointer
++ *
++ * Switch vga switcheroo state after last close (all asics).
++ */
++void amdgpu_driver_lastclose_kms(struct drm_device *dev)
++{
++ vga_switcheroo_process_delayed_switch();
++}
++
++/**
++ * amdgpu_driver_open_kms - drm callback for open
++ *
++ * @dev: drm dev pointer
++ * @file_priv: drm file
++ *
++ * On device open, init the per-file vm and context manager (all asics).
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_fpriv *fpriv;
++ int r;
++
++ file_priv->driver_priv = NULL;
++
++ r = pm_runtime_get_sync(dev->dev);
++ if (r < 0)
++ return r;
++
++ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
++ if (unlikely(!fpriv))
++ return -ENOMEM;
++
++ r = amdgpu_vm_init(adev, &fpriv->vm);
++ if (r)
++ goto error_free;
++
++ mutex_init(&fpriv->bo_list_lock);
++ idr_init(&fpriv->bo_list_handles);
++
++ /* init context manager */
++ mutex_init(&fpriv->ctx_mgr.hlock);
++ idr_init(&fpriv->ctx_mgr.ctx_handles);
++ fpriv->ctx_mgr.adev = adev;
++
++ file_priv->driver_priv = fpriv;
++
++ pm_runtime_mark_last_busy(dev->dev);
++ pm_runtime_put_autosuspend(dev->dev);
++ return 0;
++
++error_free:
++ kfree(fpriv);
++
++ return r;
++}
++
++/**
++ * amdgpu_driver_postclose_kms - drm callback for post close
++ *
++ * @dev: drm dev pointer
++ * @file_priv: drm file
++ *
++ * On device post close, tear down the per-file vm, bo lists and
++ * contexts (all asics).
++ */
++void amdgpu_driver_postclose_kms(struct drm_device *dev,
++ struct drm_file *file_priv)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
++ struct amdgpu_bo_list *list;
++ int handle;
++
++ if (!fpriv)
++ return;
++
++ amdgpu_vm_fini(adev, &fpriv->vm);
++
++ idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
++ amdgpu_bo_list_free(list);
++
++ idr_destroy(&fpriv->bo_list_handles);
++ mutex_destroy(&fpriv->bo_list_lock);
++
++ /* release context */
++ amdgpu_ctx_fini(fpriv);
++
++ kfree(fpriv);
++ file_priv->driver_priv = NULL;
++}
++
++/**
++ * amdgpu_driver_preclose_kms - drm callback for pre close
++ *
++ * @dev: drm dev pointer
++ * @file_priv: drm file
++ *
++ * On device pre close, free the UVD and VCE handles held by
++ * this file (all asics).
++ */
++void amdgpu_driver_preclose_kms(struct drm_device *dev,
++ struct drm_file *file_priv)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++
++ amdgpu_uvd_free_handles(adev, file_priv);
++ amdgpu_vce_free_handles(adev, file_priv);
++}
++
++/*
++ * VBlank related functions.
++ */
++/**
++ * amdgpu_get_vblank_counter_kms - get frame count
++ *
++ * @dev: drm dev pointer
++ * @crtc: crtc to get the frame count from
++ *
++ * Gets the frame count on the requested crtc (all asics).
++ * Returns frame count on success, -EINVAL on failure.
++ */
++u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++
++ if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
++ DRM_ERROR("Invalid crtc %d\n", crtc);
++ return -EINVAL;
++ }
++
++ return amdgpu_display_vblank_get_counter(adev, crtc);
++}
++
++/**
++ * amdgpu_enable_vblank_kms - enable vblank interrupt
++ *
++ * @dev: drm dev pointer
++ * @crtc: crtc to enable vblank interrupt for
++ *
++ * Enable the interrupt on the requested crtc (all asics).
++ * Returns 0 on success, -EINVAL on failure.
++ */
++int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);
++
++ return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
++}
++
++/**
++ * amdgpu_disable_vblank_kms - disable vblank interrupt
++ *
++ * @dev: drm dev pointer
++ * @crtc: crtc to disable vblank interrupt for
++ *
++ * Disable the interrupt on the requested crtc (all asics).
++ */
++void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);
++
++ amdgpu_irq_put(adev, &adev->crtc_irq, idx);
++}
++
++/**
++ * amdgpu_get_vblank_timestamp_kms - get vblank timestamp
++ *
++ * @dev: drm dev pointer
++ * @crtc: crtc to get the timestamp for
++ * @max_error: max error
++ * @vblank_time: time value
++ * @flags: flags passed to the driver
++ *
++ * Gets the timestamp on the requested crtc based on the
++ * scanout position. (all asics).
++ * Returns positive status flags on success, negative error on failure.
++ */
++int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
++ int *max_error,
++ struct timeval *vblank_time,
++ unsigned flags)
++{
++ struct drm_crtc *drmcrtc;
++ struct amdgpu_device *adev = dev->dev_private;
++
++ if (crtc < 0 || crtc >= dev->num_crtcs) {
++ DRM_ERROR("Invalid crtc %d\n", crtc);
++ return -EINVAL;
++ }
++
++ /* Get associated drm_crtc: */
++ drmcrtc = &adev->mode_info.crtcs[crtc]->base;
++
++ /* Helper routine in DRM core does all the work: */
++ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
++ vblank_time, flags,
++ drmcrtc, &drmcrtc->hwmode);
++}
++
++const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
++ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ /* KMS */
++ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
++};
++int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+new file mode 100644
+index 0000000..e944291
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -0,0 +1,319 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Christian König <christian.koenig@amd.com>
++ */
++
++#include <linux/firmware.h>
++#include <linux/module.h>
++#include <linux/mmu_notifier.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "amdgpu.h"
++
++struct amdgpu_mn {
++ /* constant after initialisation */
++ struct amdgpu_device *adev;
++ struct mm_struct *mm;
++ struct mmu_notifier mn;
++
++ /* only used on destruction */
++ struct work_struct work;
++
++ /* protected by adev->mn_lock */
++ struct hlist_node node;
++
++ /* objects protected by lock */
++ struct mutex lock;
++ struct rb_root objects;
++};
++
++struct amdgpu_mn_node {
++ struct interval_tree_node it;
++ struct list_head bos;
++};
++
++/**
++ * amdgpu_mn_destroy - destroy the rmn
++ *
++ * @work: previously scheduled work item
++ *
++ * Lazily destroys the notifier from a work item.
++ */
++static void amdgpu_mn_destroy(struct work_struct *work)
++{
++ struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
++ struct amdgpu_device *adev = rmn->adev;
++ struct amdgpu_mn_node *node, *next_node;
++ struct amdgpu_bo *bo, *next_bo;
++
++ mutex_lock(&adev->mn_lock);
++ mutex_lock(&rmn->lock);
++ hash_del(&rmn->node);
++ rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
++ it.rb) {
++
++ interval_tree_remove(&node->it, &rmn->objects);
++ list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
++ bo->mn = NULL;
++ list_del_init(&bo->mn_list);
++ }
++ kfree(node);
++ }
++ mutex_unlock(&rmn->lock);
++ mutex_unlock(&adev->mn_lock);
++ mmu_notifier_unregister(&rmn->mn, rmn->mm);
++ kfree(rmn);
++}
++
++/**
++ * amdgpu_mn_release - callback to notify about mm destruction
++ *
++ * @mn: our notifier
++ * @mm: the mm this callback is about
++ *
++ * Schedule a work item to lazily destroy our notifier.
++ */
++static void amdgpu_mn_release(struct mmu_notifier *mn,
++ struct mm_struct *mm)
++{
++ struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
++ INIT_WORK(&rmn->work, amdgpu_mn_destroy);
++ schedule_work(&rmn->work);
++}
++
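++ /* presumably to limit fragmentation, larger BOs are placed top-down */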
++/**
++ * amdgpu_mn_invalidate_range_start - callback to notify about mm change
++ *
++ * @mn: our notifier
++ * @mm: the mm this callback is about
++ * @start: start of updated range
++ * @end: end of updated range
++ *
++ * We wait for all BOs between start and end to become idle and
++ * unmap them by moving them into the system domain again.
++ */
++static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
++ struct mm_struct *mm,
++ unsigned long start,
++ unsigned long end)
++{
++ struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
++ struct interval_tree_node *it;
++
++ /* notification is exclusive, but interval is inclusive */
++ end -= 1;
++
++ mutex_lock(&rmn->lock);
++
++ it = interval_tree_iter_first(&rmn->objects, start, end);
++ while (it) {
++ struct amdgpu_mn_node *node;
++ struct amdgpu_bo *bo;
++ int r;
++
++ node = container_of(it, struct amdgpu_mn_node, it);
++ it = interval_tree_iter_next(it, start, end);
++
++ list_for_each_entry(bo, &node->bos, mn_list) {
++
++ r = amdgpu_bo_reserve(bo, true);
++ if (r) {
++ DRM_ERROR("(%d) failed to reserve user bo\n", r);
++ continue;
++ }
++
++ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
++ true, false, MAX_SCHEDULE_TIMEOUT);
++ if (r)
++ DRM_ERROR("(%d) failed to wait for user bo\n", r);
++
++ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
++ if (r)
++ DRM_ERROR("(%d) failed to validate user bo\n", r);
++
++ amdgpu_bo_unreserve(bo);
++ }
++ }
++
++ mutex_unlock(&rmn->lock);
++}
++
++static const struct mmu_notifier_ops amdgpu_mn_ops = {
++ .release = amdgpu_mn_release,
++ .invalidate_range_start = amdgpu_mn_invalidate_range_start,
++};
++
++/**
++ * amdgpu_mn_get - create notifier context
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Creates, or returns the already existing, notifier context for current->mm.
++ */
++static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
++{
++ struct mm_struct *mm = current->mm;
++ struct amdgpu_mn *rmn;
++ int r;
++
++ down_write(&mm->mmap_sem);
++ mutex_lock(&adev->mn_lock);
++
++ hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
++ if (rmn->mm == mm)
++ goto release_locks;
++
++ rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
++ if (!rmn) {
++ rmn = ERR_PTR(-ENOMEM);
++ goto release_locks;
++ }
++
++ rmn->adev = adev;
++ rmn->mm = mm;
++ rmn->mn.ops = &amdgpu_mn_ops;
++ mutex_init(&rmn->lock);
++ rmn->objects = RB_ROOT;
++
++ r = __mmu_notifier_register(&rmn->mn, mm);
++ if (r)
++ goto free_rmn;
++
++ hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
++
++release_locks:
++ mutex_unlock(&adev->mn_lock);
++ up_write(&mm->mmap_sem);
++
++ return rmn;
++
++free_rmn:
++ mutex_unlock(&adev->mn_lock);
++ up_write(&mm->mmap_sem);
++ kfree(rmn);
++
++ return ERR_PTR(r);
++}
++
++/**
++ * amdgpu_mn_register - register a BO for notifier updates
++ *
++ * @bo: amdgpu buffer object
++ * @addr: userptr addr we should monitor
++ *
++ * Registers an MMU notifier for the given BO at the specified address.
++ * Returns 0 on success, -ERRNO if anything goes wrong.
++ */
++int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
++{
++ unsigned long end = addr + amdgpu_bo_size(bo) - 1;
++ struct amdgpu_device *adev = bo->adev;
++ struct amdgpu_mn *rmn;
++ struct amdgpu_mn_node *node = NULL;
++ struct list_head bos;
++ struct interval_tree_node *it;
++
++ rmn = amdgpu_mn_get(adev);
++ if (IS_ERR(rmn))
++ return PTR_ERR(rmn);
++
++ INIT_LIST_HEAD(&bos);
++
++ mutex_lock(&rmn->lock);
++
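++ /* merge all overlapping nodes (and their BO lists) into one interval */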
++ while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
++ kfree(node);
++ node = container_of(it, struct amdgpu_mn_node, it);
++ interval_tree_remove(&node->it, &rmn->objects);
++ addr = min(it->start, addr);
++ end = max(it->last, end);
++ list_splice(&node->bos, &bos);
++ }
++
++ if (!node) {
++ node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
++ if (!node) {
++ mutex_unlock(&rmn->lock);
++ return -ENOMEM;
++ }
++ }
++
++ bo->mn = rmn;
++
++ node->it.start = addr;
++ node->it.last = end;
++ INIT_LIST_HEAD(&node->bos);
++ list_splice(&bos, &node->bos);
++ list_add(&bo->mn_list, &node->bos);
++
++ interval_tree_insert(&node->it, &rmn->objects);
++
++ mutex_unlock(&rmn->lock);
++
++ return 0;
++}
++
++/**
++ * amdgpu_mn_unregister - unregister a BO for notifier updates
++ *
++ * @bo: amdgpu buffer object
++ *
++ * Remove any registration of MMU notifier updates from the buffer object.
++ */
++void amdgpu_mn_unregister(struct amdgpu_bo *bo)
++{
++ struct amdgpu_device *adev = bo->adev;
++ struct amdgpu_mn *rmn;
++ struct list_head *head;
++
++ mutex_lock(&adev->mn_lock);
++ rmn = bo->mn;
++ if (rmn == NULL) {
++ mutex_unlock(&adev->mn_lock);
++ return;
++ }
++
++ mutex_lock(&rmn->lock);
++ /* save the next list entry for later */
++ head = bo->mn_list.next;
++
++ bo->mn = NULL;
++ list_del(&bo->mn_list);
++
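++ /* free the node if this was its last BO */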
++ if (list_empty(head)) {
++ struct amdgpu_mn_node *node;
++ node = container_of(head, struct amdgpu_mn_node, bos);
++ interval_tree_remove(&node->it, &rmn->objects);
++ kfree(node);
++ }
++
++ mutex_unlock(&rmn->lock);
++ mutex_unlock(&adev->mn_lock);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+new file mode 100644
+index 0000000..64efe5b
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+@@ -0,0 +1,586 @@
++/*
++ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
++ * VA Linux Systems Inc., Fremont, California.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Original Authors:
++ * Kevin E. Martin, Rickard E. Faith, Alan Hourihane
++ *
++ * Kernel port Author: Dave Airlie
++ */
++
++#ifndef AMDGPU_MODE_H
++#define AMDGPU_MODE_H
++
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include <drm/drm_dp_helper.h>
++#include <drm/drm_fixed.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/drm_plane_helper.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-bit.h>
++
++struct amdgpu_bo;
++struct amdgpu_device;
++struct amdgpu_encoder;
++struct amdgpu_router;
++struct amdgpu_hpd;
++
++#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
++#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
++#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
++#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
++
++#define AMDGPU_MAX_HPD_PINS 6
++#define AMDGPU_MAX_CRTCS 6
++#define AMDGPU_MAX_AFMT_BLOCKS 7
++
++enum amdgpu_rmx_type {
++ RMX_OFF,
++ RMX_FULL,
++ RMX_CENTER,
++ RMX_ASPECT
++};
++
++enum amdgpu_underscan_type {
++ UNDERSCAN_OFF,
++ UNDERSCAN_ON,
++ UNDERSCAN_AUTO,
++};
++
++#define AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS 50
++#define AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS 10
++
++enum amdgpu_hpd_id {
++ AMDGPU_HPD_1 = 0,
++ AMDGPU_HPD_2,
++ AMDGPU_HPD_3,
++ AMDGPU_HPD_4,
++ AMDGPU_HPD_5,
++ AMDGPU_HPD_6,
++ AMDGPU_HPD_LAST,
++ AMDGPU_HPD_NONE = 0xff,
++};
++
++enum amdgpu_crtc_irq {
++ AMDGPU_CRTC_IRQ_VBLANK1 = 0,
++ AMDGPU_CRTC_IRQ_VBLANK2,
++ AMDGPU_CRTC_IRQ_VBLANK3,
++ AMDGPU_CRTC_IRQ_VBLANK4,
++ AMDGPU_CRTC_IRQ_VBLANK5,
++ AMDGPU_CRTC_IRQ_VBLANK6,
++ AMDGPU_CRTC_IRQ_VLINE1,
++ AMDGPU_CRTC_IRQ_VLINE2,
++ AMDGPU_CRTC_IRQ_VLINE3,
++ AMDGPU_CRTC_IRQ_VLINE4,
++ AMDGPU_CRTC_IRQ_VLINE5,
++ AMDGPU_CRTC_IRQ_VLINE6,
++ AMDGPU_CRTC_IRQ_LAST,
++ AMDGPU_CRTC_IRQ_NONE = 0xff
++};
++
++enum amdgpu_pageflip_irq {
++ AMDGPU_PAGEFLIP_IRQ_D1 = 0,
++ AMDGPU_PAGEFLIP_IRQ_D2,
++ AMDGPU_PAGEFLIP_IRQ_D3,
++ AMDGPU_PAGEFLIP_IRQ_D4,
++ AMDGPU_PAGEFLIP_IRQ_D5,
++ AMDGPU_PAGEFLIP_IRQ_D6,
++ AMDGPU_PAGEFLIP_IRQ_LAST,
++ AMDGPU_PAGEFLIP_IRQ_NONE = 0xff
++};
++
++enum amdgpu_flip_status {
++ AMDGPU_FLIP_NONE,
++ AMDGPU_FLIP_PENDING,
++ AMDGPU_FLIP_SUBMITTED
++};
++
++#define AMDGPU_MAX_I2C_BUS 16
++
++/* amdgpu gpio-based i2c
++ * 1. "mask" reg and bits
++ * grabs the gpio pins for software use
++ * 0=not held 1=held
++ * 2. "a" reg and bits
++ * output pin value
++ * 0=low 1=high
++ * 3. "en" reg and bits
++ * sets the pin direction
++ * 0=input 1=output
++ * 4. "y" reg and bits
++ * input pin value
++ * 0=low 1=high
++ */
++struct amdgpu_i2c_bus_rec {
++ bool valid;
++ /* id used by atom */
++ uint8_t i2c_id;
++ /* id used by atom */
++ enum amdgpu_hpd_id hpd;
++ /* can be used with hw i2c engine */
++ bool hw_capable;
++ /* uses multi-media i2c engine */
++ bool mm_i2c;
++ /* regs and bits */
++ uint32_t mask_clk_reg;
++ uint32_t mask_data_reg;
++ uint32_t a_clk_reg;
++ uint32_t a_data_reg;
++ uint32_t en_clk_reg;
++ uint32_t en_data_reg;
++ uint32_t y_clk_reg;
++ uint32_t y_data_reg;
++ uint32_t mask_clk_mask;
++ uint32_t mask_data_mask;
++ uint32_t a_clk_mask;
++ uint32_t a_data_mask;
++ uint32_t en_clk_mask;
++ uint32_t en_data_mask;
++ uint32_t y_clk_mask;
++ uint32_t y_data_mask;
++};
++
++#define AMDGPU_MAX_BIOS_CONNECTOR 16
++
++/* pll flags */
++#define AMDGPU_PLL_USE_BIOS_DIVS (1 << 0)
++#define AMDGPU_PLL_NO_ODD_POST_DIV (1 << 1)
++#define AMDGPU_PLL_USE_REF_DIV (1 << 2)
++#define AMDGPU_PLL_LEGACY (1 << 3)
++#define AMDGPU_PLL_PREFER_LOW_REF_DIV (1 << 4)
++#define AMDGPU_PLL_PREFER_HIGH_REF_DIV (1 << 5)
++#define AMDGPU_PLL_PREFER_LOW_FB_DIV (1 << 6)
++#define AMDGPU_PLL_PREFER_HIGH_FB_DIV (1 << 7)
++#define AMDGPU_PLL_PREFER_LOW_POST_DIV (1 << 8)
++#define AMDGPU_PLL_PREFER_HIGH_POST_DIV (1 << 9)
++#define AMDGPU_PLL_USE_FRAC_FB_DIV (1 << 10)
++#define AMDGPU_PLL_PREFER_CLOSEST_LOWER (1 << 11)
++#define AMDGPU_PLL_USE_POST_DIV (1 << 12)
++#define AMDGPU_PLL_IS_LCD (1 << 13)
++#define AMDGPU_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
++
++struct amdgpu_pll {
++ /* reference frequency */
++ uint32_t reference_freq;
++
++ /* fixed dividers */
++ uint32_t reference_div;
++ uint32_t post_div;
++
++ /* pll in/out limits */
++ uint32_t pll_in_min;
++ uint32_t pll_in_max;
++ uint32_t pll_out_min;
++ uint32_t pll_out_max;
++ uint32_t lcd_pll_out_min;
++ uint32_t lcd_pll_out_max;
++ uint32_t best_vco;
++
++ /* divider limits */
++ uint32_t min_ref_div;
++ uint32_t max_ref_div;
++ uint32_t min_post_div;
++ uint32_t max_post_div;
++ uint32_t min_feedback_div;
++ uint32_t max_feedback_div;
++ uint32_t min_frac_feedback_div;
++ uint32_t max_frac_feedback_div;
++
++ /* flags for the current clock */
++ uint32_t flags;
++
++ /* pll id */
++ uint32_t id;
++};
++
++struct amdgpu_i2c_chan {
++ struct i2c_adapter adapter;
++ struct drm_device *dev;
++ struct i2c_algo_bit_data bit;
++ struct amdgpu_i2c_bus_rec rec;
++ struct drm_dp_aux aux;
++ bool has_aux;
++ struct mutex mutex;
++};
++
++struct amdgpu_fbdev;
++
++struct amdgpu_afmt {
++ bool enabled;
++ int offset;
++ bool last_buffer_filled_status;
++ int id;
++ struct amdgpu_audio_pin *pin;
++};
++
++/*
++ * Audio
++ */
++struct amdgpu_audio_pin {
++ int channels;
++ int rate;
++ int bits_per_sample;
++ u8 status_bits;
++ u8 category_code;
++ u32 offset;
++ bool connected;
++ u32 id;
++};
++
++struct amdgpu_audio {
++ bool enabled;
++ struct amdgpu_audio_pin pin[AMDGPU_MAX_AFMT_BLOCKS];
++ int num_pins;
++};
++
++struct amdgpu_mode_mc_save {
++ u32 vga_render_control;
++ u32 vga_hdp_control;
++ bool crtc_enabled[AMDGPU_MAX_CRTCS];
++};
++
++struct amdgpu_display_funcs {
++ /* vga render */
++ void (*set_vga_render_state)(struct amdgpu_device *adev, bool render);
++ /* display watermarks */
++ void (*bandwidth_update)(struct amdgpu_device *adev);
++ /* get frame count */
++ u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
++ /* wait for vblank */
++ void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
++ /* is dce hung */
++ bool (*is_display_hung)(struct amdgpu_device *adev);
++ /* set backlight level */
++ void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
++ u8 level);
++ /* get backlight level */
++ u8 (*backlight_get_level)(struct amdgpu_encoder *amdgpu_encoder);
++ /* hotplug detect */
++ bool (*hpd_sense)(struct amdgpu_device *adev, enum amdgpu_hpd_id hpd);
++ void (*hpd_set_polarity)(struct amdgpu_device *adev,
++ enum amdgpu_hpd_id hpd);
++ u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev);
++ /* pageflipping */
++ void (*page_flip)(struct amdgpu_device *adev,
++ int crtc_id, u64 crtc_base);
++ int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc,
++ u32 *vbl, u32 *position);
++ /* display topology setup */
++ void (*add_encoder)(struct amdgpu_device *adev,
++ uint32_t encoder_enum,
++ uint32_t supported_device,
++ u16 caps);
++ void (*add_connector)(struct amdgpu_device *adev,
++ uint32_t connector_id,
++ uint32_t supported_device,
++ int connector_type,
++ struct amdgpu_i2c_bus_rec *i2c_bus,
++ uint16_t connector_object_id,
++ struct amdgpu_hpd *hpd,
++ struct amdgpu_router *router);
++ void (*stop_mc_access)(struct amdgpu_device *adev,
++ struct amdgpu_mode_mc_save *save);
++ void (*resume_mc_access)(struct amdgpu_device *adev,
++ struct amdgpu_mode_mc_save *save);
++};
++
++struct amdgpu_mode_info {
++ struct atom_context *atom_context;
++ struct card_info *atom_card_info;
++ bool mode_config_initialized;
++ struct amdgpu_crtc *crtcs[6];
++ struct amdgpu_afmt *afmt[7];
++ /* DVI-I properties */
++ struct drm_property *coherent_mode_property;
++ /* DAC enable load detect */
++ struct drm_property *load_detect_property;
++ /* underscan */
++ struct drm_property *underscan_property;
++ struct drm_property *underscan_hborder_property;
++ struct drm_property *underscan_vborder_property;
++ /* audio */
++ struct drm_property *audio_property;
++ /* FMT dithering */
++ struct drm_property *dither_property;
++ /* hardcoded DFP edid from BIOS */
++ struct edid *bios_hardcoded_edid;
++ int bios_hardcoded_edid_size;
++
++ /* pointer to fbdev info structure */
++ struct amdgpu_fbdev *rfbdev;
++ /* firmware flags */
++ u16 firmware_flags;
++ /* pointer to backlight encoder */
++ struct amdgpu_encoder *bl_encoder;
++ struct amdgpu_audio audio; /* audio stuff */
++ int num_crtc; /* number of crtcs */
++ int num_hpd; /* number of hpd pins */
++ int num_dig; /* number of dig blocks */
++ int disp_priority;
++ const struct amdgpu_display_funcs *funcs;
++};
++
++#define AMDGPU_MAX_BL_LEVEL 0xFF
++
++#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
++
++struct amdgpu_backlight_privdata {
++ struct amdgpu_encoder *encoder;
++ uint8_t negative;
++};
++
++#endif
++
++struct amdgpu_atom_ss {
++ uint16_t percentage;
++ uint16_t percentage_divider;
++ uint8_t type;
++ uint16_t step;
++ uint8_t delay;
++ uint8_t range;
++ uint8_t refdiv;
++ /* asic_ss */
++ uint16_t rate;
++ uint16_t amount;
++};
++
++struct amdgpu_crtc {
++ struct drm_crtc base;
++ int crtc_id;
++ u16 lut_r[256], lut_g[256], lut_b[256];
++ bool enabled;
++ bool can_tile;
++ uint32_t crtc_offset;
++ struct drm_gem_object *cursor_bo;
++ uint64_t cursor_addr;
++ int cursor_width;
++ int cursor_height;
++ int max_cursor_width;
++ int max_cursor_height;
++ enum amdgpu_rmx_type rmx_type;
++ u8 h_border;
++ u8 v_border;
++ fixed20_12 vsc;
++ fixed20_12 hsc;
++ struct drm_display_mode native_mode;
++ u32 pll_id;
++ /* page flipping */
++ struct workqueue_struct *pflip_queue;
++ struct amdgpu_flip_work *pflip_works;
++ enum amdgpu_flip_status pflip_status;
++ int deferred_flip_completion;
++ /* pll sharing */
++ struct amdgpu_atom_ss ss;
++ bool ss_enabled;
++ u32 adjusted_clock;
++ int bpc;
++ u32 pll_reference_div;
++ u32 pll_post_div;
++ u32 pll_flags;
++ struct drm_encoder *encoder;
++ struct drm_connector *connector;
++ /* for dpm */
++ u32 line_time;
++ u32 wm_low;
++ u32 wm_high;
++ struct drm_display_mode hw_mode;
++};
++
++struct amdgpu_encoder_atom_dig {
++ bool linkb;
++ /* atom dig */
++ bool coherent_mode;
++ int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
++ /* atom lvds/edp */
++ uint32_t lcd_misc;
++ uint16_t panel_pwr_delay;
++ uint32_t lcd_ss_id;
++ /* panel mode */
++ struct drm_display_mode native_mode;
++ struct backlight_device *bl_dev;
++ int dpms_mode;
++ uint8_t backlight_level;
++ int panel_mode;
++ struct amdgpu_afmt *afmt;
++};
++
++struct amdgpu_encoder {
++ struct drm_encoder base;
++ uint32_t encoder_enum;
++ uint32_t encoder_id;
++ uint32_t devices;
++ uint32_t active_device;
++ uint32_t flags;
++ uint32_t pixel_clock;
++ enum amdgpu_rmx_type rmx_type;
++ enum amdgpu_underscan_type underscan_type;
++ uint32_t underscan_hborder;
++ uint32_t underscan_vborder;
++ struct drm_display_mode native_mode;
++ void *enc_priv;
++ int audio_polling_active;
++ bool is_ext_encoder;
++ u16 caps;
++};
++
++struct amdgpu_connector_atom_dig {
++ /* displayport */
++ u8 dpcd[DP_RECEIVER_CAP_SIZE];
++ u8 dp_sink_type;
++ int dp_clock;
++ int dp_lane_count;
++ bool edp_on;
++};
++
++struct amdgpu_gpio_rec {
++ bool valid;
++ u8 id;
++ u32 reg;
++ u32 mask;
++ u32 shift;
++};
++
++struct amdgpu_hpd {
++ enum amdgpu_hpd_id hpd;
++ u8 plugged_state;
++ struct amdgpu_gpio_rec gpio;
++};
++
++struct amdgpu_router {
++ u32 router_id;
++ struct amdgpu_i2c_bus_rec i2c_info;
++ u8 i2c_addr;
++ /* i2c mux */
++ bool ddc_valid;
++ u8 ddc_mux_type;
++ u8 ddc_mux_control_pin;
++ u8 ddc_mux_state;
++ /* clock/data mux */
++ bool cd_valid;
++ u8 cd_mux_type;
++ u8 cd_mux_control_pin;
++ u8 cd_mux_state;
++};
++
++enum amdgpu_connector_audio {
++ AMDGPU_AUDIO_DISABLE = 0,
++ AMDGPU_AUDIO_ENABLE = 1,
++ AMDGPU_AUDIO_AUTO = 2
++};
++
++enum amdgpu_connector_dither {
++ AMDGPU_FMT_DITHER_DISABLE = 0,
++ AMDGPU_FMT_DITHER_ENABLE = 1,
++};
++
++struct amdgpu_connector {
++ struct drm_connector base;
++ uint32_t connector_id;
++ uint32_t devices;
++ struct amdgpu_i2c_chan *ddc_bus;
++ /* some systems have an hdmi and vga port with a shared ddc line */
++ bool shared_ddc;
++ bool use_digital;
++ /* we need to keep the EDID between detect and get_modes,
++ due to analog/digital/tvencoder switching */
++ struct edid *edid;
++ void *con_priv;
++ bool dac_load_detect;
++ bool detected_by_load; /* if the connection status was determined by load */
++ uint16_t connector_object_id;
++ struct amdgpu_hpd hpd;
++ struct amdgpu_router router;
++ struct amdgpu_i2c_chan *router_bus;
++ enum amdgpu_connector_audio audio;
++ enum amdgpu_connector_dither dither;
++ unsigned pixelclock_for_modeset;
++};
++
++struct amdgpu_framebuffer {
++ struct drm_framebuffer base;
++ struct drm_gem_object *obj;
++};
++
++#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
++ ((em) == ATOM_ENCODER_MODE_DP_MST))
++
++void amdgpu_link_encoder_connector(struct drm_device *dev);
++
++struct drm_connector *
++amdgpu_get_connector_for_encoder(struct drm_encoder *encoder);
++struct drm_connector *
++amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder);
++bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
++ u32 pixel_clock);
++
++u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
++struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder);
++
++bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux);
++
++void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
++
++int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
++ unsigned int flags,
++ int *vpos, int *hpos, ktime_t *stime,
++ ktime_t *etime);
++
++int amdgpu_framebuffer_init(struct drm_device *dev,
++ struct amdgpu_framebuffer *rfb,
++ struct drm_mode_fb_cmd2 *mode_cmd,
++ struct drm_gem_object *obj);
++
++int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
++
++void amdgpu_enc_destroy(struct drm_encoder *encoder);
++void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
++bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
++ const struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode);
++void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *adjusted_mode);
++int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
++
++/* fbdev layer */
++int amdgpu_fbdev_init(struct amdgpu_device *adev);
++void amdgpu_fbdev_fini(struct amdgpu_device *adev);
++void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
++int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
++bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
++
++void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
++
++
++int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
++
++/* amdgpu_display.c */
++void amdgpu_print_display_setup(struct drm_device *dev);
++int amdgpu_modeset_create_props(struct amdgpu_device *adev);
++int amdgpu_crtc_set_config(struct drm_mode_set *set);
++int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ struct drm_pending_vblank_event *event,
++ uint32_t page_flip_flags);
++extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+new file mode 100644
+index 0000000..b515827
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -0,0 +1,646 @@
++/*
++ * Copyright 2009 Jerome Glisse.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Jerome Glisse <glisse@freedesktop.org>
++ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ * Dave Airlie
++ */
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_trace.h"
++
++
++int amdgpu_ttm_init(struct amdgpu_device *adev);
++void amdgpu_ttm_fini(struct amdgpu_device *adev);
++
++static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
++ struct ttm_mem_reg *mem)
++{
++ u64 start = (u64)mem->start << PAGE_SHIFT;
++ u64 end = start + mem->size;
++
++ if (start >= adev->mc.visible_vram_size)
++ return 0;
++
++ return min(end, adev->mc.visible_vram_size) - start;
++}
++
++static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
++ struct ttm_mem_reg *old_mem,
++ struct ttm_mem_reg *new_mem)
++{
++ u64 vis_size;
++ if (!adev)
++ return;
++
++ if (new_mem) {
++ switch (new_mem->mem_type) {
++ case TTM_PL_TT:
++ atomic64_add(new_mem->size, &adev->gtt_usage);
++ break;
++ case TTM_PL_VRAM:
++ atomic64_add(new_mem->size, &adev->vram_usage);
++ vis_size = amdgpu_get_vis_part_size(adev, new_mem);
++ atomic64_add(vis_size, &adev->vram_vis_usage);
++ break;
++ }
++ }
++
++ if (old_mem) {
++ switch (old_mem->mem_type) {
++ case TTM_PL_TT:
++ atomic64_sub(old_mem->size, &adev->gtt_usage);
++ break;
++ case TTM_PL_VRAM:
++ atomic64_sub(old_mem->size, &adev->vram_usage);
++ vis_size = amdgpu_get_vis_part_size(adev, old_mem);
++ atomic64_sub(vis_size, &adev->vram_vis_usage);
++ break;
++ }
++ }
++}
++
++static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
++{
++ struct amdgpu_bo *bo;
++
++ bo = container_of(tbo, struct amdgpu_bo, tbo);
++
++ amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
++ amdgpu_mn_unregister(bo);
++
++ mutex_lock(&bo->adev->gem.mutex);
++ list_del_init(&bo->list);
++ mutex_unlock(&bo->adev->gem.mutex);
++ drm_gem_object_release(&bo->gem_base);
++ kfree(bo->metadata);
++ kfree(bo);
++}
++
++bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
++{
++ return bo->destroy == &amdgpu_ttm_bo_destroy;
++}
++
++void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
++{
++ u32 c = 0, i;
++ rbo->placement.placement = rbo->placements;
++ rbo->placement.busy_placement = rbo->placements;
++
++ if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
++ if (rbo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
++ rbo->adev->mc.visible_vram_size < rbo->adev->mc.real_vram_size) {
++ rbo->placements[c].fpfn =
++ rbo->adev->mc.visible_vram_size >> PAGE_SHIFT;
++ rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
++ TTM_PL_FLAG_VRAM;
++ }
++ rbo->placements[c].fpfn = 0;
++ rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
++ TTM_PL_FLAG_VRAM;
++ }
++
++ if (domain & AMDGPU_GEM_DOMAIN_GTT) {
++ if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_UC) {
++ rbo->placements[c].fpfn = 0;
++ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
++ } else if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_WC) {
++ rbo->placements[c].fpfn = 0;
++ rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
++ TTM_PL_FLAG_UNCACHED;
++ } else {
++ rbo->placements[c].fpfn = 0;
++ rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
++ }
++ }
++
++ if (domain & AMDGPU_GEM_DOMAIN_CPU) {
++ if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_UC) {
++ rbo->placements[c].fpfn = 0;
++ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
++ } else if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_WC) {
++ rbo->placements[c].fpfn = 0;
++ rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
++ TTM_PL_FLAG_UNCACHED;
++ } else {
++ rbo->placements[c].fpfn = 0;
++ rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
++ }
++ }
++
++ if (domain & AMDGPU_GEM_DOMAIN_GDS) {
++ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
++ AMDGPU_PL_FLAG_GDS;
++ }
++ if (domain & AMDGPU_GEM_DOMAIN_GWS) {
++ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
++ AMDGPU_PL_FLAG_GWS;
++ }
++ if (domain & AMDGPU_GEM_DOMAIN_OA) {
++ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
++ AMDGPU_PL_FLAG_OA;
++ }
++
++ if (!c) {
++ rbo->placements[c].fpfn = 0;
++ rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
++ TTM_PL_FLAG_SYSTEM;
++ }
++ rbo->placement.num_placement = c;
++ rbo->placement.num_busy_placement = c;
++
++ for (i = 0; i < c; i++) {
++ if ((rbo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
++ (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
++ !rbo->placements[i].fpfn)
++ rbo->placements[i].lpfn =
++ rbo->adev->mc.visible_vram_size >> PAGE_SHIFT;
++ else
++ rbo->placements[i].lpfn = 0;
++ }
++
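++ /* Hint TTM to allocate BOs larger than 512 KiB from the top of the
++ * address range; this keeps the bottom free for small buffers and
++ * reduces fragmentation. The 512 KiB cut-off is a heuristic.
++ */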
++ if (rbo->tbo.mem.size > 512 * 1024) {
++ for (i = 0; i < c; i++) {
++ rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
++ }
++ }
++}
++
++int amdgpu_bo_create(struct amdgpu_device *adev,
++ unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags,
++ struct sg_table *sg, struct amdgpu_bo **bo_ptr)
++{
++ struct amdgpu_bo *bo;
++ enum ttm_bo_type type;
++ unsigned long page_align;
++ size_t acc_size;
++ int r;
++
++ /* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
++ * do this as a temporary workaround
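++ * (0x8000 bytes = 32 KiB, i.e. 8 GPU pages of 4 KiB each).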
++ */
++ if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
++ if (adev->asic_type >= CHIP_TOPAZ) {
++ if (byte_align & 0x7fff)
++ byte_align = ALIGN(byte_align, 0x8000);
++ if (size & 0x7fff)
++ size = ALIGN(size, 0x8000);
++ }
++ }
++
++ page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
++ size = ALIGN(size, PAGE_SIZE);
++
++ if (kernel) {
++ type = ttm_bo_type_kernel;
++ } else if (sg) {
++ type = ttm_bo_type_sg;
++ } else {
++ type = ttm_bo_type_device;
++ }
++ *bo_ptr = NULL;
++
++ acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
++ sizeof(struct amdgpu_bo));
++
++ bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
++ if (bo == NULL)
++ return -ENOMEM;
++ r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
++ if (unlikely(r)) {
++ kfree(bo);
++ return r;
++ }
++ bo->adev = adev;
++ INIT_LIST_HEAD(&bo->list);
++ INIT_LIST_HEAD(&bo->va);
++ bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
++ AMDGPU_GEM_DOMAIN_GTT |
++ AMDGPU_GEM_DOMAIN_CPU |
++ AMDGPU_GEM_DOMAIN_GDS |
++ AMDGPU_GEM_DOMAIN_GWS |
++ AMDGPU_GEM_DOMAIN_OA);
++
++ bo->flags = flags;
++ amdgpu_ttm_placement_from_domain(bo, domain);
++ /* Kernel allocations are uninterruptible */
++ down_read(&adev->pm.mclk_lock);
++ r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
++ &bo->placement, page_align, !kernel, NULL,
++ acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
++ up_read(&adev->pm.mclk_lock);
++ if (unlikely(r != 0)) {
++ return r;
++ }
++ *bo_ptr = bo;
++
++ trace_amdgpu_bo_create(bo);
++
++ return 0;
++}
++
++int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
++{
++ bool is_iomem;
++ int r;
++
++ if (bo->kptr) {
++ if (ptr) {
++ *ptr = bo->kptr;
++ }
++ return 0;
++ }
++ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
++ if (r) {
++ return r;
++ }
++ bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
++ if (ptr) {
++ *ptr = bo->kptr;
++ }
++ return 0;
++}
++
++void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
++{
++ if (bo->kptr == NULL)
++ return;
++ bo->kptr = NULL;
++ ttm_bo_kunmap(&bo->kmap);
++}
++
++struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
++{
++ if (bo == NULL)
++ return NULL;
++
++ ttm_bo_reference(&bo->tbo);
++ return bo;
++}
++
++void amdgpu_bo_unref(struct amdgpu_bo **bo)
++{
++ struct ttm_buffer_object *tbo;
++
++ if ((*bo) == NULL)
++ return;
++
++ tbo = &((*bo)->tbo);
++ ttm_bo_unref(&tbo);
++ if (tbo == NULL)
++ *bo = NULL;
++}
++
++int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 max_offset,
++ u64 *gpu_addr)
++{
++ int r, i;
++
++ if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
++ return -EPERM;
++
++ if (bo->pin_count) {
++ bo->pin_count++;
++ if (gpu_addr)
++ *gpu_addr = amdgpu_bo_gpu_offset(bo);
++
++ if (max_offset != 0) {
++ u64 domain_start;
++
++ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
++ domain_start = bo->adev->mc.vram_start;
++ else
++ domain_start = bo->adev->mc.gtt_start;
++ WARN_ON_ONCE(max_offset <
++ (amdgpu_bo_gpu_offset(bo) - domain_start));
++ }
++
++ return 0;
++ }
++ amdgpu_ttm_placement_from_domain(bo, domain);
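++ /* placements[i].lpfn caps the highest page frame a placement may use;
++ * limiting it to the visible VRAM size keeps a CPU-accessible pin
++ * inside the aperture, otherwise max_offset (if any) is the cap.
++ */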
++ for (i = 0; i < bo->placement.num_placement; i++) {
++ /* force to pin into visible video ram */
++ if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
++ !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
++ (!max_offset || max_offset > bo->adev->mc.visible_vram_size))
++ bo->placements[i].lpfn =
++ bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
++ else
++ bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
++
++ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
++ }
++
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
++ if (likely(r == 0)) {
++ bo->pin_count = 1;
++ if (gpu_addr != NULL)
++ *gpu_addr = amdgpu_bo_gpu_offset(bo);
++ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
++ bo->adev->vram_pin_size += amdgpu_bo_size(bo);
++ else
++ bo->adev->gart_pin_size += amdgpu_bo_size(bo);
++ } else {
++ dev_err(bo->adev->dev, "%p pin failed\n", bo);
++ }
++ return r;
++}
++
++int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
++{
++ return amdgpu_bo_pin_restricted(bo, domain, 0, gpu_addr);
++}
++
++int amdgpu_bo_unpin(struct amdgpu_bo *bo)
++{
++ int r, i;
++
++ if (!bo->pin_count) {
++ dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
++ return 0;
++ }
++ bo->pin_count--;
++ if (bo->pin_count)
++ return 0;
++ for (i = 0; i < bo->placement.num_placement; i++) {
++ bo->placements[i].lpfn = 0;
++ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
++ }
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
++ if (likely(r == 0)) {
++ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
++ bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
++ else
++ bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
++ } else {
++ dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
++ }
++ return r;
++}
++
++int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
++{
++ /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
++ if (0 && (adev->flags & AMDGPU_IS_APU)) {
++ /* Useless to evict on IGP chips */
++ return 0;
++ }
++ return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
++}
++
++void amdgpu_bo_force_delete(struct amdgpu_device *adev)
++{
++ struct amdgpu_bo *bo, *n;
++
++ if (list_empty(&adev->gem.objects)) {
++ return;
++ }
++ dev_err(adev->dev, "Userspace still has active objects !\n");
++ list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
++ mutex_lock(&adev->ddev->struct_mutex);
++ dev_err(adev->dev, "%p %p %lu %lu force free\n",
++ &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
++ *((unsigned long *)&bo->gem_base.refcount));
++ mutex_lock(&bo->adev->gem.mutex);
++ list_del_init(&bo->list);
++ mutex_unlock(&bo->adev->gem.mutex);
++ /* this should unref the ttm bo */
++ drm_gem_object_unreference(&bo->gem_base);
++ mutex_unlock(&adev->ddev->struct_mutex);
++ }
++}
++
++int amdgpu_bo_init(struct amdgpu_device *adev)
++{
++ /* Add an MTRR for the VRAM */
++ adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
++ adev->mc.aper_size);
++ DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
++ adev->mc.mc_vram_size >> 20,
++ (unsigned long long)adev->mc.aper_size >> 20);
++ DRM_INFO("RAM width %dbits DDR\n",
++ adev->mc.vram_width);
++ return amdgpu_ttm_init(adev);
++}
++
++void amdgpu_bo_fini(struct amdgpu_device *adev)
++{
++ amdgpu_ttm_fini(adev);
++ arch_phys_wc_del(adev->mc.vram_mtrr);
++}
++
++int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
++ struct vm_area_struct *vma)
++{
++ return ttm_fbdev_mmap(vma, &bo->tbo);
++}
++
++int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
++{
++ unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
++
++ bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
++ bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
++ mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
++ tilesplit = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
++ stilesplit = (tiling_flags >> AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK;
++ switch (bankw) {
++ case 0:
++ case 1:
++ case 2:
++ case 4:
++ case 8:
++ break;
++ default:
++ return -EINVAL;
++ }
++ switch (bankh) {
++ case 0:
++ case 1:
++ case 2:
++ case 4:
++ case 8:
++ break;
++ default:
++ return -EINVAL;
++ }
++ switch (mtaspect) {
++ case 0:
++ case 1:
++ case 2:
++ case 4:
++ case 8:
++ break;
++ default:
++ return -EINVAL;
++ }
++ if (tilesplit > 6) {
++ return -EINVAL;
++ }
++ if (stilesplit > 6) {
++ return -EINVAL;
++ }
++
++ bo->tiling_flags = tiling_flags;
++ return 0;
++}
++
++void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
++{
++ lockdep_assert_held(&bo->tbo.resv->lock.base);
++
++ if (tiling_flags)
++ *tiling_flags = bo->tiling_flags;
++}
++
++int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
++ uint32_t metadata_size, uint64_t flags)
++{
++ void *buffer;
++
++ if (!metadata_size) {
++ if (bo->metadata_size) {
++ kfree(bo->metadata);
++ bo->metadata_size = 0;
++ }
++ return 0;
++ }
++
++ if (metadata == NULL)
++ return -EINVAL;
++
++ buffer = kzalloc(metadata_size, GFP_KERNEL);
++ if (buffer == NULL)
++ return -ENOMEM;
++
++ memcpy(buffer, metadata, metadata_size);
++
++ kfree(bo->metadata);
++ bo->metadata_flags = flags;
++ bo->metadata = buffer;
++ bo->metadata_size = metadata_size;
++
++ return 0;
++}
++
++int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
++ size_t buffer_size, uint32_t *metadata_size,
++ uint64_t *flags)
++{
++ if (!buffer && !metadata_size)
++ return -EINVAL;
++
++ if (buffer) {
++ if (buffer_size < bo->metadata_size)
++ return -EINVAL;
++
++ if (bo->metadata_size)
++ memcpy(buffer, bo->metadata, bo->metadata_size);
++ }
++
++ if (metadata_size)
++ *metadata_size = bo->metadata_size;
++ if (flags)
++ *flags = bo->metadata_flags;
++
++ return 0;
++}
++
++void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *new_mem)
++{
++ struct amdgpu_bo *rbo;
++
++ if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
++ return;
++
++ rbo = container_of(bo, struct amdgpu_bo, tbo);
++ amdgpu_vm_bo_invalidate(rbo->adev, rbo);
++
++ /* update statistics */
++ if (!new_mem)
++ return;
++
++ /* move_notify is called before move happens */
++ amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
++}
++
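++/* Called on a CPU page fault: if the BO sits in VRAM beyond the
++ * CPU-visible aperture, restrict its placement to the visible window
++ * and revalidate so TTM migrates it somewhere mappable.
++ */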
++int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
++{
++ struct amdgpu_device *adev;
++ struct amdgpu_bo *rbo;
++ unsigned long offset, size;
++ int r;
++
++ if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
++ return 0;
++ rbo = container_of(bo, struct amdgpu_bo, tbo);
++ adev = rbo->adev;
++ if (bo->mem.mem_type == TTM_PL_VRAM) {
++ size = bo->mem.num_pages << PAGE_SHIFT;
++ offset = bo->mem.start << PAGE_SHIFT;
++ if ((offset + size) > adev->mc.visible_vram_size) {
++ /* hurrah, the memory is not visible! */
++ amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_VRAM);
++ rbo->placements[0].lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
++ r = ttm_bo_validate(bo, &rbo->placement, false, false);
++ if (unlikely(r != 0))
++ return r;
++ offset = bo->mem.start << PAGE_SHIFT;
++ /* this should not happen */
++ if ((offset + size) > adev->mc.visible_vram_size)
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_bo_fence - add fence to buffer object
++ *
++ * @bo: buffer object in question
++ * @fence: fence to add
++ * @shared: true if fence should be added shared
++ *
++ */
++void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
++ bool shared)
++{
++ struct reservation_object *resv = bo->tbo.resv;
++
++ if (shared)
++ reservation_object_add_shared_fence(resv, &fence->base);
++ else
++ reservation_object_add_excl_fence(resv, &fence->base);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+new file mode 100644
+index 0000000..b1e0a03
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -0,0 +1,196 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#ifndef __AMDGPU_OBJECT_H__
++#define __AMDGPU_OBJECT_H__
++
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++
++/**
++ * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
++ * @mem_type: ttm memory type
++ *
++ * Returns corresponding domain of the ttm mem_type
++ */
++static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
++{
++ switch (mem_type) {
++ case TTM_PL_VRAM:
++ return AMDGPU_GEM_DOMAIN_VRAM;
++ case TTM_PL_TT:
++ return AMDGPU_GEM_DOMAIN_GTT;
++ case TTM_PL_SYSTEM:
++ return AMDGPU_GEM_DOMAIN_CPU;
++ case AMDGPU_PL_GDS:
++ return AMDGPU_GEM_DOMAIN_GDS;
++ case AMDGPU_PL_GWS:
++ return AMDGPU_GEM_DOMAIN_GWS;
++ case AMDGPU_PL_OA:
++ return AMDGPU_GEM_DOMAIN_OA;
++ default:
++ break;
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_bo_reserve - reserve bo
++ * @bo: bo structure
++ * @no_intr: don't return -ERESTARTSYS on pending signal
++ *
++ * Returns:
++ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
++ * a signal. Release all buffer reservations and return to user-space.
++ */
++static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
++{
++ int r;
++
++ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
++ if (unlikely(r != 0)) {
++ if (r != -ERESTARTSYS)
++ dev_err(bo->adev->dev, "%p reserve failed\n", bo);
++ return r;
++ }
++ return 0;
++}
++
++static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
++{
++ ttm_bo_unreserve(&bo->tbo);
++}
++
++/**
++ * amdgpu_bo_gpu_offset - return GPU offset of bo
++ * @bo: amdgpu object for which we query the offset
++ *
++ * Returns current GPU offset of the object.
++ *
++ * Note: the object should either be pinned or reserved when calling this
++ * function; it might be useful to add a check for this for debugging.
++ */
++static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
++{
++ return bo->tbo.offset;
++}
++
++static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
++{
++ return bo->tbo.num_pages << PAGE_SHIFT;
++}
++
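++/* Sizes above are in bytes; the helpers below convert between CPU
++ * pages (PAGE_SIZE) and GPU pages (AMDGPU_GPU_PAGE_SIZE, 4 KiB), which
++ * need not be the same size.
++ */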
++static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
++{
++ return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
++}
++
++static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
++{
++ return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
++}
++
++/**
++ * amdgpu_bo_mmap_offset - return mmap offset of bo
++ * @bo: amdgpu object for which we query the offset
++ *
++ * Returns mmap offset of the object.
++ */
++static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
++{
++ return drm_vma_node_offset_addr(&bo->tbo.vma_node);
++}
++
++int amdgpu_bo_create(struct amdgpu_device *adev,
++ unsigned long size, int byte_align,
++ bool kernel, u32 domain, u64 flags,
++ struct sg_table *sg,
++ struct amdgpu_bo **bo_ptr);
++int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
++void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
++struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
++void amdgpu_bo_unref(struct amdgpu_bo **bo);
++int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
++int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
++ u64 max_offset, u64 *gpu_addr);
++int amdgpu_bo_unpin(struct amdgpu_bo *bo);
++int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
++void amdgpu_bo_force_delete(struct amdgpu_device *adev);
++int amdgpu_bo_init(struct amdgpu_device *adev);
++void amdgpu_bo_fini(struct amdgpu_device *adev);
++int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
++ struct vm_area_struct *vma);
++int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
++void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
++int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
++ uint32_t metadata_size, uint64_t flags);
++int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
++ size_t buffer_size, uint32_t *metadata_size,
++ uint64_t *flags);
++void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *new_mem);
++int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
++void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
++ bool shared);
++
++/*
++ * sub allocation
++ */
++
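++/* A sa_bo is a slice carved out of a single manager-owned buffer: its
++ * GPU/CPU address is the manager's base address plus the slice's start
++ * offset (soffset).
++ */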
++static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
++{
++ return sa_bo->manager->gpu_addr + sa_bo->soffset;
++}
++
++static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
++{
++ return sa_bo->manager->cpu_ptr + sa_bo->soffset;
++}
++
++int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager,
++ unsigned size, u32 align, u32 domain);
++void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager);
++int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager);
++int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager);
++int amdgpu_sa_bo_new(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager,
++ struct amdgpu_sa_bo **sa_bo,
++ unsigned size, unsigned align);
++void amdgpu_sa_bo_free(struct amdgpu_device *adev,
++ struct amdgpu_sa_bo **sa_bo,
++ struct amdgpu_fence *fence);
++#if defined(CONFIG_DEBUG_FS)
++void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
++ struct seq_file *m);
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+new file mode 100644
+index 0000000..d153149
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+@@ -0,0 +1,350 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "atom.h"
++#include "atombios_encoders.h"
++#include <asm/div64.h>
++#include <linux/gcd.h>
++
++/**
++ * amdgpu_pll_reduce_ratio - fractional number reduction
++ *
++ * @nom: numerator
++ * @den: denominator
++ * @nom_min: minimum value for the numerator
++ * @den_min: minimum value for the denominator
++ *
++ * Find the greatest common divisor and apply it to both numerator and
++ * denominator, but make sure the numerator and denominator are at least
++ * as large as their minimum values.
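++ *
++ * Example: nom = 600, den = 800 reduces to 3/4; with nom_min = 10 and
++ * den_min = 1, both are then scaled by DIV_ROUND_UP(10, 3) = 4,
++ * giving 12/16.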
++ */
++static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
++ unsigned nom_min, unsigned den_min)
++{
++ unsigned tmp;
++
++ /* reduce the numbers to a simpler ratio */
++ tmp = gcd(*nom, *den);
++ *nom /= tmp;
++ *den /= tmp;
++
++ /* make sure the numerator is large enough */
++ if (*nom < nom_min) {
++ tmp = DIV_ROUND_UP(nom_min, *nom);
++ *nom *= tmp;
++ *den *= tmp;
++ }
++
++ /* make sure the denominator is large enough */
++ if (*den < den_min) {
++ tmp = DIV_ROUND_UP(den_min, *den);
++ *nom *= tmp;
++ *den *= tmp;
++ }
++}
++
++/**
++ * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
++ *
++ * @nom: numerator
++ * @den: denominator
++ * @post_div: post divider
++ * @fb_div_max: feedback divider maximum
++ * @ref_div_max: reference divider maximum
++ * @fb_div: resulting feedback divider
++ * @ref_div: resulting reference divider
++ *
++ * Calculate feedback and reference divider for a given post divider. Makes
++ * sure we stay within the limits.
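++ *
++ * Example (unitless): for nom/den = 100/27 and post_div = 4 this picks
++ * ref_div = DIV_ROUND_CLOSEST(27, 4) = 7 and
++ * fb_div = DIV_ROUND_CLOSEST(100 * 7 * 4, 27) = 104, so the realised
++ * ratio 104 / (7 * 4) ~= 3.714 approximates the target 100/27 ~= 3.704.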
++ */
++static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
++ unsigned fb_div_max, unsigned ref_div_max,
++ unsigned *fb_div, unsigned *ref_div)
++{
++ /* limit reference * post divider to a maximum */
++ ref_div_max = min(128 / post_div, ref_div_max);
++
++ /* get matching reference and feedback divider */
++ *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
++ *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
++
++ /* limit fb divider to its maximum */
++ if (*fb_div > fb_div_max) {
++ *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
++ *fb_div = fb_div_max;
++ }
++}
++
++/**
++ * amdgpu_pll_compute - compute PLL parameters
++ *
++ * @pll: information about the PLL
++ * @freq: requested frequency
++ * @dot_clock_p: resulting pixel clock
++ * @fb_div_p: resulting feedback divider
++ * @frac_fb_div_p: fractional part of the feedback divider
++ * @ref_div_p: resulting reference divider
++ * @post_div_p: resulting post divider
++ *
++ * Try to calculate the PLL parameters to generate the given frequency:
++ * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
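++ *
++ * Example: with a 27 MHz reference clock, fb_div = 60, ref_div = 2 and
++ * post_div = 3 give dot_clock = 27 * 60 / (2 * 3) = 270 MHz.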
++ */
++void amdgpu_pll_compute(struct amdgpu_pll *pll,
++ u32 freq,
++ u32 *dot_clock_p,
++ u32 *fb_div_p,
++ u32 *frac_fb_div_p,
++ u32 *ref_div_p,
++ u32 *post_div_p)
++{
++ unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
++ freq : freq / 10;
++
++ unsigned fb_div_min, fb_div_max, fb_div;
++ unsigned post_div_min, post_div_max, post_div;
++ unsigned ref_div_min, ref_div_max, ref_div;
++ unsigned post_div_best, diff_best;
++ unsigned nom, den;
++
++ /* determine allowed feedback divider range */
++ fb_div_min = pll->min_feedback_div;
++ fb_div_max = pll->max_feedback_div;
++
++ if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
++ fb_div_min *= 10;
++ fb_div_max *= 10;
++ }
++
++ /* determine allowed ref divider range */
++ if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
++ ref_div_min = pll->reference_div;
++ else
++ ref_div_min = pll->min_ref_div;
++
++ if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
++ pll->flags & AMDGPU_PLL_USE_REF_DIV)
++ ref_div_max = pll->reference_div;
++ else
++ ref_div_max = pll->max_ref_div;
++
++ /* determine allowed post divider range */
++ if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
++ post_div_min = pll->post_div;
++ post_div_max = pll->post_div;
++ } else {
++ unsigned vco_min, vco_max;
++
++ if (pll->flags & AMDGPU_PLL_IS_LCD) {
++ vco_min = pll->lcd_pll_out_min;
++ vco_max = pll->lcd_pll_out_max;
++ } else {
++ vco_min = pll->pll_out_min;
++ vco_max = pll->pll_out_max;
++ }
++
++ if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
++ vco_min *= 10;
++ vco_max *= 10;
++ }
++
++ post_div_min = vco_min / target_clock;
++ if ((target_clock * post_div_min) < vco_min)
++ ++post_div_min;
++ if (post_div_min < pll->min_post_div)
++ post_div_min = pll->min_post_div;
++
++ post_div_max = vco_max / target_clock;
++ if ((target_clock * post_div_max) > vco_max)
++ --post_div_max;
++ if (post_div_max > pll->max_post_div)
++ post_div_max = pll->max_post_div;
++ }
++
++ /* represent the searched ratio as a fractional number */
++ nom = target_clock;
++ den = pll->reference_freq;
++
++ /* reduce the numbers to a simpler ratio */
++ amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
++
++ /* now search for a post divider */
++ if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
++ post_div_best = post_div_min;
++ else
++ post_div_best = post_div_max;
++ diff_best = ~0;
++
++ for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
++ unsigned diff;
++ amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
++ ref_div_max, &fb_div, &ref_div);
++ diff = abs(target_clock - (pll->reference_freq * fb_div) /
++ (ref_div * post_div));
++
++ if (diff < diff_best || (diff == diff_best &&
++ !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {
++
++ post_div_best = post_div;
++ diff_best = diff;
++ }
++ }
++ post_div = post_div_best;
++
++ /* get the feedback and reference divider for the optimal value */
++ amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
++ &fb_div, &ref_div);
++
++ /* reduce the numbers to a simpler ratio once more */
++ /* this also makes sure that the reference divider is large enough */
++ amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
++
++ /* avoid high jitter with small fractional dividers */
++ if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
++ fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
++ if (fb_div < fb_div_min) {
++ unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
++ fb_div *= tmp;
++ ref_div *= tmp;
++ }
++ }
++
++ /* and finally save the result */
++ if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
++ *fb_div_p = fb_div / 10;
++ *frac_fb_div_p = fb_div % 10;
++ } else {
++ *fb_div_p = fb_div;
++ *frac_fb_div_p = 0;
++ }
++
++ *dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
++ (pll->reference_freq * *frac_fb_div_p)) /
++ (ref_div * post_div * 10);
++ *ref_div_p = ref_div;
++ *post_div_p = post_div;
++
++ DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
++ freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
++ ref_div, post_div);
++}
++
++/**
++ * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
++ *
++ * @crtc: drm crtc
++ *
++ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
++ */
++u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_crtc *test_crtc;
++ struct amdgpu_crtc *test_amdgpu_crtc;
++ u32 pll_in_use = 0;
++
++ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc == test_crtc)
++ continue;
++
++ test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
++ if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
++ pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
++ }
++ return pll_in_use;
++}
++
++/**
++ * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
++ *
++ * @crtc: drm crtc
++ *
++ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
++ * also in DP mode. For DP, a single PPLL can be used for all DP
++ * crtcs/encoders.
++ */
++int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_crtc *test_crtc;
++ struct amdgpu_crtc *test_amdgpu_crtc;
++
++ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc == test_crtc)
++ continue;
++ test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
++ if (test_amdgpu_crtc->encoder &&
++ ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
++ /* for DP use the same PLL for all */
++ if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
++ return test_amdgpu_crtc->pll_id;
++ }
++ }
++ return ATOM_PPLL_INVALID;
++}
++
++/**
++ * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
++ *
++ * @crtc: drm crtc
++ * @encoder: drm encoder
++ *
++ * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
++ * be shared (i.e., same clock).
++ */
++int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct drm_crtc *test_crtc;
++ struct amdgpu_crtc *test_amdgpu_crtc;
++ u32 adjusted_clock, test_adjusted_clock;
++
++ adjusted_clock = amdgpu_crtc->adjusted_clock;
++
++ if (adjusted_clock == 0)
++ return ATOM_PPLL_INVALID;
++
++ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc == test_crtc)
++ continue;
++ test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
++ if (test_amdgpu_crtc->encoder &&
++ !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
++ /* check if we are already driving this connector with another crtc */
++ if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
++ /* if we are, return that pll */
++ if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
++ return test_amdgpu_crtc->pll_id;
++ }
++ /* for non-DP check the clock */
++ test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
++ if ((crtc->mode.clock == test_crtc->mode.clock) &&
++ (adjusted_clock == test_adjusted_clock) &&
++ (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
++ (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
++ return test_amdgpu_crtc->pll_id;
++ }
++ }
++ return ATOM_PPLL_INVALID;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
+new file mode 100644
+index 0000000..db6136f
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
+@@ -0,0 +1,38 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_PLL_H__
++#define __AMDGPU_PLL_H__
++
++void amdgpu_pll_compute(struct amdgpu_pll *pll,
++ u32 freq,
++ u32 *dot_clock_p,
++ u32 *fb_div_p,
++ u32 *frac_fb_div_p,
++ u32 *ref_div_p,
++ u32 *post_div_p);
++u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc);
++int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc);
++int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+new file mode 100644
+index 0000000..8978254
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -0,0 +1,801 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Rafał Miłecki <zajec5@gmail.com>
++ * Alex Deucher <alexdeucher@gmail.com>
++ */
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "amdgpu_drv.h"
++#include "amdgpu_pm.h"
++#include "amdgpu_dpm.h"
++#include "atom.h"
++#include <linux/power_supply.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++
++static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
++
++void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
++{
++ if (adev->pm.dpm_enabled) {
++ mutex_lock(&adev->pm.mutex);
++ if (power_supply_is_system_supplied() > 0)
++ adev->pm.dpm.ac_power = true;
++ else
++ adev->pm.dpm.ac_power = false;
++ if (adev->pm.funcs->enable_bapm)
++ amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
++ mutex_unlock(&adev->pm.mutex);
++ }
++}
++
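++/* sysfs interface: power_dpm_state accepts "battery", "balanced" or
++ * "performance", e.g. (the exact path may vary by card):
++ * echo battery > /sys/class/drm/card0/device/power_dpm_state
++ */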
++static ssize_t amdgpu_get_dpm_state(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct drm_device *ddev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = ddev->dev_private;
++ enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state;
++
++ return snprintf(buf, PAGE_SIZE, "%s\n",
++ (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
++ (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
++}
++
++static ssize_t amdgpu_set_dpm_state(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ struct drm_device *ddev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = ddev->dev_private;
++
++ mutex_lock(&adev->pm.mutex);
++ if (strncmp("battery", buf, strlen("battery")) == 0)
++ adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
++ else if (strncmp("balanced", buf, strlen("balanced")) == 0)
++ adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
++ else if (strncmp("performance", buf, strlen("performance")) == 0)
++ adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
++ else {
++ mutex_unlock(&adev->pm.mutex);
++ count = -EINVAL;
++ goto fail;
++ }
++ mutex_unlock(&adev->pm.mutex);
++
++ /* Can't set dpm state when the card is off */
++ if (!(adev->flags & AMDGPU_IS_PX) ||
++ (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
++ amdgpu_pm_compute_clocks(adev);
++fail:
++ return count;
++}
++
++static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct drm_device *ddev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = ddev->dev_private;
++ enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
++
++ return snprintf(buf, PAGE_SIZE, "%s\n",
++ (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
++ (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
++}
++
++static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ struct drm_device *ddev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = ddev->dev_private;
++ enum amdgpu_dpm_forced_level level;
++ int ret = 0;
++
++ mutex_lock(&adev->pm.mutex);
++ if (strncmp("low", buf, strlen("low")) == 0) {
++ level = AMDGPU_DPM_FORCED_LEVEL_LOW;
++ } else if (strncmp("high", buf, strlen("high")) == 0) {
++ level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
++ } else if (strncmp("auto", buf, strlen("auto")) == 0) {
++ level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
++ } else {
++ count = -EINVAL;
++ goto fail;
++ }
++ if (adev->pm.funcs->force_performance_level) {
++ if (adev->pm.dpm.thermal_active) {
++ count = -EINVAL;
++ goto fail;
++ }
++ ret = amdgpu_dpm_force_performance_level(adev, level);
++ if (ret)
++ count = -EINVAL;
++ }
++fail:
++ mutex_unlock(&adev->pm.mutex);
++
++ return count;
++}
++
++static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
++static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
++ amdgpu_get_dpm_forced_performance_level,
++ amdgpu_set_dpm_forced_performance_level);
++
++static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ int temp;
++
++ if (adev->pm.funcs->get_temperature)
++ temp = amdgpu_dpm_get_temperature(adev);
++ else
++ temp = 0;
++
++ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
++}
++
++static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ int hyst = to_sensor_dev_attr(attr)->index;
++ int temp;
++
++ if (hyst)
++ temp = adev->pm.dpm.thermal.min_temp;
++ else
++ temp = adev->pm.dpm.thermal.max_temp;
++
++ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
++}
++
++static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ u32 pwm_mode = 0;
++
++ if (adev->pm.funcs->get_fan_control_mode)
++ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
++
++ /* never report 0 (full-speed); the fan is always fuse- or SMC-controlled */
++ return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
++}
++
++static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ int err;
++ int value;
++
++ if (!adev->pm.funcs->set_fan_control_mode)
++ return -EINVAL;
++
++ err = kstrtoint(buf, 10, &value);
++ if (err)
++ return err;
++
++ switch (value) {
++ case 1: /* manual, percent-based */
++ amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
++ break;
++ default: /* disable */
++ amdgpu_dpm_set_fan_control_mode(adev, 0);
++ break;
++ }
++
++ return count;
++}
++
++static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ return sprintf(buf, "%i\n", 0);
++}
++
++static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ return sprintf(buf, "%i\n", 255);
++}
++
++static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ int err;
++ u32 value;
++
++ err = kstrtou32(buf, 10, &value);
++ if (err)
++ return err;
++
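++ /* sysfs pwm1 uses a 0-255 scale while the dpm interface takes a
++ * percentage, so e.g. 128 becomes (128 * 100) / 255 = 50%.
++ */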
++ value = (value * 100) / 255;
++
++ err = amdgpu_dpm_set_fan_speed_percent(adev, value);
++ if (err)
++ return err;
++
++ return count;
++}
++
++static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ int err;
++ u32 speed;
++
++ err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
++ if (err)
++ return err;
++
++ speed = (speed * 255) / 100;
++
++ return sprintf(buf, "%i\n", speed);
++}
++
++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
++static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
++static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
++static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
++static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
++static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
++static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
++
++static struct attribute *hwmon_attributes[] = {
++ &sensor_dev_attr_temp1_input.dev_attr.attr,
++ &sensor_dev_attr_temp1_crit.dev_attr.attr,
++ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
++ &sensor_dev_attr_pwm1.dev_attr.attr,
++ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
++ &sensor_dev_attr_pwm1_min.dev_attr.attr,
++ &sensor_dev_attr_pwm1_max.dev_attr.attr,
++ NULL
++};
++
++static umode_t hwmon_attributes_visible(struct kobject *kobj,
++ struct attribute *attr, int index)
++{
++ struct device *dev = container_of(kobj, struct device, kobj);
++ struct amdgpu_device *adev = dev_get_drvdata(dev);
++ umode_t effective_mode = attr->mode;
++
++ /* Skip limit attributes if DPM is not enabled */
++ if (!adev->pm.dpm_enabled &&
++ (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
++ attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
++ return 0;
++
++ /* Skip fan attributes if fan is not present */
++ if (adev->pm.no_fan &&
++ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
++ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
++ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
++ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
++ return 0;
++
++ /* mask fan attributes if this asic has no bindings to expose them */
++ if ((!adev->pm.funcs->get_fan_speed_percent &&
++ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
++ (!adev->pm.funcs->get_fan_control_mode &&
++ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
++ effective_mode &= ~S_IRUGO;
++
++ if ((!adev->pm.funcs->set_fan_speed_percent &&
++ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
++ (!adev->pm.funcs->set_fan_control_mode &&
++ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
++ effective_mode &= ~S_IWUSR;
++
++ /* hide max/min values if we can't both query and manage the fan */
++ if ((!adev->pm.funcs->set_fan_speed_percent &&
++ !adev->pm.funcs->get_fan_speed_percent) &&
++ (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
++ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
++ return 0;
++
++ return effective_mode;
++}
++
++static const struct attribute_group hwmon_attrgroup = {
++ .attrs = hwmon_attributes,
++ .is_visible = hwmon_attributes_visible,
++};
++
++static const struct attribute_group *hwmon_groups[] = {
++ &hwmon_attrgroup,
++ NULL
++};
++
++void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
++{
++ struct amdgpu_device *adev =
++ container_of(work, struct amdgpu_device,
++ pm.dpm.thermal.work);
++ /* switch to the thermal state */
++ enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
++
++ if (!adev->pm.dpm_enabled)
++ return;
++
++ if (adev->pm.funcs->get_temperature) {
++ int temp = amdgpu_dpm_get_temperature(adev);
++
++ if (temp < adev->pm.dpm.thermal.min_temp)
++ /* switch back the user state */
++ dpm_state = adev->pm.dpm.user_state;
++ } else {
++ if (adev->pm.dpm.thermal.high_to_low)
++ /* switch back the user state */
++ dpm_state = adev->pm.dpm.user_state;
++ }
++ mutex_lock(&adev->pm.mutex);
++ if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
++ adev->pm.dpm.thermal_active = true;
++ else
++ adev->pm.dpm.thermal_active = false;
++ adev->pm.dpm.state = dpm_state;
++ mutex_unlock(&adev->pm.mutex);
++
++ amdgpu_pm_compute_clocks(adev);
++}
++
++static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
++ enum amdgpu_pm_state_type dpm_state)
++{
++ int i;
++ struct amdgpu_ps *ps;
++ u32 ui_class;
++ bool single_display = (adev->pm.dpm.new_active_crtc_count < 2);
++
++ /* check if the vblank period is too short to adjust the mclk */
++ if (single_display && adev->pm.funcs->vblank_too_short) {
++ if (amdgpu_dpm_vblank_too_short(adev))
++ single_display = false;
++ }
++
++ /* certain older asics have a separate 3D performance state,
++ * so try that first if the user selected performance
++ */
++ if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
++ dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
++ /* balanced states don't exist at the moment */
++ if (dpm_state == POWER_STATE_TYPE_BALANCED)
++ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
++
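++ /* Two-pass search: first try to match the requested class exactly;
++ * if nothing matches, the fallback switch below downgrades the class
++ * (e.g. UVD-SD -> UVD-HD, thermal -> ACPI) and searches again.
++ */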
++restart_search:
++ /* Pick the best power state based on current conditions */
++ for (i = 0; i < adev->pm.dpm.num_ps; i++) {
++ ps = &adev->pm.dpm.ps[i];
++ ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
++ switch (dpm_state) {
++ /* user states */
++ case POWER_STATE_TYPE_BATTERY:
++ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
++ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
++ if (single_display)
++ return ps;
++ } else
++ return ps;
++ }
++ break;
++ case POWER_STATE_TYPE_BALANCED:
++ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
++ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
++ if (single_display)
++ return ps;
++ } else
++ return ps;
++ }
++ break;
++ case POWER_STATE_TYPE_PERFORMANCE:
++ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
++ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
++ if (single_display)
++ return ps;
++ } else
++ return ps;
++ }
++ break;
++ /* internal states */
++ case POWER_STATE_TYPE_INTERNAL_UVD:
++ if (adev->pm.dpm.uvd_ps)
++ return adev->pm.dpm.uvd_ps;
++ else
++ break;
++ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
++ if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
++ return ps;
++ break;
++ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
++ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
++ return ps;
++ break;
++ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
++ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
++ return ps;
++ break;
++ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
++ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
++ return ps;
++ break;
++ case POWER_STATE_TYPE_INTERNAL_BOOT:
++ return adev->pm.dpm.boot_ps;
++ case POWER_STATE_TYPE_INTERNAL_THERMAL:
++ if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
++ return ps;
++ break;
++ case POWER_STATE_TYPE_INTERNAL_ACPI:
++ if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
++ return ps;
++ break;
++ case POWER_STATE_TYPE_INTERNAL_ULV:
++ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
++ return ps;
++ break;
++ case POWER_STATE_TYPE_INTERNAL_3DPERF:
++ if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
++ return ps;
++ break;
++ default:
++ break;
++ }
++ }
++ /* use a fallback state if we didn't match */
++ switch (dpm_state) {
++ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
++ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
++ goto restart_search;
++ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
++ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
++ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
++ if (adev->pm.dpm.uvd_ps) {
++ return adev->pm.dpm.uvd_ps;
++ } else {
++ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
++ goto restart_search;
++ }
++ case POWER_STATE_TYPE_INTERNAL_THERMAL:
++ dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
++ goto restart_search;
++ case POWER_STATE_TYPE_INTERNAL_ACPI:
++ dpm_state = POWER_STATE_TYPE_BATTERY;
++ goto restart_search;
++ case POWER_STATE_TYPE_BATTERY:
++ case POWER_STATE_TYPE_BALANCED:
++ case POWER_STATE_TYPE_INTERNAL_3DPERF:
++ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
++ goto restart_search;
++ default:
++ break;
++ }
++
++ return NULL;
++}
++
++static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
++{
++ int i;
++ struct amdgpu_ps *ps;
++ enum amdgpu_pm_state_type dpm_state;
++ int ret;
++
++ /* if dpm init failed */
++ if (!adev->pm.dpm_enabled)
++ return;
++
++ if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
++ /* add other state override checks here */
++ if ((!adev->pm.dpm.thermal_active) &&
++ (!adev->pm.dpm.uvd_active))
++ adev->pm.dpm.state = adev->pm.dpm.user_state;
++ }
++ dpm_state = adev->pm.dpm.state;
++
++ ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
++ if (ps)
++ adev->pm.dpm.requested_ps = ps;
++ else
++ return;
++
++ /* no need to reprogram if nothing changed unless we are on BTC+ */
++ if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
++ /* vce just modifies an existing state so force a change */
++ if (ps->vce_active != adev->pm.dpm.vce_active)
++ goto force;
++ if (adev->flags & AMDGPU_IS_APU) {
++ /* for APUs if the num crtcs changed but state is the same,
++ * all we need to do is update the display configuration.
++ */
++ if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
++ /* update display watermarks based on new power state */
++ amdgpu_display_bandwidth_update(adev);
++ /* update displays */
++ amdgpu_dpm_display_configuration_changed(adev);
++ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
++ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
++ }
++ return;
++ } else {
++ /* for BTC+ if the num crtcs hasn't changed and state is the same,
++ * nothing to do, if the num crtcs is > 1 and state is the same,
++ * update display configuration.
++ */
++ if (adev->pm.dpm.new_active_crtcs ==
++ adev->pm.dpm.current_active_crtcs) {
++ return;
++ } else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
++ (adev->pm.dpm.new_active_crtc_count > 1)) {
++ /* update display watermarks based on new power state */
++ amdgpu_display_bandwidth_update(adev);
++ /* update displays */
++ amdgpu_dpm_display_configuration_changed(adev);
++ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
++ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
++ return;
++ }
++ }
++ }
++
++force:
++ if (amdgpu_dpm == 1) {
++ printk("switching from power state:\n");
++ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
++ printk("switching to power state:\n");
++ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
++ }
++
++ mutex_lock(&adev->ddev->struct_mutex);
++ down_write(&adev->pm.mclk_lock);
++ mutex_lock(&adev->ring_lock);
++
++ /* update whether vce is active */
++ ps->vce_active = adev->pm.dpm.vce_active;
++
++ ret = amdgpu_dpm_pre_set_power_state(adev);
++ if (ret)
++ goto done;
++
++ /* update display watermarks based on new power state */
++ amdgpu_display_bandwidth_update(adev);
++ /* update displays */
++ amdgpu_dpm_display_configuration_changed(adev);
++
++ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
++ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
++
++ /* wait for the rings to drain */
++ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
++ struct amdgpu_ring *ring = adev->rings[i];
++ if (ring && ring->ready)
++ amdgpu_fence_wait_empty(ring);
++ }
++
++ /* program the new power state */
++ amdgpu_dpm_set_power_state(adev);
++
++ /* update current power state */
++ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
++
++ amdgpu_dpm_post_set_power_state(adev);
++
++ if (adev->pm.funcs->force_performance_level) {
++ if (adev->pm.dpm.thermal_active) {
++ enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
++ /* force low perf level for thermal */
++ amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
++ /* save the user's level */
++ adev->pm.dpm.forced_level = level;
++ } else {
++ /* otherwise, user selected level */
++ amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
++ }
++ }
++
++done:
++ mutex_unlock(&adev->ring_lock);
++ up_write(&adev->pm.mclk_lock);
++ mutex_unlock(&adev->ddev->struct_mutex);
++}
++
++void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
++{
++ if (adev->pm.funcs->powergate_uvd) {
++ mutex_lock(&adev->pm.mutex);
++ /* enable/disable UVD */
++ amdgpu_dpm_powergate_uvd(adev, !enable);
++ mutex_unlock(&adev->pm.mutex);
++ } else {
++ if (enable) {
++ mutex_lock(&adev->pm.mutex);
++ adev->pm.dpm.uvd_active = true;
++ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
++ mutex_unlock(&adev->pm.mutex);
++ } else {
++ mutex_lock(&adev->pm.mutex);
++ adev->pm.dpm.uvd_active = false;
++ mutex_unlock(&adev->pm.mutex);
++ }
++
++ amdgpu_pm_compute_clocks(adev);
++ }
++}
++
++void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
++{
++ if (enable) {
++ mutex_lock(&adev->pm.mutex);
++ adev->pm.dpm.vce_active = true;
++ /* XXX select vce level based on ring/task */
++ adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
++ mutex_unlock(&adev->pm.mutex);
++ } else {
++ mutex_lock(&adev->pm.mutex);
++ adev->pm.dpm.vce_active = false;
++ mutex_unlock(&adev->pm.mutex);
++ }
++
++ amdgpu_pm_compute_clocks(adev);
++}
++
++void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
++{
++ int i;
++
++ for (i = 0; i < adev->pm.dpm.num_ps; i++) {
++ printk("== power state %d ==\n", i);
++ amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
++ }
++}
++
++int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
++{
++ int ret;
++
++ if (adev->pm.funcs->get_temperature == NULL)
++ return 0;
++ adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
++ DRIVER_NAME, adev,
++ hwmon_groups);
++ if (IS_ERR(adev->pm.int_hwmon_dev)) {
++ ret = PTR_ERR(adev->pm.int_hwmon_dev);
++ dev_err(adev->dev,
++ "Unable to register hwmon device: %d\n", ret);
++ return ret;
++ }
++
++ ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
++ if (ret) {
++ DRM_ERROR("failed to create device file for dpm state\n");
++ return ret;
++ }
++ ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
++ if (ret) {
++ DRM_ERROR("failed to create device file for dpm state\n");
++ return ret;
++ }
++ ret = amdgpu_debugfs_pm_init(adev);
++ if (ret) {
++ DRM_ERROR("Failed to register debugfs file for dpm!\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
++{
++ if (adev->pm.int_hwmon_dev)
++ hwmon_device_unregister(adev->pm.int_hwmon_dev);
++ device_remove_file(adev->dev, &dev_attr_power_dpm_state);
++ device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
++}
++
++void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
++{
++ struct drm_device *ddev = adev->ddev;
++ struct drm_crtc *crtc;
++ struct amdgpu_crtc *amdgpu_crtc;
++
++ if (!adev->pm.dpm_enabled)
++ return;
++
++ mutex_lock(&adev->pm.mutex);
++
++ /* update active crtc counts */
++ adev->pm.dpm.new_active_crtcs = 0;
++ adev->pm.dpm.new_active_crtc_count = 0;
++ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
++ list_for_each_entry(crtc,
++ &ddev->mode_config.crtc_list, head) {
++ amdgpu_crtc = to_amdgpu_crtc(crtc);
++ if (crtc->enabled) {
++ adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
++ adev->pm.dpm.new_active_crtc_count++;
++ }
++ }
++ }
++
++ /* update battery/ac status */
++ if (power_supply_is_system_supplied() > 0)
++ adev->pm.dpm.ac_power = true;
++ else
++ adev->pm.dpm.ac_power = false;
++
++ amdgpu_dpm_change_power_state_locked(adev);
++
++ mutex_unlock(&adev->pm.mutex);
++
++}
++
++/*
++ * Debugfs info
++ */
++#if defined(CONFIG_DEBUG_FS)
++
++static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *) m->private;
++ struct drm_device *dev = node->minor->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++
++ if (adev->pm.dpm_enabled) {
++ mutex_lock(&adev->pm.mutex);
++ if (adev->pm.funcs->debugfs_print_current_performance_level)
++ amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
++ else
++ seq_printf(m, "Debugfs support not implemented for this asic\n");
++ mutex_unlock(&adev->pm.mutex);
++ }
++
++ return 0;
++}
++
++static struct drm_info_list amdgpu_pm_info_list[] = {
++ {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
++};
++#endif
++
++static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
++{
++#if defined(CONFIG_DEBUG_FS)
++ return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
++#else
++ return 0;
++#endif
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+new file mode 100644
+index 0000000..5fd7734
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+@@ -0,0 +1,35 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_PM_H__
++#define __AMDGPU_PM_H__
++
++int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
++void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
++void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
++void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
++void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
++void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
++void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+new file mode 100644
+index 0000000..d9652fe
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -0,0 +1,125 @@
++/*
++ * Copyright 2012 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * based on nouveau_prime.c
++ *
++ * Authors: Alex Deucher
++ */
++#include <drm/drmP.h>
++
++#include "amdgpu.h"
++#include <drm/amdgpu_drm.h>
++#include <linux/dma-buf.h>
++
++struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
++{
++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
++ int npages = bo->tbo.num_pages;
++
++ return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
++}
++
++void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
++{
++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
++ int ret;
++
++ ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
++ &bo->dma_buf_vmap);
++ if (ret)
++ return ERR_PTR(ret);
++
++ return bo->dma_buf_vmap.virtual;
++}
++
++void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
++{
++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
++
++ ttm_bo_kunmap(&bo->dma_buf_vmap);
++}
++
++struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
++ struct dma_buf_attachment *attach,
++ struct sg_table *sg)
++{
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_bo *bo;
++ int ret;
++
++ ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
++ AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
++ if (ret)
++ return ERR_PTR(ret);
++
++ mutex_lock(&adev->gem.mutex);
++ list_add_tail(&bo->list, &adev->gem.objects);
++ mutex_unlock(&adev->gem.mutex);
++
++ return &bo->gem_base;
++}
++
++int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
++{
++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
++ int ret = 0;
++
++ ret = amdgpu_bo_reserve(bo, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ /* pin buffer into GTT */
++ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
++ amdgpu_bo_unreserve(bo);
++ return ret;
++}
++
++void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
++{
++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
++ int ret = 0;
++
++ ret = amdgpu_bo_reserve(bo, false);
++ if (unlikely(ret != 0))
++ return;
++
++ amdgpu_bo_unpin(bo);
++ amdgpu_bo_unreserve(bo);
++}
++
++struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
++{
++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
++
++ return bo->tbo.resv;
++}
++
++struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
++ struct drm_gem_object *gobj,
++ int flags)
++{
++ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
++
++ if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
++ return ERR_PTR(-EPERM);
++
++ return drm_gem_prime_export(dev, gobj, flags);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+new file mode 100644
+index 0000000..855e219
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -0,0 +1,561 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ * Christian König
++ */
++#include <linux/seq_file.h>
++#include <linux/slab.h>
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "atom.h"
++
++/*
++ * Rings
++ * Most engines on the GPU are fed via ring buffers. Ring
++ * buffers are areas of GPU accessible memory that the host
++ * writes commands into and the GPU reads commands out of.
++ * There is a rptr (read pointer) that determines where the
++ * GPU is currently reading, and a wptr (write pointer)
++ * which determines where the host has written. When the
++ * pointers are equal, the ring is idle. When the host
++ * writes commands to the ring buffer, it increments the
++ * wptr. The GPU then starts fetching commands and executes
++ * them until the pointers are equal again.
++ */
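++
++/*
++ * Worked example, assuming a hypothetical ring of 1024 dwords
++ * (ptr_mask = 1023): with rptr = 100 and wptr = 110 the GPU still has
++ * (wptr - rptr) & ptr_mask = 10 dwords left to fetch, and
++ * amdgpu_ring_free_size() below computes
++ *
++ *   ring_free_dw = (rptr + 1024 - wptr) & ptr_mask
++ *                = (100 + 1024 - 110) & 1023 = 1014
++ *
++ * free dwords. A result of 0 is ambiguous (completely empty or
++ * completely full); the code resolves it as "empty", which is why
++ * amdgpu_ring_alloc() always leaves at least one dword unused.
++ */
++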
++static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
++
++/**
++ * amdgpu_ring_free_size - update the free size
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ *
++ * Update the free dw slots in the ring buffer (all asics).
++ */
++void amdgpu_ring_free_size(struct amdgpu_ring *ring)
++{
++ uint32_t rptr = amdgpu_ring_get_rptr(ring);
++
++ /* This works because ring_size is a power of 2 */
++ ring->ring_free_dw = rptr + (ring->ring_size / 4);
++ ring->ring_free_dw -= ring->wptr;
++ ring->ring_free_dw &= ring->ptr_mask;
++ if (!ring->ring_free_dw) {
++ /* this is an empty ring */
++ ring->ring_free_dw = ring->ring_size / 4;
++ /* update lockup info to avoid false positive */
++ amdgpu_ring_lockup_update(ring);
++ }
++}
++
++/**
++ * amdgpu_ring_alloc - allocate space on the ring buffer
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ * @ndw: number of dwords to allocate in the ring buffer
++ *
++ * Allocate @ndw dwords in the ring buffer (all asics).
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
++{
++ int r;
++
++ /* make sure we aren't trying to allocate more space than there is on the ring */
++ if (ndw > (ring->ring_size / 4))
++ return -ENOMEM;
++ /* Align requested size with padding so unlock_commit can
++ * pad safely */
++ amdgpu_ring_free_size(ring);
++ ndw = (ndw + ring->align_mask) & ~ring->align_mask;
++ while (ndw > (ring->ring_free_dw - 1)) {
++ amdgpu_ring_free_size(ring);
++ if (ndw < ring->ring_free_dw) {
++ break;
++ }
++ r = amdgpu_fence_wait_next(ring);
++ if (r)
++ return r;
++ }
++ ring->count_dw = ndw;
++ ring->wptr_old = ring->wptr;
++ return 0;
++}
++
++/**
++ * amdgpu_ring_lock - lock the ring and allocate space on it
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ * @ndw: number of dwords to allocate in the ring buffer
++ *
++ * Lock the ring and allocate @ndw dwords in the ring buffer
++ * (all asics).
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
++{
++ int r;
++
++ mutex_lock(ring->ring_lock);
++ r = amdgpu_ring_alloc(ring, ndw);
++ if (r) {
++ mutex_unlock(ring->ring_lock);
++ return r;
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_ring_commit - tell the GPU to execute the new
++ * commands on the ring buffer
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ *
++ * Update the wptr (write pointer) to tell the GPU to
++ * execute new commands on the ring buffer (all asics).
++ */
++void amdgpu_ring_commit(struct amdgpu_ring *ring)
++{
++ /* We pad to match fetch size */
++ while (ring->wptr & ring->align_mask) {
++ amdgpu_ring_write(ring, ring->nop);
++ }
++ mb();
++ amdgpu_ring_set_wptr(ring);
++}
++
++/**
++ * amdgpu_ring_unlock_commit - tell the GPU to execute the new
++ * commands on the ring buffer and unlock it
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ *
++ * Call amdgpu_ring_commit() then unlock the ring (all asics).
++ */
++void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring)
++{
++ amdgpu_ring_commit(ring);
++ mutex_unlock(ring->ring_lock);
++}
++
++/**
++ * amdgpu_ring_undo - reset the wptr
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ *
++ * Reset the driver's copy of the wptr (all asics).
++ */
++void amdgpu_ring_undo(struct amdgpu_ring *ring)
++{
++ ring->wptr = ring->wptr_old;
++}
++
++/**
++ * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ *
++ * Call amdgpu_ring_undo() then unlock the ring (all asics).
++ */
++void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
++{
++ amdgpu_ring_undo(ring);
++ mutex_unlock(ring->ring_lock);
++}
++
++/**
++ * amdgpu_ring_lockup_update - update lockup variables
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ *
++ * Update the last rptr value and timestamp (all asics).
++ */
++void amdgpu_ring_lockup_update(struct amdgpu_ring *ring)
++{
++ atomic_set(&ring->last_rptr, amdgpu_ring_get_rptr(ring));
++ atomic64_set(&ring->last_activity, jiffies_64);
++}
++
++/**
++ * amdgpu_ring_test_lockup() - check if the ring is locked up
++ * @ring: amdgpu_ring structure holding ring information
++ *
++ * Returns true if the ring has stalled for longer than the lockup
++ * timeout, false otherwise.
++ */
++bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring)
++{
++ uint32_t rptr = amdgpu_ring_get_rptr(ring);
++ uint64_t last = atomic64_read(&ring->last_activity);
++ uint64_t elapsed;
++
++ if (rptr != atomic_read(&ring->last_rptr)) {
++ /* ring is still working, no lockup */
++ amdgpu_ring_lockup_update(ring);
++ return false;
++ }
++
++ elapsed = jiffies_to_msecs(jiffies_64 - last);
++ if (amdgpu_lockup_timeout && elapsed >= amdgpu_lockup_timeout) {
++		dev_err(ring->adev->dev, "ring %d stalled for more than %llu msec\n",
++			ring->idx, elapsed);
++ return true;
++ }
++ /* give a chance to the GPU ... */
++ return false;
++}
++
++/**
++ * amdgpu_ring_backup - Back up the content of a ring
++ *
++ * @ring: the ring we want to back up
++ *
++ * Saves all unprocessed commits from a ring, returns the number of dwords saved.
++ */
++unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
++ uint32_t **data)
++{
++ unsigned size, ptr, i;
++
++ /* just in case lock the ring */
++ mutex_lock(ring->ring_lock);
++ *data = NULL;
++
++ if (ring->ring_obj == NULL) {
++ mutex_unlock(ring->ring_lock);
++ return 0;
++ }
++
++ /* it doesn't make sense to save anything if all fences are signaled */
++ if (!amdgpu_fence_count_emitted(ring)) {
++ mutex_unlock(ring->ring_lock);
++ return 0;
++ }
++
++ ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
++
++ size = ring->wptr + (ring->ring_size / 4);
++ size -= ptr;
++ size &= ring->ptr_mask;
++ if (size == 0) {
++ mutex_unlock(ring->ring_lock);
++ return 0;
++ }
++
++ /* and then save the content of the ring */
++ *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
++ if (!*data) {
++ mutex_unlock(ring->ring_lock);
++ return 0;
++ }
++ for (i = 0; i < size; ++i) {
++ (*data)[i] = ring->ring[ptr++];
++ ptr &= ring->ptr_mask;
++ }
++
++ mutex_unlock(ring->ring_lock);
++ return size;
++}
++
++/**
++ * amdgpu_ring_restore - append saved commands to the ring again
++ *
++ * @ring: ring to append commands to
++ * @size: number of dwords we want to write
++ * @data: saved commands
++ *
++ * Allocates space on the ring and restores the previously saved commands.
++ */
++int amdgpu_ring_restore(struct amdgpu_ring *ring,
++ unsigned size, uint32_t *data)
++{
++ int i, r;
++
++ if (!size || !data)
++ return 0;
++
++ /* restore the saved ring content */
++ r = amdgpu_ring_lock(ring, size);
++ if (r)
++ return r;
++
++ for (i = 0; i < size; ++i) {
++ amdgpu_ring_write(ring, data[i]);
++ }
++
++ amdgpu_ring_unlock_commit(ring);
++ kfree(data);
++ return 0;
++}
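++
++/*
++ * Typical pairing, sketched with a hypothetical caller on a reset path:
++ * the buffer returned by amdgpu_ring_backup() is kmalloc'ed, and
++ * amdgpu_ring_restore() kfree()s it on success, so the caller frees it
++ * only when the restore fails:
++ *
++ *	uint32_t *data;
++ *	unsigned size = amdgpu_ring_backup(ring, &data);
++ *
++ *	(reset the GPU and reinitialize the ring here)
++ *
++ *	if (amdgpu_ring_restore(ring, size, data))
++ *		kfree(data);
++ */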
++
++/**
++ * amdgpu_ring_init - init driver ring struct.
++ *
++ * @adev: amdgpu_device pointer
++ * @ring: amdgpu_ring structure holding ring information
++ * @ring_size: size of the ring
++ * @nop: nop packet for this ring
++ *
++ * Initialize the driver information for the selected ring (all asics).
++ * Returns 0 on success, error on failure.
++ */
++int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
++ unsigned ring_size, u32 nop, u32 align_mask,
++ struct amdgpu_irq_src *irq_src, unsigned irq_type,
++ enum amdgpu_ring_type ring_type)
++{
++ u32 rb_bufsz;
++ int r;
++
++ if (ring->adev == NULL) {
++ if (adev->num_rings >= AMDGPU_MAX_RINGS)
++ return -EINVAL;
++
++ ring->adev = adev;
++ ring->idx = adev->num_rings++;
++ adev->rings[ring->idx] = ring;
++ amdgpu_fence_driver_init_ring(ring);
++ }
++
++ r = amdgpu_wb_get(adev, &ring->rptr_offs);
++ if (r) {
++ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
++ return r;
++ }
++
++ r = amdgpu_wb_get(adev, &ring->wptr_offs);
++ if (r) {
++ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
++ return r;
++ }
++
++ r = amdgpu_wb_get(adev, &ring->fence_offs);
++ if (r) {
++ dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
++ return r;
++ }
++
++ r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
++ if (r) {
++ dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
++ return r;
++ }
++ ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
++ ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
++
++ r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
++ if (r) {
++ dev_err(adev->dev, "failed initializing fences (%d).\n", r);
++ return r;
++ }
++
++ ring->ring_lock = &adev->ring_lock;
++ /* Align ring size */
++ rb_bufsz = order_base_2(ring_size / 8);
++ ring_size = (1 << (rb_bufsz + 1)) * 4;
++ ring->ring_size = ring_size;
++ ring->align_mask = align_mask;
++ ring->nop = nop;
++ ring->type = ring_type;
++
++ /* Allocate ring buffer */
++ if (ring->ring_obj == NULL) {
++ r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_GTT, 0,
++ NULL, &ring->ring_obj);
++ if (r) {
++ dev_err(adev->dev, "(%d) ring create failed\n", r);
++ return r;
++ }
++ r = amdgpu_bo_reserve(ring->ring_obj, false);
++ if (unlikely(r != 0))
++ return r;
++ r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
++ &ring->gpu_addr);
++ if (r) {
++ amdgpu_bo_unreserve(ring->ring_obj);
++ dev_err(adev->dev, "(%d) ring pin failed\n", r);
++ return r;
++ }
++ r = amdgpu_bo_kmap(ring->ring_obj,
++ (void **)&ring->ring);
++ amdgpu_bo_unreserve(ring->ring_obj);
++ if (r) {
++ dev_err(adev->dev, "(%d) ring map failed\n", r);
++ return r;
++ }
++ }
++ ring->ptr_mask = (ring->ring_size / 4) - 1;
++ ring->ring_free_dw = ring->ring_size / 4;
++
++ if (amdgpu_debugfs_ring_init(adev, ring)) {
++		DRM_ERROR("Failed to register debugfs file for rings!\n");
++ }
++ amdgpu_ring_lockup_update(ring);
++ return 0;
++}
++
++/**
++ * amdgpu_ring_fini - tear down the driver ring struct.
++ *
++ * @ring: amdgpu_ring structure holding ring information
++ *
++ * Tear down the driver information for the selected ring (all asics).
++ */
++void amdgpu_ring_fini(struct amdgpu_ring *ring)
++{
++ int r;
++ struct amdgpu_bo *ring_obj;
++
++ if (ring->ring_lock == NULL)
++ return;
++
++ mutex_lock(ring->ring_lock);
++ ring_obj = ring->ring_obj;
++ ring->ready = false;
++ ring->ring = NULL;
++ ring->ring_obj = NULL;
++ mutex_unlock(ring->ring_lock);
++
++ amdgpu_wb_free(ring->adev, ring->fence_offs);
++ amdgpu_wb_free(ring->adev, ring->rptr_offs);
++ amdgpu_wb_free(ring->adev, ring->wptr_offs);
++ amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
++
++ if (ring_obj) {
++ r = amdgpu_bo_reserve(ring_obj, false);
++ if (likely(r == 0)) {
++ amdgpu_bo_kunmap(ring_obj);
++ amdgpu_bo_unpin(ring_obj);
++ amdgpu_bo_unreserve(ring_obj);
++ }
++ amdgpu_bo_unref(&ring_obj);
++ }
++}
++
++/*
++ * Debugfs info
++ */
++#if defined(CONFIG_DEBUG_FS)
++
++static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *) m->private;
++ struct drm_device *dev = node->minor->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ int roffset = *(int*)node->info_ent->data;
++ struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
++
++ uint32_t rptr, wptr, rptr_next;
++ unsigned count, i, j;
++
++ amdgpu_ring_free_size(ring);
++ count = (ring->ring_size / 4) - ring->ring_free_dw;
++
++ wptr = amdgpu_ring_get_wptr(ring);
++ seq_printf(m, "wptr: 0x%08x [%5d]\n",
++ wptr, wptr);
++
++ rptr = amdgpu_ring_get_rptr(ring);
++ seq_printf(m, "rptr: 0x%08x [%5d]\n",
++ rptr, rptr);
++
++ rptr_next = ~0;
++
++ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
++ ring->wptr, ring->wptr);
++ seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
++ ring->last_semaphore_signal_addr);
++ seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
++ ring->last_semaphore_wait_addr);
++ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
++ seq_printf(m, "%u dwords in ring\n", count);
++
++ if (!ring->ready)
++ return 0;
++
++	/* print 32 dw before the current rptr, as it is often the last
++	 * executed packet that is the root issue
++	 */
++ i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
++ for (j = 0; j <= (count + 32); j++) {
++ seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
++ if (rptr == i)
++ seq_puts(m, " *");
++ if (rptr_next == i)
++ seq_puts(m, " #");
++ seq_puts(m, "\n");
++ i = (i + 1) & ring->ptr_mask;
++ }
++ return 0;
++}
++
++/* TODO: clean this up! */
++static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
++static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
++static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
++static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
++static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
++static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
++static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
++static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
++
++static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
++ {"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
++ {"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
++ {"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
++ {"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
++ {"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
++ {"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
++ {"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
++ {"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
++};
++
++#endif
++
++static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
++{
++#if defined(CONFIG_DEBUG_FS)
++ unsigned i;
++ for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
++ struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
++ int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
++ struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
++ unsigned r;
++
++ if (other != ring)
++ continue;
++
++ r = amdgpu_debugfs_add_files(adev, info, 1);
++ if (r)
++ return r;
++ }
++#endif
++ return 0;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+new file mode 100644
+index 0000000..eb20987
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+@@ -0,0 +1,419 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Jerome Glisse <glisse@freedesktop.org>
++ */
++/* Algorithm:
++ *
++ * We store the last allocated bo in "hole" and always try to allocate
++ * after it. The principle is that in a linear GPU ring progression,
++ * whatever is after "last" is the oldest bo we allocated and thus the
++ * first one that should no longer be in use by the GPU.
++ *
++ * If that's not the case, we skip over the bo after last to the
++ * closest finished bo, if one exists. If none exists and we are not
++ * asked to block, we report failure to allocate.
++ *
++ * If we are asked to block, we wait on the oldest fence of each ring
++ * and unblock as soon as any of those fences completes.
++ */
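++
++/*
++ * Worked example of the wrap-around handling (hypothetical numbers, a
++ * 1024 KiB manager): suppose the hole ends at soffset = 900 KiB and
++ * signaled bos sit at 950 KiB and 100 KiB. The candidate at 100 KiB
++ * lies before the hole, so amdgpu_sa_bo_next_hole() shifts it by the
++ * manager size before comparing distances:
++ *
++ *   950 KiB -> 950 - 900          =  50 KiB after the hole
++ *   100 KiB -> (100 + 1024) - 900 = 224 KiB after the hole
++ *
++ * so the bo at 950 KiB is reclaimed first, matching the order in which
++ * the GPU consumes the ring.
++ */
++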
++#include <drm/drmP.h>
++#include "amdgpu.h"
++
++static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
++static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
++
++int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager,
++ unsigned size, u32 align, u32 domain)
++{
++ int i, r;
++
++ init_waitqueue_head(&sa_manager->wq);
++ sa_manager->bo = NULL;
++ sa_manager->size = size;
++ sa_manager->domain = domain;
++ sa_manager->align = align;
++ sa_manager->hole = &sa_manager->olist;
++ INIT_LIST_HEAD(&sa_manager->olist);
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ INIT_LIST_HEAD(&sa_manager->flist[i]);
++ }
++
++ r = amdgpu_bo_create(adev, size, align, true,
++ domain, 0, NULL, &sa_manager->bo);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
++ return r;
++ }
++
++ return r;
++}
++
++void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager)
++{
++ struct amdgpu_sa_bo *sa_bo, *tmp;
++
++ if (!list_empty(&sa_manager->olist)) {
++		sa_manager->hole = &sa_manager->olist;
++ amdgpu_sa_bo_try_free(sa_manager);
++ if (!list_empty(&sa_manager->olist)) {
++ dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
++ }
++ }
++ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
++ amdgpu_sa_bo_remove_locked(sa_bo);
++ }
++ amdgpu_bo_unref(&sa_manager->bo);
++ sa_manager->size = 0;
++}
++
++int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager)
++{
++ int r;
++
++ if (sa_manager->bo == NULL) {
++ dev_err(adev->dev, "no bo for sa manager\n");
++ return -EINVAL;
++ }
++
++ /* map the buffer */
++ r = amdgpu_bo_reserve(sa_manager->bo, false);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
++ return r;
++ }
++ r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
++ if (r) {
++ amdgpu_bo_unreserve(sa_manager->bo);
++ dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
++ return r;
++ }
++ r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
++ amdgpu_bo_unreserve(sa_manager->bo);
++ return r;
++}
++
++int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager)
++{
++ int r;
++
++ if (sa_manager->bo == NULL) {
++ dev_err(adev->dev, "no bo for sa manager\n");
++ return -EINVAL;
++ }
++
++ r = amdgpu_bo_reserve(sa_manager->bo, false);
++ if (!r) {
++ amdgpu_bo_kunmap(sa_manager->bo);
++ amdgpu_bo_unpin(sa_manager->bo);
++ amdgpu_bo_unreserve(sa_manager->bo);
++ }
++ return r;
++}
++
++static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
++{
++ struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
++ if (sa_manager->hole == &sa_bo->olist) {
++ sa_manager->hole = sa_bo->olist.prev;
++ }
++ list_del_init(&sa_bo->olist);
++ list_del_init(&sa_bo->flist);
++ amdgpu_fence_unref(&sa_bo->fence);
++ kfree(sa_bo);
++}
++
++static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
++{
++ struct amdgpu_sa_bo *sa_bo, *tmp;
++
++ if (sa_manager->hole->next == &sa_manager->olist)
++ return;
++
++ sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
++ list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
++ if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) {
++ return;
++ }
++ amdgpu_sa_bo_remove_locked(sa_bo);
++ }
++}
++
++static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
++{
++ struct list_head *hole = sa_manager->hole;
++
++ if (hole != &sa_manager->olist) {
++ return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
++ }
++ return 0;
++}
++
++static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
++{
++ struct list_head *hole = sa_manager->hole;
++
++ if (hole->next != &sa_manager->olist) {
++ return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
++ }
++ return sa_manager->size;
++}
++
++static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
++ struct amdgpu_sa_bo *sa_bo,
++ unsigned size, unsigned align)
++{
++ unsigned soffset, eoffset, wasted;
++
++ soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
++ eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
++ wasted = (align - (soffset % align)) % align;
++
++ if ((eoffset - soffset) >= (size + wasted)) {
++ soffset += wasted;
++
++ sa_bo->manager = sa_manager;
++ sa_bo->soffset = soffset;
++ sa_bo->eoffset = soffset + size;
++ list_add(&sa_bo->olist, sa_manager->hole);
++ INIT_LIST_HEAD(&sa_bo->flist);
++ sa_manager->hole = &sa_bo->olist;
++ return true;
++ }
++ return false;
++}
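++
++/*
++ * Worked example of the padding above (hypothetical numbers): with
++ * soffset = 100 and align = 64,
++ *
++ *   wasted = (64 - (100 % 64)) % 64 = (64 - 36) % 64 = 28
++ *
++ * so the new bo starts at offset 128, the next multiple of 64. The
++ * outer "% align" makes wasted 0 (rather than align) when soffset is
++ * already aligned.
++ */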
++
++/**
++ * amdgpu_sa_event - Check if we can stop waiting
++ *
++ * @sa_manager: pointer to the sa_manager
++ * @size: number of bytes we want to allocate
++ * @align: alignment we need to match
++ *
++ * Check if either there is a fence we can wait for or
++ * enough free memory to satisfy the allocation directly
++ */
++static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
++ unsigned size, unsigned align)
++{
++ unsigned soffset, eoffset, wasted;
++ int i;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ if (!list_empty(&sa_manager->flist[i])) {
++ return true;
++ }
++ }
++
++ soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
++ eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
++ wasted = (align - (soffset % align)) % align;
++
++ if ((eoffset - soffset) >= (size + wasted)) {
++ return true;
++ }
++
++ return false;
++}
++
++static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
++ struct amdgpu_fence **fences,
++ unsigned *tries)
++{
++ struct amdgpu_sa_bo *best_bo = NULL;
++ unsigned i, soffset, best, tmp;
++
++ /* if hole points to the end of the buffer */
++ if (sa_manager->hole->next == &sa_manager->olist) {
++ /* try again with its beginning */
++ sa_manager->hole = &sa_manager->olist;
++ return true;
++ }
++
++ soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
++ /* to handle wrap around we add sa_manager->size */
++ best = sa_manager->size * 2;
++	/* go over all the fence lists and try to find the sa_bo
++	 * closest after the current hole
++	 */
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_sa_bo *sa_bo;
++
++ if (list_empty(&sa_manager->flist[i])) {
++ continue;
++ }
++
++ sa_bo = list_first_entry(&sa_manager->flist[i],
++ struct amdgpu_sa_bo, flist);
++
++ if (!amdgpu_fence_signaled(sa_bo->fence)) {
++ fences[i] = sa_bo->fence;
++ continue;
++ }
++
++ /* limit the number of tries each ring gets */
++ if (tries[i] > 2) {
++ continue;
++ }
++
++ tmp = sa_bo->soffset;
++ if (tmp < soffset) {
++ /* wrap around, pretend it's after */
++ tmp += sa_manager->size;
++ }
++ tmp -= soffset;
++ if (tmp < best) {
++ /* this sa bo is the closest one */
++ best = tmp;
++ best_bo = sa_bo;
++ }
++ }
++
++ if (best_bo) {
++ ++tries[best_bo->fence->ring->idx];
++ sa_manager->hole = best_bo->olist.prev;
++
++		/* we know that this one is signaled,
++		 * so it's safe to remove it */
++ amdgpu_sa_bo_remove_locked(best_bo);
++ return true;
++ }
++ return false;
++}
++
++int amdgpu_sa_bo_new(struct amdgpu_device *adev,
++ struct amdgpu_sa_manager *sa_manager,
++ struct amdgpu_sa_bo **sa_bo,
++ unsigned size, unsigned align)
++{
++ struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
++ unsigned tries[AMDGPU_MAX_RINGS];
++ int i, r;
++
++ BUG_ON(align > sa_manager->align);
++ BUG_ON(size > sa_manager->size);
++
++ *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
++ if ((*sa_bo) == NULL) {
++ return -ENOMEM;
++ }
++ (*sa_bo)->manager = sa_manager;
++ (*sa_bo)->fence = NULL;
++ INIT_LIST_HEAD(&(*sa_bo)->olist);
++ INIT_LIST_HEAD(&(*sa_bo)->flist);
++
++ spin_lock(&sa_manager->wq.lock);
++ do {
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ fences[i] = NULL;
++ tries[i] = 0;
++ }
++
++ do {
++ amdgpu_sa_bo_try_free(sa_manager);
++
++ if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
++ size, align)) {
++ spin_unlock(&sa_manager->wq.lock);
++ return 0;
++ }
++
++ /* see if we can skip over some allocations */
++ } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
++
++ spin_unlock(&sa_manager->wq.lock);
++ r = amdgpu_fence_wait_any(adev, fences, false);
++ spin_lock(&sa_manager->wq.lock);
++ /* if we have nothing to wait for block */
++ if (r == -ENOENT) {
++ r = wait_event_interruptible_locked(
++ sa_manager->wq,
++ amdgpu_sa_event(sa_manager, size, align)
++ );
++ }
++
++ } while (!r);
++
++ spin_unlock(&sa_manager->wq.lock);
++ kfree(*sa_bo);
++ *sa_bo = NULL;
++ return r;
++}
++
++void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
++ struct amdgpu_fence *fence)
++{
++ struct amdgpu_sa_manager *sa_manager;
++
++ if (sa_bo == NULL || *sa_bo == NULL) {
++ return;
++ }
++
++ sa_manager = (*sa_bo)->manager;
++ spin_lock(&sa_manager->wq.lock);
++ if (fence && !amdgpu_fence_signaled(fence)) {
++ (*sa_bo)->fence = amdgpu_fence_ref(fence);
++ list_add_tail(&(*sa_bo)->flist,
++ &sa_manager->flist[fence->ring->idx]);
++ } else {
++ amdgpu_sa_bo_remove_locked(*sa_bo);
++ }
++ wake_up_all_locked(&sa_manager->wq);
++ spin_unlock(&sa_manager->wq.lock);
++ *sa_bo = NULL;
++}
++
++#if defined(CONFIG_DEBUG_FS)
++void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
++ struct seq_file *m)
++{
++ struct amdgpu_sa_bo *i;
++
++ spin_lock(&sa_manager->wq.lock);
++ list_for_each_entry(i, &sa_manager->olist, olist) {
++ uint64_t soffset = i->soffset + sa_manager->gpu_addr;
++ uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
++ if (&i->olist == sa_manager->hole) {
++ seq_printf(m, ">");
++ } else {
++ seq_printf(m, " ");
++ }
++ seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
++ soffset, eoffset, eoffset - soffset);
++ if (i->fence) {
++ seq_printf(m, " protected by 0x%016llx on ring %d",
++ i->fence->seq, i->fence->ring->idx);
++ }
++ seq_printf(m, "\n");
++ }
++ spin_unlock(&sa_manager->wq.lock);
++}
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+new file mode 100644
+index 0000000..d6d41a4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+@@ -0,0 +1,102 @@
++/*
++ * Copyright 2011 Christian König.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Christian König <deathsimple@vodafone.de>
++ */
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "amdgpu_trace.h"
++
++int amdgpu_semaphore_create(struct amdgpu_device *adev,
++ struct amdgpu_semaphore **semaphore)
++{
++ int r;
++
++ *semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL);
++ if (*semaphore == NULL) {
++ return -ENOMEM;
++ }
++ r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
++ &(*semaphore)->sa_bo, 8, 8);
++ if (r) {
++ kfree(*semaphore);
++ *semaphore = NULL;
++ return r;
++ }
++ (*semaphore)->waiters = 0;
++ (*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo);
++
++ *((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
++
++ return 0;
++}
++
++bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
++ struct amdgpu_semaphore *semaphore)
++{
++ trace_amdgpu_semaphore_signale(ring->idx, semaphore);
++
++ if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) {
++ --semaphore->waiters;
++
++		/* for debugging lockup only, used by debugfs files */
++ ring->last_semaphore_signal_addr = semaphore->gpu_addr;
++ return true;
++ }
++ return false;
++}
++
++bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
++ struct amdgpu_semaphore *semaphore)
++{
++ trace_amdgpu_semaphore_wait(ring->idx, semaphore);
++
++ if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) {
++ ++semaphore->waiters;
++
++		/* for debugging lockup only, used by debugfs files */
++ ring->last_semaphore_wait_addr = semaphore->gpu_addr;
++ return true;
++ }
++ return false;
++}
++
++void amdgpu_semaphore_free(struct amdgpu_device *adev,
++ struct amdgpu_semaphore **semaphore,
++ struct amdgpu_fence *fence)
++{
++ if (semaphore == NULL || *semaphore == NULL) {
++ return;
++ }
++ if ((*semaphore)->waiters > 0) {
++ dev_err(adev->dev, "semaphore %p has more waiters than signalers,"
++ " hardware lockup imminent!\n", *semaphore);
++ }
++ amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence);
++ kfree(*semaphore);
++ *semaphore = NULL;
++}
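++
++/*
++ * Usage sketch with a hypothetical caller: a semaphore is signaled on
++ * the ring that produces some work and waited on by the consumer, then
++ * freed together with the fence that protects it:
++ *
++ *	amdgpu_semaphore_create(adev, &sem);
++ *	amdgpu_semaphore_emit_signal(producer_ring, sem);
++ *	amdgpu_semaphore_emit_wait(consumer_ring, sem);
++ *	...
++ *	amdgpu_semaphore_free(adev, &sem, fence);
++ *
++ * amdgpu_sync_rings() in amdgpu_sync.c and the ring tests in
++ * amdgpu_test.c (both added in this patch) follow this pattern.
++ */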
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+new file mode 100644
+index 0000000..855d56a
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -0,0 +1,231 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Christian König <christian.koenig@amd.com>
++ */
++
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "amdgpu_trace.h"
++
++/**
++ * amdgpu_sync_create - zero init sync object
++ *
++ * @sync: sync object to initialize
++ *
++ * Just clear the sync object for now.
++ */
++void amdgpu_sync_create(struct amdgpu_sync *sync)
++{
++ unsigned i;
++
++ for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
++ sync->semaphores[i] = NULL;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
++ sync->sync_to[i] = NULL;
++
++ sync->last_vm_update = NULL;
++}
++
++/**
++ * amdgpu_sync_fence - use the semaphore to sync to a fence
++ *
++ * @sync: sync object to add fence to
++ * @fence: fence to sync to
++ *
++ * Sync to the fence using the semaphore objects
++ */
++void amdgpu_sync_fence(struct amdgpu_sync *sync,
++ struct amdgpu_fence *fence)
++{
++ struct amdgpu_fence *other;
++
++ if (!fence)
++ return;
++
++ other = sync->sync_to[fence->ring->idx];
++ sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
++ amdgpu_fence_later(fence, other));
++ amdgpu_fence_unref(&other);
++
++ if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
++ other = sync->last_vm_update;
++ sync->last_vm_update = amdgpu_fence_ref(
++ amdgpu_fence_later(fence, other));
++ amdgpu_fence_unref(&other);
++ }
++}
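++
++/*
++ * Note that only the most recent fence per ring is kept: adding, say, a
++ * fence with seq 5 and then one with seq 7 from the same ring leaves
++ * just seq 7 in sync_to[], since waiting for the later fence implies
++ * the earlier one has signaled as well.
++ */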
++
++/**
++ * amdgpu_sync_resv - use the semaphores to sync to a reservation object
++ *
++ * @adev: amdgpu_device pointer
++ * @sync: sync object to add fences from the reservation object to
++ * @resv: reservation object with embedded fences
++ * @owner: shared fences belonging to this owner are skipped
++ *
++ * Sync to the fences using the sync object
++ */
++int amdgpu_sync_resv(struct amdgpu_device *adev,
++ struct amdgpu_sync *sync,
++ struct reservation_object *resv,
++ void *owner)
++{
++ struct reservation_object_list *flist;
++ struct fence *f;
++ struct amdgpu_fence *fence;
++ unsigned i;
++ int r = 0;
++
++ /* always sync to the exclusive fence */
++ f = reservation_object_get_excl(resv);
++ fence = f ? to_amdgpu_fence(f) : NULL;
++ if (fence && fence->ring->adev == adev)
++ amdgpu_sync_fence(sync, fence);
++ else if (f)
++ r = fence_wait(f, true);
++
++ flist = reservation_object_get_list(resv);
++ if (!flist || r)
++ return r;
++
++ for (i = 0; i < flist->shared_count; ++i) {
++ f = rcu_dereference_protected(flist->shared[i],
++ reservation_object_held(resv));
++ fence = to_amdgpu_fence(f);
++ if (fence && fence->ring->adev == adev) {
++ if (fence->owner != owner ||
++ fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
++ amdgpu_sync_fence(sync, fence);
++ } else {
++ r = fence_wait(f, true);
++ if (r)
++ break;
++ }
++ }
++ return r;
++}
++
++/**
++ * amdgpu_sync_rings - sync ring to all registered fences
++ *
++ * @sync: sync object to use
++ * @ring: ring that needs sync
++ *
++ * Ensure that all registered fences are signaled before letting
++ * the ring continue. The caller must hold the ring lock.
++ */
++int amdgpu_sync_rings(struct amdgpu_sync *sync,
++ struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++ unsigned count = 0;
++ int i, r;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_fence *fence = sync->sync_to[i];
++ struct amdgpu_semaphore *semaphore;
++ struct amdgpu_ring *other = adev->rings[i];
++
++ /* check if we really need to sync */
++ if (!amdgpu_fence_need_sync(fence, ring))
++ continue;
++
++ /* prevent GPU deadlocks */
++ if (!other->ready) {
++			dev_err(adev->dev, "Syncing to a disabled ring!\n");
++ return -EINVAL;
++ }
++
++ if (count >= AMDGPU_NUM_SYNCS) {
++ /* not enough room, wait manually */
++ r = amdgpu_fence_wait(fence, false);
++ if (r)
++ return r;
++ continue;
++ }
++ r = amdgpu_semaphore_create(adev, &semaphore);
++ if (r)
++ return r;
++
++ sync->semaphores[count++] = semaphore;
++
++ /* allocate enough space for sync command */
++ r = amdgpu_ring_alloc(other, 16);
++ if (r)
++ return r;
++
++ /* emit the signal semaphore */
++ if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
++			/* signaling wasn't successful, wait manually */
++ amdgpu_ring_undo(other);
++ r = amdgpu_fence_wait(fence, false);
++ if (r)
++ return r;
++ continue;
++ }
++
++		/* we assume the caller has already allocated space on the waiter's ring */
++ if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
++			/* waiting wasn't successful, wait manually */
++ amdgpu_ring_undo(other);
++ r = amdgpu_fence_wait(fence, false);
++ if (r)
++ return r;
++ continue;
++ }
++
++ amdgpu_ring_commit(other);
++ amdgpu_fence_note_sync(fence, ring);
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_sync_free - free the sync object
++ *
++ * @adev: amdgpu_device pointer
++ * @sync: sync object to use
++ * @fence: fence to use for the free
++ *
++ * Free the sync object by freeing all semaphores in it.
++ */
++void amdgpu_sync_free(struct amdgpu_device *adev,
++ struct amdgpu_sync *sync,
++ struct amdgpu_fence *fence)
++{
++ unsigned i;
++
++ for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
++ amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
++ amdgpu_fence_unref(&sync->sync_to[i]);
++
++ amdgpu_fence_unref(&sync->last_vm_update);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+new file mode 100644
+index 0000000..df20299
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+@@ -0,0 +1,552 @@
++/*
++ * Copyright 2009 VMware, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Michel Dänzer
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_uvd.h"
++#include "amdgpu_vce.h"
++
++/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
++static void amdgpu_do_test_moves(struct amdgpu_device *adev)
++{
++ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
++ struct amdgpu_bo *vram_obj = NULL;
++ struct amdgpu_bo **gtt_obj = NULL;
++ uint64_t gtt_addr, vram_addr;
++ unsigned n, size;
++ int i, r;
++
++ size = 1024 * 1024;
++
++ /* Number of tests =
++ * (Total GTT - IB pool - writeback page - ring buffers) / test size
++ */
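++	/*
++	 * Worked example (hypothetical sizes): with a 512 MiB GTT, a 1 MiB
++	 * IB pool, roughly 1 MiB of ring buffers, one writeback page and
++	 * the 1 MiB test size above, n comes out to roughly 510.
++	 */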
++ n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
++ if (adev->rings[i])
++ n -= adev->rings[i]->ring_size;
++ if (adev->wb.wb_obj)
++ n -= AMDGPU_GPU_PAGE_SIZE;
++ if (adev->irq.ih.ring_obj)
++ n -= adev->irq.ih.ring_size;
++ n /= size;
++
++ gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
++ if (!gtt_obj) {
++ DRM_ERROR("Failed to allocate %d pointers\n", n);
++ r = 1;
++ goto out_cleanup;
++ }
++
++ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
++ NULL, &vram_obj);
++ if (r) {
++ DRM_ERROR("Failed to create VRAM object\n");
++ goto out_cleanup;
++ }
++ r = amdgpu_bo_reserve(vram_obj, false);
++ if (unlikely(r != 0))
++ goto out_unref;
++ r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
++ if (r) {
++ DRM_ERROR("Failed to pin VRAM object\n");
++ goto out_unres;
++ }
++ for (i = 0; i < n; i++) {
++ void *gtt_map, *vram_map;
++ void **gtt_start, **gtt_end;
++ void **vram_start, **vram_end;
++ struct amdgpu_fence *fence = NULL;
++
++ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
++ if (r) {
++ DRM_ERROR("Failed to create GTT object %d\n", i);
++ goto out_lclean;
++ }
++
++ r = amdgpu_bo_reserve(gtt_obj[i], false);
++ if (unlikely(r != 0))
++ goto out_lclean_unref;
++ r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
++ if (r) {
++ DRM_ERROR("Failed to pin GTT object %d\n", i);
++ goto out_lclean_unres;
++ }
++
++ r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
++ if (r) {
++ DRM_ERROR("Failed to map GTT object %d\n", i);
++ goto out_lclean_unpin;
++ }
++
++ for (gtt_start = gtt_map, gtt_end = gtt_map + size;
++ gtt_start < gtt_end;
++ gtt_start++)
++ *gtt_start = gtt_start;
++
++ amdgpu_bo_kunmap(gtt_obj[i]);
++
++ r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
++ size, NULL, &fence);
++
++ if (r) {
++ DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
++ goto out_lclean_unpin;
++ }
++
++ r = amdgpu_fence_wait(fence, false);
++ if (r) {
++ DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
++ goto out_lclean_unpin;
++ }
++
++ amdgpu_fence_unref(&fence);
++
++ r = amdgpu_bo_kmap(vram_obj, &vram_map);
++ if (r) {
++ DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
++ goto out_lclean_unpin;
++ }
++
++ for (gtt_start = gtt_map, gtt_end = gtt_map + size,
++ vram_start = vram_map, vram_end = vram_map + size;
++ vram_start < vram_end;
++ gtt_start++, vram_start++) {
++ if (*vram_start != gtt_start) {
++ DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
++ "expected 0x%p (GTT/VRAM offset "
++ "0x%16llx/0x%16llx)\n",
++ i, *vram_start, gtt_start,
++ (unsigned long long)
++ (gtt_addr - adev->mc.gtt_start +
++ (void*)gtt_start - gtt_map),
++ (unsigned long long)
++ (vram_addr - adev->mc.vram_start +
++ (void*)gtt_start - gtt_map));
++ amdgpu_bo_kunmap(vram_obj);
++ goto out_lclean_unpin;
++ }
++ *vram_start = vram_start;
++ }
++
++ amdgpu_bo_kunmap(vram_obj);
++
++ r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
++ size, NULL, &fence);
++
++ if (r) {
++ DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
++ goto out_lclean_unpin;
++ }
++
++ r = amdgpu_fence_wait(fence, false);
++ if (r) {
++ DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
++ goto out_lclean_unpin;
++ }
++
++ amdgpu_fence_unref(&fence);
++
++ r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
++ if (r) {
++ DRM_ERROR("Failed to map GTT object after copy %d\n", i);
++ goto out_lclean_unpin;
++ }
++
++ for (gtt_start = gtt_map, gtt_end = gtt_map + size,
++ vram_start = vram_map, vram_end = vram_map + size;
++ gtt_start < gtt_end;
++ gtt_start++, vram_start++) {
++ if (*gtt_start != vram_start) {
++ DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
++ "expected 0x%p (VRAM/GTT offset "
++ "0x%16llx/0x%16llx)\n",
++ i, *gtt_start, vram_start,
++ (unsigned long long)
++ (vram_addr - adev->mc.vram_start +
++ (void*)vram_start - vram_map),
++ (unsigned long long)
++ (gtt_addr - adev->mc.gtt_start +
++ (void*)vram_start - vram_map));
++ amdgpu_bo_kunmap(gtt_obj[i]);
++ goto out_lclean_unpin;
++ }
++ }
++
++ amdgpu_bo_kunmap(gtt_obj[i]);
++
++ DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
++ gtt_addr - adev->mc.gtt_start);
++ continue;
++
++out_lclean_unpin:
++ amdgpu_bo_unpin(gtt_obj[i]);
++out_lclean_unres:
++ amdgpu_bo_unreserve(gtt_obj[i]);
++out_lclean_unref:
++ amdgpu_bo_unref(&gtt_obj[i]);
++out_lclean:
++ for (--i; i >= 0; --i) {
++ amdgpu_bo_unpin(gtt_obj[i]);
++ amdgpu_bo_unreserve(gtt_obj[i]);
++ amdgpu_bo_unref(&gtt_obj[i]);
++ }
++ if (fence)
++ amdgpu_fence_unref(&fence);
++ break;
++ }
++
++ amdgpu_bo_unpin(vram_obj);
++out_unres:
++ amdgpu_bo_unreserve(vram_obj);
++out_unref:
++ amdgpu_bo_unref(&vram_obj);
++out_cleanup:
++ kfree(gtt_obj);
++ if (r) {
++ printk(KERN_WARNING "Error while testing BO move.\n");
++ }
++}
++
++void amdgpu_test_moves(struct amdgpu_device *adev)
++{
++ if (adev->mman.buffer_funcs)
++ amdgpu_do_test_moves(adev);
++}
++
++static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
++ struct amdgpu_ring *ring,
++ struct amdgpu_fence **fence)
++{
++ uint32_t handle = ring->idx ^ 0xdeafbeef;
++ int r;
++
++ if (ring == &adev->uvd.ring) {
++ r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
++ if (r) {
++ DRM_ERROR("Failed to get dummy create msg\n");
++ return r;
++ }
++
++ r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
++ if (r) {
++ DRM_ERROR("Failed to get dummy destroy msg\n");
++ return r;
++ }
++
++ } else if (ring == &adev->vce.ring[0] ||
++ ring == &adev->vce.ring[1]) {
++ r = amdgpu_vce_get_create_msg(ring, handle, NULL);
++ if (r) {
++ DRM_ERROR("Failed to get dummy create msg\n");
++ return r;
++ }
++
++ r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
++ if (r) {
++ DRM_ERROR("Failed to get dummy destroy msg\n");
++ return r;
++ }
++
++ } else {
++ r = amdgpu_ring_lock(ring, 64);
++ if (r) {
++			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
++ return r;
++ }
++ amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
++ amdgpu_ring_unlock_commit(ring);
++ }
++ return 0;
++}
++
++void amdgpu_test_ring_sync(struct amdgpu_device *adev,
++ struct amdgpu_ring *ringA,
++ struct amdgpu_ring *ringB)
++{
++ struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
++ struct amdgpu_semaphore *semaphore = NULL;
++ int r;
++
++ r = amdgpu_semaphore_create(adev, &semaphore);
++ if (r) {
++ DRM_ERROR("Failed to create semaphore\n");
++ goto out_cleanup;
++ }
++
++ r = amdgpu_ring_lock(ringA, 64);
++ if (r) {
++ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
++ goto out_cleanup;
++ }
++ amdgpu_semaphore_emit_wait(ringA, semaphore);
++ amdgpu_ring_unlock_commit(ringA);
++
++ r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
++ if (r)
++ goto out_cleanup;
++
++ r = amdgpu_ring_lock(ringA, 64);
++ if (r) {
++ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
++ goto out_cleanup;
++ }
++ amdgpu_semaphore_emit_wait(ringA, semaphore);
++ amdgpu_ring_unlock_commit(ringA);
++
++ r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
++ if (r)
++ goto out_cleanup;
++
++ mdelay(1000);
++
++ if (amdgpu_fence_signaled(fence1)) {
++ DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
++ goto out_cleanup;
++ }
++
++ r = amdgpu_ring_lock(ringB, 64);
++ if (r) {
++ DRM_ERROR("Failed to lock ring B %p\n", ringB);
++ goto out_cleanup;
++ }
++ amdgpu_semaphore_emit_signal(ringB, semaphore);
++ amdgpu_ring_unlock_commit(ringB);
++
++ r = amdgpu_fence_wait(fence1, false);
++ if (r) {
++ DRM_ERROR("Failed to wait for sync fence 1\n");
++ goto out_cleanup;
++ }
++
++ mdelay(1000);
++
++ if (amdgpu_fence_signaled(fence2)) {
++ DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
++ goto out_cleanup;
++ }
++
++ r = amdgpu_ring_lock(ringB, 64);
++ if (r) {
++ DRM_ERROR("Failed to lock ring B %p\n", ringB);
++ goto out_cleanup;
++ }
++ amdgpu_semaphore_emit_signal(ringB, semaphore);
++ amdgpu_ring_unlock_commit(ringB);
++
++ r = amdgpu_fence_wait(fence2, false);
++ if (r) {
++		DRM_ERROR("Failed to wait for sync fence 2\n");
++ goto out_cleanup;
++ }
++
++out_cleanup:
++ amdgpu_semaphore_free(adev, &semaphore, NULL);
++
++ if (fence1)
++ amdgpu_fence_unref(&fence1);
++
++ if (fence2)
++ amdgpu_fence_unref(&fence2);
++
++ if (r)
++ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
++}
++
++static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
++ struct amdgpu_ring *ringA,
++ struct amdgpu_ring *ringB,
++ struct amdgpu_ring *ringC)
++{
++ struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
++ struct amdgpu_semaphore *semaphore = NULL;
++ bool sigA, sigB;
++ int i, r;
++
++ r = amdgpu_semaphore_create(adev, &semaphore);
++ if (r) {
++ DRM_ERROR("Failed to create semaphore\n");
++ goto out_cleanup;
++ }
++
++ r = amdgpu_ring_lock(ringA, 64);
++ if (r) {
++ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
++ goto out_cleanup;
++ }
++ amdgpu_semaphore_emit_wait(ringA, semaphore);
++ amdgpu_ring_unlock_commit(ringA);
++
++ r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
++ if (r)
++ goto out_cleanup;
++
++ r = amdgpu_ring_lock(ringB, 64);
++ if (r) {
++ DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
++ goto out_cleanup;
++ }
++ amdgpu_semaphore_emit_wait(ringB, semaphore);
++ amdgpu_ring_unlock_commit(ringB);
++ r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
++ if (r)
++ goto out_cleanup;
++
++ mdelay(1000);
++
++ if (amdgpu_fence_signaled(fenceA)) {
++ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
++ goto out_cleanup;
++ }
++ if (amdgpu_fence_signaled(fenceB)) {
++ DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
++ goto out_cleanup;
++ }
++
++ r = amdgpu_ring_lock(ringC, 64);
++ if (r) {
++ DRM_ERROR("Failed to lock ring B %p\n", ringC);
++ goto out_cleanup;
++ }
++ amdgpu_semaphore_emit_signal(ringC, semaphore);
++ amdgpu_ring_unlock_commit(ringC);
++
++ for (i = 0; i < 30; ++i) {
++ mdelay(100);
++ sigA = amdgpu_fence_signaled(fenceA);
++ sigB = amdgpu_fence_signaled(fenceB);
++ if (sigA || sigB)
++ break;
++ }
++
++ if (!sigA && !sigB) {
++ DRM_ERROR("Neither fence A nor B has been signaled\n");
++ goto out_cleanup;
++ } else if (sigA && sigB) {
++ DRM_ERROR("Both fence A and B has been signaled\n");
++ goto out_cleanup;
++ }
++
++ DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
++
++ r = amdgpu_ring_lock(ringC, 64);
++ if (r) {
++ DRM_ERROR("Failed to lock ring B %p\n", ringC);
++ goto out_cleanup;
++ }
++ amdgpu_semaphore_emit_signal(ringC, semaphore);
++ amdgpu_ring_unlock_commit(ringC);
++
++ mdelay(1000);
++
++ r = amdgpu_fence_wait(fenceA, false);
++ if (r) {
++ DRM_ERROR("Failed to wait for sync fence A\n");
++ goto out_cleanup;
++ }
++ r = amdgpu_fence_wait(fenceB, false);
++ if (r) {
++ DRM_ERROR("Failed to wait for sync fence B\n");
++ goto out_cleanup;
++ }
++
++out_cleanup:
++ amdgpu_semaphore_free(adev, &semaphore, NULL);
++
++ if (fenceA)
++ amdgpu_fence_unref(&fenceA);
++
++ if (fenceB)
++ amdgpu_fence_unref(&fenceB);
++
++ if (r)
++ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
++}
++
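++/*
++ * Skip ring pairs the semaphore test cannot exercise: syncing from VCE
++ * ring 0 to VCE ring 1 is excluded.
++ */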
++static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
++ struct amdgpu_ring *ringB)
++{
++ if (ringA == &ringA->adev->vce.ring[0] &&
++ ringB == &ringB->adev->vce.ring[1])
++ return false;
++
++ return true;
++}
++
++void amdgpu_test_syncing(struct amdgpu_device *adev)
++{
++ int i, j, k;
++
++ for (i = 1; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_ring *ringA = adev->rings[i];
++ if (!ringA || !ringA->ready)
++ continue;
++
++ for (j = 0; j < i; ++j) {
++ struct amdgpu_ring *ringB = adev->rings[j];
++ if (!ringB || !ringB->ready)
++ continue;
++
++ if (!amdgpu_test_sync_possible(ringA, ringB))
++ continue;
++
++ DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
++ amdgpu_test_ring_sync(adev, ringA, ringB);
++
++ DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
++ amdgpu_test_ring_sync(adev, ringB, ringA);
++
++ for (k = 0; k < j; ++k) {
++ struct amdgpu_ring *ringC = adev->rings[k];
++ if (!ringC || !ringC->ready)
++ continue;
++
++ if (!amdgpu_test_sync_possible(ringA, ringC))
++ continue;
++
++ if (!amdgpu_test_sync_possible(ringB, ringC))
++ continue;
++
++ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
++ amdgpu_test_ring_sync2(adev, ringA, ringB, ringC);
++
++ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
++ amdgpu_test_ring_sync2(adev, ringA, ringC, ringB);
++
++ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
++ amdgpu_test_ring_sync2(adev, ringB, ringA, ringC);
++
++ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
++ amdgpu_test_ring_sync2(adev, ringB, ringC, ringA);
++
++ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
++ amdgpu_test_ring_sync2(adev, ringC, ringA, ringB);
++
++ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
++ amdgpu_test_ring_sync2(adev, ringC, ringB, ringA);
++ }
++ }
++ }
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+new file mode 100644
+index 0000000..b57647e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -0,0 +1,209 @@
++#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
++#define _AMDGPU_TRACE_H_
++
++#include <linux/stringify.h>
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++
++#include <drm/drmP.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM amdgpu
++#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
++#define TRACE_INCLUDE_FILE amdgpu_trace
++
++TRACE_EVENT(amdgpu_bo_create,
++ TP_PROTO(struct amdgpu_bo *bo),
++ TP_ARGS(bo),
++ TP_STRUCT__entry(
++ __field(struct amdgpu_bo *, bo)
++ __field(u32, pages)
++ ),
++
++ TP_fast_assign(
++ __entry->bo = bo;
++ __entry->pages = bo->tbo.num_pages;
++ ),
++ TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
++);
++
++TRACE_EVENT(amdgpu_cs,
++ TP_PROTO(struct amdgpu_cs_parser *p, int i),
++ TP_ARGS(p, i),
++ TP_STRUCT__entry(
++ __field(u32, ring)
++ __field(u32, dw)
++ __field(u32, fences)
++ ),
++
++ TP_fast_assign(
++ __entry->ring = p->ibs[i].ring->idx;
++ __entry->dw = p->ibs[i].length_dw;
++ __entry->fences = amdgpu_fence_count_emitted(
++ p->ibs[i].ring);
++ ),
++ TP_printk("ring=%u, dw=%u, fences=%u",
++ __entry->ring, __entry->dw,
++ __entry->fences)
++);
++
++TRACE_EVENT(amdgpu_vm_grab_id,
++ TP_PROTO(unsigned vmid, int ring),
++ TP_ARGS(vmid, ring),
++ TP_STRUCT__entry(
++ __field(u32, vmid)
++ __field(u32, ring)
++ ),
++
++ TP_fast_assign(
++ __entry->vmid = vmid;
++ __entry->ring = ring;
++ ),
++ TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
++);
++
++TRACE_EVENT(amdgpu_vm_bo_update,
++ TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
++ TP_ARGS(mapping),
++ TP_STRUCT__entry(
++ __field(u64, soffset)
++ __field(u64, eoffset)
++ __field(u32, flags)
++ ),
++
++ TP_fast_assign(
++ __entry->soffset = mapping->it.start;
++ __entry->eoffset = mapping->it.last + 1;
++ __entry->flags = mapping->flags;
++ ),
++ TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
++ __entry->soffset, __entry->eoffset, __entry->flags)
++);
++
++TRACE_EVENT(amdgpu_vm_set_page,
++ TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
++ uint32_t incr, uint32_t flags),
++ TP_ARGS(pe, addr, count, incr, flags),
++ TP_STRUCT__entry(
++ __field(u64, pe)
++ __field(u64, addr)
++ __field(u32, count)
++ __field(u32, incr)
++ __field(u32, flags)
++ ),
++
++ TP_fast_assign(
++ __entry->pe = pe;
++ __entry->addr = addr;
++ __entry->count = count;
++ __entry->incr = incr;
++ __entry->flags = flags;
++ ),
++ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
++ __entry->pe, __entry->addr, __entry->incr,
++ __entry->flags, __entry->count)
++);
++
++TRACE_EVENT(amdgpu_vm_flush,
++ TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
++ TP_ARGS(pd_addr, ring, id),
++ TP_STRUCT__entry(
++ __field(u64, pd_addr)
++ __field(u32, ring)
++ __field(u32, id)
++ ),
++
++ TP_fast_assign(
++ __entry->pd_addr = pd_addr;
++ __entry->ring = ring;
++ __entry->id = id;
++ ),
++ TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
++ __entry->pd_addr, __entry->ring, __entry->id)
++);
++
++DECLARE_EVENT_CLASS(amdgpu_fence_request,
++
++ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
++
++ TP_ARGS(dev, ring, seqno),
++
++ TP_STRUCT__entry(
++ __field(u32, dev)
++ __field(int, ring)
++ __field(u32, seqno)
++ ),
++
++ TP_fast_assign(
++ __entry->dev = dev->primary->index;
++ __entry->ring = ring;
++ __entry->seqno = seqno;
++ ),
++
++ TP_printk("dev=%u, ring=%d, seqno=%u",
++ __entry->dev, __entry->ring, __entry->seqno)
++);
++
++DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
++
++ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
++
++ TP_ARGS(dev, ring, seqno)
++);
++
++DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
++
++ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
++
++ TP_ARGS(dev, ring, seqno)
++);
++
++DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
++
++ TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
++
++ TP_ARGS(dev, ring, seqno)
++);
++
++DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
++
++ TP_PROTO(int ring, struct amdgpu_semaphore *sem),
++
++ TP_ARGS(ring, sem),
++
++ TP_STRUCT__entry(
++ __field(int, ring)
++ __field(signed, waiters)
++ __field(uint64_t, gpu_addr)
++ ),
++
++ TP_fast_assign(
++ __entry->ring = ring;
++ __entry->waiters = sem->waiters;
++ __entry->gpu_addr = sem->gpu_addr;
++ ),
++
++ TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
++ __entry->waiters, __entry->gpu_addr)
++);
++
++DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale,
++
++ TP_PROTO(int ring, struct amdgpu_semaphore *sem),
++
++ TP_ARGS(ring, sem)
++);
++
++DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait,
++
++ TP_PROTO(int ring, struct amdgpu_semaphore *sem),
++
++ TP_ARGS(ring, sem)
++);
++
++#endif
++
++/* This part must be outside protection */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#include <trace/define_trace.h>
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+new file mode 100644
+index 0000000..385b7e1
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+@@ -0,0 +1,9 @@
++/* Copyright Red Hat Inc 2010.
++ * Author : Dave Airlie <airlied@redhat.com>
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++
++#define CREATE_TRACE_POINTS
++#include "amdgpu_trace.h"
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+new file mode 100644
+index 0000000..120e6e7
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -0,0 +1,1249 @@
++/*
++ * Copyright 2009 Jerome Glisse.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Jerome Glisse <glisse@freedesktop.org>
++ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ * Dave Airlie
++ */
++#include <ttm/ttm_bo_api.h>
++#include <ttm/ttm_bo_driver.h>
++#include <ttm/ttm_placement.h>
++#include <ttm/ttm_module.h>
++#include <ttm/ttm_page_alloc.h>
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include <linux/seq_file.h>
++#include <linux/slab.h>
++#include <linux/swiotlb.h>
++#include <linux/swap.h>
++#include <linux/pagemap.h>
++#include <linux/debugfs.h>
++#include "amdgpu.h"
++#include "bif/bif_4_1_d.h"
++
++#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
++
++static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
++static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
++
++static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
++{
++ struct amdgpu_mman *mman;
++ struct amdgpu_device *adev;
++
++ mman = container_of(bdev, struct amdgpu_mman, bdev);
++ adev = container_of(mman, struct amdgpu_device, mman);
++ return adev;
++}
++
++
++/*
++ * Global memory.
++ */
++static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
++{
++ return ttm_mem_global_init(ref->object);
++}
++
++static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
++{
++ ttm_mem_global_release(ref->object);
++}
++
++static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
++{
++ struct drm_global_reference *global_ref;
++ int r;
++
++ adev->mman.mem_global_referenced = false;
++ global_ref = &adev->mman.mem_global_ref;
++ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
++ global_ref->size = sizeof(struct ttm_mem_global);
++ global_ref->init = &amdgpu_ttm_mem_global_init;
++ global_ref->release = &amdgpu_ttm_mem_global_release;
++ r = drm_global_item_ref(global_ref);
++ if (r != 0) {
++ DRM_ERROR("Failed setting up TTM memory accounting "
++ "subsystem.\n");
++ return r;
++ }
++
++ adev->mman.bo_global_ref.mem_glob =
++ adev->mman.mem_global_ref.object;
++ global_ref = &adev->mman.bo_global_ref.ref;
++ global_ref->global_type = DRM_GLOBAL_TTM_BO;
++ global_ref->size = sizeof(struct ttm_bo_global);
++ global_ref->init = &ttm_bo_global_init;
++ global_ref->release = &ttm_bo_global_release;
++ r = drm_global_item_ref(global_ref);
++ if (r != 0) {
++ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
++ drm_global_item_unref(&adev->mman.mem_global_ref);
++ return r;
++ }
++
++ adev->mman.mem_global_referenced = true;
++ return 0;
++}
++
++static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
++{
++ if (adev->mman.mem_global_referenced) {
++ drm_global_item_unref(&adev->mman.bo_global_ref.ref);
++ drm_global_item_unref(&adev->mman.mem_global_ref);
++ adev->mman.mem_global_referenced = false;
++ }
++}
++
++static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
++{
++ return 0;
++}
++
++static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
++ struct ttm_mem_type_manager *man)
++{
++ struct amdgpu_device *adev;
++
++ adev = amdgpu_get_adev(bdev);
++
++ switch (type) {
++ case TTM_PL_SYSTEM:
++ /* System memory */
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
++ man->available_caching = TTM_PL_MASK_CACHING;
++ man->default_caching = TTM_PL_FLAG_CACHED;
++ break;
++ case TTM_PL_TT:
++ man->func = &ttm_bo_manager_func;
++ man->gpu_offset = adev->mc.gtt_start;
++ man->available_caching = TTM_PL_MASK_CACHING;
++ man->default_caching = TTM_PL_FLAG_CACHED;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
++ break;
++ case TTM_PL_VRAM:
++ /* "On-card" video ram */
++ man->func = &ttm_bo_manager_func;
++ man->gpu_offset = adev->mc.vram_start;
++ man->flags = TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_MAPPABLE;
++ man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case AMDGPU_PL_GDS:
++ case AMDGPU_PL_GWS:
++ case AMDGPU_PL_OA:
++ /* On-chip GDS memory*/
++ man->func = &ttm_bo_manager_func;
++ man->gpu_offset = 0;
++ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
++ man->available_caching = TTM_PL_FLAG_UNCACHED;
++ man->default_caching = TTM_PL_FLAG_UNCACHED;
++ break;
++ default:
++ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
++ struct ttm_placement *placement)
++{
++ struct amdgpu_bo *rbo;
++ static struct ttm_place placements = {
++ .fpfn = 0,
++ .lpfn = 0,
++ .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
++ };
++
++ if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
++ placement->placement = &placements;
++ placement->busy_placement = &placements;
++ placement->num_placement = 1;
++ placement->num_busy_placement = 1;
++ return;
++ }
++ rbo = container_of(bo, struct amdgpu_bo, tbo);
++ switch (bo->mem.mem_type) {
++ case TTM_PL_VRAM:
++ if (rbo->adev->mman.buffer_funcs_ring->ready == false)
++ amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
++ else
++ amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
++ break;
++ case TTM_PL_TT:
++ default:
++ amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
++ }
++ *placement = rbo->placement;
++}
++
++static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
++{
++ struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
++
++ return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
++}
++
++static void amdgpu_move_null(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ BUG_ON(old_mem->mm_node != NULL);
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++}
++
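++/* Copy a BO with the DMA engine: translate both page offsets into the
++ * GPU's MC address space, then let amdgpu_copy_buffer() do the blit. */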
++static int amdgpu_move_blit(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait_gpu,
++ struct ttm_mem_reg *new_mem,
++ struct ttm_mem_reg *old_mem)
++{
++ struct amdgpu_device *adev;
++ struct amdgpu_ring *ring;
++ uint64_t old_start, new_start;
++ struct amdgpu_fence *fence;
++ int r;
++
++ adev = amdgpu_get_adev(bo->bdev);
++ ring = adev->mman.buffer_funcs_ring;
++ old_start = old_mem->start << PAGE_SHIFT;
++ new_start = new_mem->start << PAGE_SHIFT;
++
++ switch (old_mem->mem_type) {
++ case TTM_PL_VRAM:
++ old_start += adev->mc.vram_start;
++ break;
++ case TTM_PL_TT:
++ old_start += adev->mc.gtt_start;
++ break;
++ default:
++ DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
++ return -EINVAL;
++ }
++ switch (new_mem->mem_type) {
++ case TTM_PL_VRAM:
++ new_start += adev->mc.vram_start;
++ break;
++ case TTM_PL_TT:
++ new_start += adev->mc.gtt_start;
++ break;
++ default:
++ DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
++ return -EINVAL;
++ }
++ if (!ring->ready) {
++ DRM_ERROR("Trying to move memory with ring turned off.\n");
++ return -EINVAL;
++ }
++
++ BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
++
++ r = amdgpu_copy_buffer(ring, old_start, new_start,
++ new_mem->num_pages * PAGE_SIZE, /* bytes */
++ bo->resv, &fence);
++ if (r)
++ return r;
++
++ r = ttm_bo_move_accel_cleanup(bo, &fence->base,
++ evict, no_wait_gpu, new_mem);
++ amdgpu_fence_unref(&fence);
++ return r;
++}
++
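++/* VRAM to system moves go through a GTT bounce buffer: blit from VRAM
++ * into a temporary GTT placement first, then let TTM move the pages
++ * from GTT to system memory. */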
++static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait_gpu,
++ struct ttm_mem_reg *new_mem)
++{
++ struct amdgpu_device *adev;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_mem_reg tmp_mem;
++ struct ttm_place placements;
++ struct ttm_placement placement;
++ int r;
++
++ adev = amdgpu_get_adev(bo->bdev);
++ tmp_mem = *new_mem;
++ tmp_mem.mm_node = NULL;
++ placement.num_placement = 1;
++ placement.placement = &placements;
++ placement.num_busy_placement = 1;
++ placement.busy_placement = &placements;
++ placements.fpfn = 0;
++ placements.lpfn = 0;
++ placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
++ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
++ interruptible, no_wait_gpu);
++ if (unlikely(r)) {
++ return r;
++ }
++
++ r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++
++ r = ttm_tt_bind(bo->ttm, &tmp_mem);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++ r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
++out_cleanup:
++ ttm_bo_mem_put(bo, &tmp_mem);
++ return r;
++}
++
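++/* System to VRAM moves are the reverse: bind the pages into a temporary
++ * GTT placement first, then blit from GTT into VRAM. */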
++static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait_gpu,
++ struct ttm_mem_reg *new_mem)
++{
++ struct amdgpu_device *adev;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_mem_reg tmp_mem;
++ struct ttm_placement placement;
++ struct ttm_place placements;
++ int r;
++
++ adev = amdgpu_get_adev(bo->bdev);
++ tmp_mem = *new_mem;
++ tmp_mem.mm_node = NULL;
++ placement.num_placement = 1;
++ placement.placement = &placements;
++ placement.num_busy_placement = 1;
++ placement.busy_placement = &placements;
++ placements.fpfn = 0;
++ placements.lpfn = 0;
++ placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
++ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
++ interruptible, no_wait_gpu);
++ if (unlikely(r)) {
++ return r;
++ }
++ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++ r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
++ if (unlikely(r)) {
++ goto out_cleanup;
++ }
++out_cleanup:
++ ttm_bo_mem_put(bo, &tmp_mem);
++ return r;
++}
++
++static int amdgpu_bo_move(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait_gpu,
++ struct ttm_mem_reg *new_mem)
++{
++ struct amdgpu_device *adev;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ int r;
++
++ adev = amdgpu_get_adev(bo->bdev);
++ if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
++ amdgpu_move_null(bo, new_mem);
++ return 0;
++ }
++ if ((old_mem->mem_type == TTM_PL_TT &&
++ new_mem->mem_type == TTM_PL_SYSTEM) ||
++ (old_mem->mem_type == TTM_PL_SYSTEM &&
++ new_mem->mem_type == TTM_PL_TT)) {
++ /* bind is enough */
++ amdgpu_move_null(bo, new_mem);
++ return 0;
++ }
++ if (adev->mman.buffer_funcs == NULL ||
++ adev->mman.buffer_funcs_ring == NULL ||
++ !adev->mman.buffer_funcs_ring->ready) {
++ /* use memcpy */
++ goto memcpy;
++ }
++
++ if (old_mem->mem_type == TTM_PL_VRAM &&
++ new_mem->mem_type == TTM_PL_SYSTEM) {
++ r = amdgpu_move_vram_ram(bo, evict, interruptible,
++ no_wait_gpu, new_mem);
++ } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
++ new_mem->mem_type == TTM_PL_VRAM) {
++ r = amdgpu_move_ram_vram(bo, evict, interruptible,
++ no_wait_gpu, new_mem);
++ } else {
++ r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
++ }
++
++ if (r) {
++memcpy:
++ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
++ if (r) {
++ return r;
++ }
++ }
++
++ /* update statistics */
++ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
++ return 0;
++}
++
++static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++ struct amdgpu_device *adev = amdgpu_get_adev(bdev);
++
++ mem->bus.addr = NULL;
++ mem->bus.offset = 0;
++ mem->bus.size = mem->num_pages << PAGE_SHIFT;
++ mem->bus.base = 0;
++ mem->bus.is_iomem = false;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
++ return -EINVAL;
++ switch (mem->mem_type) {
++ case TTM_PL_SYSTEM:
++ /* system memory */
++ return 0;
++ case TTM_PL_TT:
++ break;
++ case TTM_PL_VRAM:
++ mem->bus.offset = mem->start << PAGE_SHIFT;
++ /* check if it's visible */
++ if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
++ return -EINVAL;
++ mem->bus.base = adev->mc.aper_base;
++ mem->bus.is_iomem = true;
++#ifdef __alpha__
++ /*
++ * Alpha: use bus.addr to hold the ioremap() return,
++ * so we can modify bus.base below.
++ */
++ if (mem->placement & TTM_PL_FLAG_WC)
++ mem->bus.addr =
++ ioremap_wc(mem->bus.base + mem->bus.offset,
++ mem->bus.size);
++ else
++ mem->bus.addr =
++ ioremap_nocache(mem->bus.base + mem->bus.offset,
++ mem->bus.size);
++
++ /*
++ * Alpha: Use just the bus offset plus
++ * the hose/domain memory base for bus.base.
++ * It then can be used to build PTEs for VRAM
++ * access, as done in ttm_bo_vm_fault().
++ */
++ mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
++ adev->ddev->hose->dense_mem_base;
++#endif
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
++{
++}
++
++/*
++ * TTM backend functions.
++ */
++struct amdgpu_ttm_tt {
++ struct ttm_dma_tt ttm;
++ struct amdgpu_device *adev;
++ u64 offset;
++ uint64_t userptr;
++ struct mm_struct *usermm;
++ uint32_t userflags;
++};
++
++/* prepare the sg table with the user pages */
++static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
++{
++ struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++ unsigned pinned = 0, nents;
++ int r;
++
++ int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
++ enum dma_data_direction direction = write ?
++ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
++
++ if (current->mm != gtt->usermm)
++ return -EPERM;
++
++ if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
++ /* check that we only pin down anonymous memory
++ to prevent problems with writeback */
++ unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
++ struct vm_area_struct *vma;
++
++ vma = find_vma(gtt->usermm, gtt->userptr);
++ if (!vma || vma->vm_file || vma->vm_end < end)
++ return -EPERM;
++ }
++
++ do {
++ unsigned num_pages = ttm->num_pages - pinned;
++ uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
++ struct page **pages = ttm->pages + pinned;
++
++ r = get_user_pages(current, current->mm, userptr, num_pages,
++ write, 0, pages, NULL);
++ if (r < 0)
++ goto release_pages;
++
++ pinned += r;
++
++ } while (pinned < ttm->num_pages);
++
++ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
++ ttm->num_pages << PAGE_SHIFT,
++ GFP_KERNEL);
++ if (r)
++ goto release_sg;
++
++ r = -ENOMEM;
++ nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
++ if (nents != ttm->sg->nents)
++ goto release_sg;
++
++ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
++ gtt->ttm.dma_address, ttm->num_pages);
++
++ return 0;
++
++release_sg:
++ kfree(ttm->sg);
++
++release_pages:
++ release_pages(ttm->pages, pinned, 0);
++ return r;
++}
++
++static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
++{
++ struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++ struct scatterlist *sg;
++ int i;
++
++ int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
++ enum dma_data_direction direction = write ?
++ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
++
++ /* double check that we don't free the table twice */
++ if (!ttm->sg->sgl)
++ return;
++
++ /* free the sg table and pages again */
++ dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
++
++ for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
++ struct page *page = sg_page(sg);
++
++ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
++ set_page_dirty(page);
++
++ mark_page_accessed(page);
++ page_cache_release(page);
++ }
++
++ sg_free_table(ttm->sg);
++}
++
++static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
++ struct ttm_mem_reg *bo_mem)
++{
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++ uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
++ int r;
++
++ if (gtt->userptr)
++ amdgpu_ttm_tt_pin_userptr(ttm);
++
++ gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
++ if (!ttm->num_pages) {
++ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
++ ttm->num_pages, bo_mem, ttm);
++ }
++
++ if (bo_mem->mem_type == AMDGPU_PL_GDS ||
++ bo_mem->mem_type == AMDGPU_PL_GWS ||
++ bo_mem->mem_type == AMDGPU_PL_OA)
++ return -EINVAL;
++
++ r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
++ ttm->pages, gtt->ttm.dma_address, flags);
++
++ if (r) {
++ DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
++ ttm->num_pages, (unsigned)gtt->offset);
++ return r;
++ }
++ return 0;
++}
++
++static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
++{
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++
++ /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
++ if (gtt->adev->gart.ready)
++ amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
++
++ if (gtt->userptr)
++ amdgpu_ttm_tt_unpin_userptr(ttm);
++
++ return 0;
++}
++
++static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
++{
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++
++ ttm_dma_tt_fini(&gtt->ttm);
++ kfree(gtt);
++}
++
++static struct ttm_backend_func amdgpu_backend_func = {
++ .bind = &amdgpu_ttm_backend_bind,
++ .unbind = &amdgpu_ttm_backend_unbind,
++ .destroy = &amdgpu_ttm_backend_destroy,
++};
++
++static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
++ unsigned long size, uint32_t page_flags,
++ struct page *dummy_read_page)
++{
++ struct amdgpu_device *adev;
++ struct amdgpu_ttm_tt *gtt;
++
++ adev = amdgpu_get_adev(bdev);
++
++ gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
++ if (gtt == NULL) {
++ return NULL;
++ }
++ gtt->ttm.ttm.func = &amdgpu_backend_func;
++ gtt->adev = adev;
++ if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
++ kfree(gtt);
++ return NULL;
++ }
++ return &gtt->ttm.ttm;
++}
++
++static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
++{
++ struct amdgpu_device *adev;
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++ unsigned i;
++ int r;
++ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
++
++ if (ttm->state != tt_unpopulated)
++ return 0;
++
++ if (gtt && gtt->userptr) {
++ ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
++ if (!ttm->sg)
++ return -ENOMEM;
++
++ ttm->page_flags |= TTM_PAGE_FLAG_SG;
++ ttm->state = tt_unbound;
++ return 0;
++ }
++
++ if (slave && ttm->sg) {
++ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
++ gtt->ttm.dma_address, ttm->num_pages);
++ ttm->state = tt_unbound;
++ return 0;
++ }
++
++ adev = amdgpu_get_adev(ttm->bdev);
++
++#ifdef CONFIG_SWIOTLB
++ if (swiotlb_nr_tbl()) {
++ return ttm_dma_populate(&gtt->ttm, adev->dev);
++ }
++#endif
++
++ r = ttm_pool_populate(ttm);
++ if (r) {
++ return r;
++ }
++
++ for (i = 0; i < ttm->num_pages; i++) {
++ gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
++ 0, PAGE_SIZE,
++ PCI_DMA_BIDIRECTIONAL);
++ if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
++ while (i--) {
++ pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++ gtt->ttm.dma_address[i] = 0;
++ }
++ ttm_pool_unpopulate(ttm);
++ return -EFAULT;
++ }
++ }
++ return 0;
++}
++
++static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
++{
++ struct amdgpu_device *adev;
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++ unsigned i;
++ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
++
++ if (gtt && gtt->userptr) {
++ kfree(ttm->sg);
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
++ return;
++ }
++
++ if (slave)
++ return;
++
++ adev = amdgpu_get_adev(ttm->bdev);
++
++#ifdef CONFIG_SWIOTLB
++ if (swiotlb_nr_tbl()) {
++ ttm_dma_unpopulate(&gtt->ttm, adev->dev);
++ return;
++ }
++#endif
++
++ for (i = 0; i < ttm->num_pages; i++) {
++ if (gtt->ttm.dma_address[i]) {
++ pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++ }
++ }
++
++ ttm_pool_unpopulate(ttm);
++}
++
++int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
++ uint32_t flags)
++{
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++
++ if (gtt == NULL)
++ return -EINVAL;
++
++ gtt->userptr = addr;
++ gtt->usermm = current->mm;
++ gtt->userflags = flags;
++ return 0;
++}
++
++bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
++{
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++
++ if (gtt == NULL)
++ return false;
++
++ return !!gtt->userptr;
++}
++
++bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
++{
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++
++ if (gtt == NULL)
++ return false;
++
++ return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
++}
++
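++/* Compute the GART PTE flags for a ttm_tt: VALID for anything backed by
++ * memory, SYSTEM for GTT placements, SNOOPED for CPU cached pages, and
++ * WRITEABLE unless the BO is a read-only userptr. */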
++uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
++ struct ttm_mem_reg *mem)
++{
++ uint32_t flags = 0;
++
++ if (mem && mem->mem_type != TTM_PL_SYSTEM)
++ flags |= AMDGPU_PTE_VALID;
++
++ if (mem && mem->mem_type == TTM_PL_TT)
++ flags |= AMDGPU_PTE_SYSTEM;
++
++ if (!ttm || ttm->caching_state == tt_cached)
++ flags |= AMDGPU_PTE_SNOOPED;
++
++ if (adev->asic_type >= CHIP_TOPAZ)
++ flags |= AMDGPU_PTE_EXECUTABLE;
++
++ flags |= AMDGPU_PTE_READABLE;
++
++ if (!amdgpu_ttm_tt_is_readonly(ttm))
++ flags |= AMDGPU_PTE_WRITEABLE;
++
++ return flags;
++}
++
++static struct ttm_bo_driver amdgpu_bo_driver = {
++ .ttm_tt_create = &amdgpu_ttm_tt_create,
++ .ttm_tt_populate = &amdgpu_ttm_tt_populate,
++ .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
++ .invalidate_caches = &amdgpu_invalidate_caches,
++ .init_mem_type = &amdgpu_init_mem_type,
++ .evict_flags = &amdgpu_evict_flags,
++ .move = &amdgpu_bo_move,
++ .verify_access = &amdgpu_verify_access,
++ .move_notify = &amdgpu_bo_move_notify,
++ .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
++ .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
++ .io_mem_free = &amdgpu_ttm_io_mem_free,
++};
++
++int amdgpu_ttm_init(struct amdgpu_device *adev)
++{
++ int r;
++
++ r = amdgpu_ttm_global_init(adev);
++ if (r) {
++ return r;
++ }
++ /* No other user of the address space, so set it to 0 */
++ r = ttm_bo_device_init(&adev->mman.bdev,
++ adev->mman.bo_global_ref.ref.object,
++ &amdgpu_bo_driver,
++ adev->ddev->anon_inode->i_mapping,
++ DRM_FILE_PAGE_OFFSET,
++ adev->need_dma32);
++ if (r) {
++ DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
++ return r;
++ }
++ adev->mman.initialized = true;
++ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
++ adev->mc.real_vram_size >> PAGE_SHIFT);
++ if (r) {
++ DRM_ERROR("Failed initializing VRAM heap.\n");
++ return r;
++ }
++ /* Change the size here instead of the init above so only lpfn is affected */
++ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
++
++ r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_VRAM, 0,
++ NULL, &adev->stollen_vga_memory);
++ if (r) {
++ return r;
++ }
++ r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
++ if (r)
++ return r;
++ r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
++ amdgpu_bo_unreserve(adev->stollen_vga_memory);
++ if (r) {
++ amdgpu_bo_unref(&adev->stollen_vga_memory);
++ return r;
++ }
++ DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
++ (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
++ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
++ adev->mc.gtt_size >> PAGE_SHIFT);
++ if (r) {
++ DRM_ERROR("Failed initializing GTT heap.\n");
++ return r;
++ }
++ DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
++ (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
++
++ adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
++ adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
++ adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
++ adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
++ adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
++ adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
++ adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
++ adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
++ adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
++ /* GDS Memory */
++ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
++ adev->gds.mem.total_size >> PAGE_SHIFT);
++ if (r) {
++ DRM_ERROR("Failed initializing GDS heap.\n");
++ return r;
++ }
++
++ /* GWS */
++ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
++ adev->gds.gws.total_size >> PAGE_SHIFT);
++ if (r) {
++ DRM_ERROR("Failed initializing gws heap.\n");
++ return r;
++ }
++
++ /* OA */
++ r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
++ adev->gds.oa.total_size >> PAGE_SHIFT);
++ if (r) {
++ DRM_ERROR("Failed initializing oa heap.\n");
++ return r;
++ }
++
++ r = amdgpu_ttm_debugfs_init(adev);
++ if (r) {
++ DRM_ERROR("Failed to init debugfs\n");
++ return r;
++ }
++ return 0;
++}
++
++void amdgpu_ttm_fini(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (!adev->mman.initialized)
++ return;
++ amdgpu_ttm_debugfs_fini(adev);
++ if (adev->stollen_vga_memory) {
++ r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
++ if (r == 0) {
++ amdgpu_bo_unpin(adev->stollen_vga_memory);
++ amdgpu_bo_unreserve(adev->stollen_vga_memory);
++ }
++ amdgpu_bo_unref(&adev->stollen_vga_memory);
++ }
++ ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
++ ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
++ ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
++ ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
++ ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
++ ttm_bo_device_release(&adev->mman.bdev);
++ amdgpu_gart_fini(adev);
++ amdgpu_ttm_global_fini(adev);
++ adev->mman.initialized = false;
++ DRM_INFO("amdgpu: ttm finalized\n");
++}
++
++/* this should only be called at bootup or when userspace
++ * isn't running */
++void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
++{
++ struct ttm_mem_type_manager *man;
++
++ if (!adev->mman.initialized)
++ return;
++
++ man = &adev->mman.bdev.man[TTM_PL_VRAM];
++ /* this just adjusts TTM size idea, which sets lpfn to the correct value */
++ man->size = size >> PAGE_SHIFT;
++}
++
++static struct vm_operations_struct amdgpu_ttm_vm_ops;
++static const struct vm_operations_struct *ttm_vm_ops = NULL;
++
++static int amdgpu_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo;
++ struct amdgpu_device *adev;
++ int r;
++
++ bo = (struct ttm_buffer_object *)vma->vm_private_data;
++ if (bo == NULL) {
++ return VM_FAULT_NOPAGE;
++ }
++ adev = amdgpu_get_adev(bo->bdev);
++ down_read(&adev->pm.mclk_lock);
++ r = ttm_vm_ops->fault(vma, vmf);
++ up_read(&adev->pm.mclk_lock);
++ return r;
++}
++
++int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ struct drm_file *file_priv;
++ struct amdgpu_device *adev;
++ int r;
++
++ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
++ return -EINVAL;
++ }
++
++ file_priv = filp->private_data;
++ adev = file_priv->minor->dev->dev_private;
++ if (adev == NULL) {
++ return -EINVAL;
++ }
++ r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
++ if (unlikely(r != 0)) {
++ return r;
++ }
++ if (unlikely(ttm_vm_ops == NULL)) {
++ ttm_vm_ops = vma->vm_ops;
++ amdgpu_ttm_vm_ops = *ttm_vm_ops;
++ amdgpu_ttm_vm_ops.fault = &amdgpu_ttm_fault;
++ }
++ vma->vm_ops = &amdgpu_ttm_vm_ops;
++ return 0;
++}
++
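++/* Schedule a buffer copy on the DMA ring: wait for the @resv fences
++ * first, split the copy into copy_max_bytes sized chunks, and emit a
++ * fence that signals when the last chunk is done. */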
++int amdgpu_copy_buffer(struct amdgpu_ring *ring,
++ uint64_t src_offset,
++ uint64_t dst_offset,
++ uint32_t byte_count,
++ struct reservation_object *resv,
++ struct amdgpu_fence **fence)
++{
++ struct amdgpu_device *adev = ring->adev;
++ struct amdgpu_sync sync;
++ uint32_t max_bytes;
++ unsigned num_loops, num_dw;
++ unsigned i;
++ int r;
++
++ /* sync other rings */
++ amdgpu_sync_create(&sync);
++ if (resv) {
++ r = amdgpu_sync_resv(adev, &sync, resv, false);
++ if (r) {
++ DRM_ERROR("sync failed (%d).\n", r);
++ amdgpu_sync_free(adev, &sync, NULL);
++ return r;
++ }
++ }
++
++ max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
++ num_loops = DIV_ROUND_UP(byte_count, max_bytes);
++ num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
++
++ /* for fence and sync */
++ num_dw += 64 + AMDGPU_NUM_SYNCS * 8;
++
++ r = amdgpu_ring_lock(ring, num_dw);
++ if (r) {
++ DRM_ERROR("ring lock failed (%d).\n", r);
++ amdgpu_sync_free(adev, &sync, NULL);
++ return r;
++ }
++
++ amdgpu_sync_rings(&sync, ring);
++
++ for (i = 0; i < num_loops; i++) {
++ uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
++
++ amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset,
++ cur_size_in_bytes);
++
++ src_offset += cur_size_in_bytes;
++ dst_offset += cur_size_in_bytes;
++ byte_count -= cur_size_in_bytes;
++ }
++
++ r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence);
++ if (r) {
++ amdgpu_ring_unlock_undo(ring);
++ amdgpu_sync_free(adev, &sync, NULL);
++ return r;
++ }
++
++ amdgpu_ring_unlock_commit(ring);
++ amdgpu_sync_free(adev, &sync, *fence);
++
++ return 0;
++}
++
++#if defined(CONFIG_DEBUG_FS)
++
++static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *)m->private;
++ unsigned ttm_pl = *(int *)node->info_ent->data;
++ struct drm_device *dev = node->minor->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
++ int ret;
++ struct ttm_bo_global *glob = adev->mman.bdev.glob;
++
++ spin_lock(&glob->lru_lock);
++ ret = drm_mm_dump_table(m, mm);
++ spin_unlock(&glob->lru_lock);
++ return ret;
++}
++
++static int ttm_pl_vram = TTM_PL_VRAM;
++static int ttm_pl_tt = TTM_PL_TT;
++
++static struct drm_info_list amdgpu_ttm_debugfs_list[] = {
++ {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
++ {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
++ {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
++#ifdef CONFIG_SWIOTLB
++ {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
++#endif
++};
++
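++/* Read VRAM through the MM_INDEX/MM_DATA register window, one dword at
++ * a time; offset and size must therefore be dword aligned. */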
++static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
++ size_t size, loff_t *pos)
++{
++ struct amdgpu_device *adev = f->f_inode->i_private;
++ ssize_t result = 0;
++ int r;
++
++ if (size & 0x3 || *pos & 0x3)
++ return -EINVAL;
++
++ while (size) {
++ unsigned long flags;
++ uint32_t value;
++
++ if (*pos >= adev->mc.mc_vram_size)
++ return result;
++
++ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
++ WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
++ WREG32(mmMM_INDEX_HI, *pos >> 31);
++ value = RREG32(mmMM_DATA);
++ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
++
++ r = put_user(value, (uint32_t *)buf);
++ if (r)
++ return r;
++
++ result += 4;
++ buf += 4;
++ *pos += 4;
++ size -= 4;
++ }
++
++ return result;
++}
++
++static const struct file_operations amdgpu_ttm_vram_fops = {
++ .owner = THIS_MODULE,
++ .read = amdgpu_ttm_vram_read,
++ .llseek = default_llseek
++};
++
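++/* Read back GTT contents page by page via kmap(); holes in the GART
++ * simply read as zeroes. */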
++static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
++ size_t size, loff_t *pos)
++{
++ struct amdgpu_device *adev = f->f_inode->i_private;
++ ssize_t result = 0;
++ int r;
++
++ while (size) {
++ loff_t p = *pos / PAGE_SIZE;
++ unsigned off = *pos & ~PAGE_MASK;
++ size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
++ struct page *page;
++ void *ptr;
++
++ if (p >= adev->gart.num_cpu_pages)
++ return result;
++
++ page = adev->gart.pages[p];
++ if (page) {
++ ptr = kmap(page);
++ ptr += off;
++
++ r = copy_to_user(buf, ptr, cur_size);
++ kunmap(adev->gart.pages[p]);
++ } else {
++ r = clear_user(buf, cur_size);
++ }
++
++ if (r)
++ return -EFAULT;
++
++ result += cur_size;
++ buf += cur_size;
++ *pos += cur_size;
++ size -= cur_size;
++ }
++
++ return result;
++}
++
++static const struct file_operations amdgpu_ttm_gtt_fops = {
++ .owner = THIS_MODULE,
++ .read = amdgpu_ttm_gtt_read,
++ .llseek = default_llseek
++};
++
++#endif
++
++static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
++{
++#if defined(CONFIG_DEBUG_FS)
++ unsigned count;
++
++ struct drm_minor *minor = adev->ddev->primary;
++ struct dentry *ent, *root = minor->debugfs_root;
++
++ ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
++ adev, &amdgpu_ttm_vram_fops);
++ if (IS_ERR(ent))
++ return PTR_ERR(ent);
++ i_size_write(ent->d_inode, adev->mc.mc_vram_size);
++ adev->mman.vram = ent;
++
++ ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
++ adev, &amdgpu_ttm_gtt_fops);
++ if (IS_ERR(ent))
++ return PTR_ERR(ent);
++ i_size_write(ent->d_inode, adev->mc.gtt_size);
++ adev->mman.gtt = ent;
++
++ count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
++
++#ifdef CONFIG_SWIOTLB
++ if (!swiotlb_nr_tbl())
++ --count;
++#endif
++
++ return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
++#else
++
++ return 0;
++#endif
++}
++
++static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
++{
++#if defined(CONFIG_DEBUG_FS)
++
++ debugfs_remove(adev->mman.vram);
++ adev->mman.vram = NULL;
++
++ debugfs_remove(adev->mman.gtt);
++ adev->mman.gtt = NULL;
++#endif
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+new file mode 100644
+index 0000000..93af9f9
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -0,0 +1,317 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include <linux/firmware.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <drm/drmP.h>
++#include "amdgpu.h"
++#include "amdgpu_ucode.h"
++
++static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
++{
++ DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
++ DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
++ DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
++ DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
++ DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
++ DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
++ DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
++ DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
++ DRM_DEBUG("ucode_array_offset_bytes: %u\n",
++ le32_to_cpu(hdr->ucode_array_offset_bytes));
++ DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
++}
++
++void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
++{
++ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
++ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
++
++ DRM_DEBUG("MC\n");
++ amdgpu_ucode_print_common_hdr(hdr);
++
++ if (version_major == 1) {
++ const struct mc_firmware_header_v1_0 *mc_hdr =
++ container_of(hdr, struct mc_firmware_header_v1_0, header);
++
++ DRM_DEBUG("io_debug_size_bytes: %u\n",
++ le32_to_cpu(mc_hdr->io_debug_size_bytes));
++ DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
++ le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
++ } else {
++ DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
++ }
++}
++
++void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
++{
++ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
++ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
++
++ DRM_DEBUG("SMC\n");
++ amdgpu_ucode_print_common_hdr(hdr);
++
++ if (version_major == 1) {
++ const struct smc_firmware_header_v1_0 *smc_hdr =
++ container_of(hdr, struct smc_firmware_header_v1_0, header);
++
++ DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
++ } else {
++ DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
++ }
++}
++
++void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
++{
++ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
++ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
++
++ DRM_DEBUG("GFX\n");
++ amdgpu_ucode_print_common_hdr(hdr);
++
++ if (version_major == 1) {
++ const struct gfx_firmware_header_v1_0 *gfx_hdr =
++ container_of(hdr, struct gfx_firmware_header_v1_0, header);
++
++ DRM_DEBUG("ucode_feature_version: %u\n",
++ le32_to_cpu(gfx_hdr->ucode_feature_version));
++ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
++ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
++ } else {
++ DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
++ }
++}
++
++void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
++{
++ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
++ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
++
++ DRM_DEBUG("RLC\n");
++ amdgpu_ucode_print_common_hdr(hdr);
++
++ if (version_major == 1) {
++ const struct rlc_firmware_header_v1_0 *rlc_hdr =
++ container_of(hdr, struct rlc_firmware_header_v1_0, header);
++
++ DRM_DEBUG("ucode_feature_version: %u\n",
++ le32_to_cpu(rlc_hdr->ucode_feature_version));
++ DRM_DEBUG("save_and_restore_offset: %u\n",
++ le32_to_cpu(rlc_hdr->save_and_restore_offset));
++ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
++ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
++ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
++ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
++ DRM_DEBUG("master_pkt_description_offset: %u\n",
++ le32_to_cpu(rlc_hdr->master_pkt_description_offset));
++ } else if (version_major == 2) {
++ const struct rlc_firmware_header_v2_0 *rlc_hdr =
++ container_of(hdr, struct rlc_firmware_header_v2_0, header);
++
++ DRM_DEBUG("ucode_feature_version: %u\n",
++ le32_to_cpu(rlc_hdr->ucode_feature_version));
++ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
++ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
++ DRM_DEBUG("save_and_restore_offset: %u\n",
++ le32_to_cpu(rlc_hdr->save_and_restore_offset));
++ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
++ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
++ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
++ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
++ DRM_DEBUG("reg_restore_list_size: %u\n",
++ le32_to_cpu(rlc_hdr->reg_restore_list_size));
++ DRM_DEBUG("reg_list_format_start: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_format_start));
++ DRM_DEBUG("reg_list_format_separate_start: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
++ DRM_DEBUG("starting_offsets_start: %u\n",
++ le32_to_cpu(rlc_hdr->starting_offsets_start));
++ DRM_DEBUG("reg_list_format_size_bytes: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
++ DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
++ DRM_DEBUG("reg_list_size_bytes: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_size_bytes));
++ DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
++ DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
++ DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
++ DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
++ DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
++ le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
++ } else {
++ DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
++ }
++}
++
++void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
++{
++ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
++ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
++
++ DRM_DEBUG("SDMA\n");
++ amdgpu_ucode_print_common_hdr(hdr);
++
++ if (version_major == 1) {
++ const struct sdma_firmware_header_v1_0 *sdma_hdr =
++ container_of(hdr, struct sdma_firmware_header_v1_0, header);
++
++ DRM_DEBUG("ucode_feature_version: %u\n",
++ le32_to_cpu(sdma_hdr->ucode_feature_version));
++ DRM_DEBUG("ucode_change_version: %u\n",
++ le32_to_cpu(sdma_hdr->ucode_change_version));
++ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
++ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
++ if (version_minor >= 1) {
++ const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
++ container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
++ DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
++ }
++ } else {
++ DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
++ version_major, version_minor);
++ }
++}
++
++int amdgpu_ucode_validate(const struct firmware *fw)
++{
++ const struct common_firmware_header *hdr =
++ (const struct common_firmware_header *)fw->data;
++
++ if (fw->size == le32_to_cpu(hdr->size_bytes))
++ return 0;
++
++ return -EINVAL;
++}
++
++bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
++ uint16_t hdr_major, uint16_t hdr_minor)
++{
++ if ((hdr->common.header_version_major == hdr_major) &&
++ (hdr->common.header_version_minor == hdr_minor))
++ return false;
++ return true;
++}
++
++static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
++ uint64_t mc_addr, void *kptr)
++{
++ const struct common_firmware_header *header = NULL;
++
++ if (NULL == ucode->fw)
++ return 0;
++
++ ucode->mc_addr = mc_addr;
++ ucode->kaddr = kptr;
++
++ header = (const struct common_firmware_header *)ucode->fw->data;
++ memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
++ le32_to_cpu(header->ucode_array_offset_bytes)),
++ le32_to_cpu(header->ucode_size_bytes));
++
++ return 0;
++}
++
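++/* Pack all requested ucode images into a single pinned GTT buffer at
++ * page aligned offsets, used when the SMU loads firmware on behalf of
++ * the driver. */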
++int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
++{
++ struct amdgpu_bo **bo = &adev->firmware.fw_buf;
++ uint64_t fw_mc_addr;
++ void *fw_buf_ptr = NULL;
++ uint64_t fw_offset = 0;
++ int i, err;
++ struct amdgpu_firmware_info *ucode = NULL;
++ const struct common_firmware_header *header = NULL;
++
++ err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
++ if (err) {
++ dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
++ err = -ENOMEM;
++ goto failed;
++ }
++
++ err = amdgpu_bo_reserve(*bo, false);
++ if (err) {
++ amdgpu_bo_unref(bo);
++ dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
++ goto failed;
++ }
++
++ err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
++ if (err) {
++ amdgpu_bo_unreserve(*bo);
++ amdgpu_bo_unref(bo);
++ dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
++ goto failed;
++ }
++
++ err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
++ if (err) {
++ dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
++ amdgpu_bo_unpin(*bo);
++ amdgpu_bo_unreserve(*bo);
++ amdgpu_bo_unref(bo);
++ goto failed;
++ }
++
++ amdgpu_bo_unreserve(*bo);
++
++ fw_offset = 0;
++ for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
++ ucode = &adev->firmware.ucode[i];
++ if (ucode->fw) {
++ header = (const struct common_firmware_header *)ucode->fw->data;
++ amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
++ (void *)((uint64_t)fw_buf_ptr + fw_offset));
++ fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++ }
++ }
++
++failed:
++ if (err)
++ adev->firmware.smu_load = false;
++
++ return err;
++}
++
++int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
++{
++ int i;
++ struct amdgpu_firmware_info *ucode = NULL;
++
++ for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
++ ucode = &adev->firmware.ucode[i];
++ if (ucode->fw) {
++ ucode->mc_addr = 0;
++ ucode->kaddr = NULL;
++ }
++ }
++ amdgpu_bo_unref(&adev->firmware.fw_buf);
++ adev->firmware.fw_buf = NULL;
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+new file mode 100644
+index 0000000..e468be4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -0,0 +1,176 @@
++/*
++ * Copyright 2012 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#ifndef __AMDGPU_UCODE_H__
++#define __AMDGPU_UCODE_H__
++
++struct common_firmware_header {
++ uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
++ uint32_t header_size_bytes; /* size of just the header in bytes */
++ uint16_t header_version_major; /* header version */
++ uint16_t header_version_minor; /* header version */
++ uint16_t ip_version_major; /* IP version */
++ uint16_t ip_version_minor; /* IP version */
++ uint32_t ucode_version;
++ uint32_t ucode_size_bytes; /* size of ucode in bytes */
++ uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
++ uint32_t crc32; /* crc32 checksum of the payload */
++};
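++
++/*
++ * On-disk layout: a versioned header (header_size_bytes long) followed
++ * by the ucode payload at ucode_array_offset_bytes. All multi-byte
++ * fields are little-endian, hence the le32_to_cpu() calls when parsing.
++ */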
++
++/* version_major=1, version_minor=0 */
++struct mc_firmware_header_v1_0 {
++ struct common_firmware_header header;
++ uint32_t io_debug_size_bytes; /* size of debug array in dwords */
++ uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
++};
++
++/* version_major=1, version_minor=0 */
++struct smc_firmware_header_v1_0 {
++ struct common_firmware_header header;
++ uint32_t ucode_start_addr;
++};
++
++/* version_major=1, version_minor=0 */
++struct gfx_firmware_header_v1_0 {
++ struct common_firmware_header header;
++ uint32_t ucode_feature_version;
++ uint32_t jt_offset; /* jt location */
++ uint32_t jt_size; /* size of jt */
++};
++
++/* version_major=1, version_minor=0 */
++struct rlc_firmware_header_v1_0 {
++ struct common_firmware_header header;
++ uint32_t ucode_feature_version;
++ uint32_t save_and_restore_offset;
++ uint32_t clear_state_descriptor_offset;
++ uint32_t avail_scratch_ram_locations;
++ uint32_t master_pkt_description_offset;
++};
++
++/* version_major=2, version_minor=0 */
++struct rlc_firmware_header_v2_0 {
++ struct common_firmware_header header;
++ uint32_t ucode_feature_version;
++ uint32_t jt_offset; /* jt location */
++ uint32_t jt_size; /* size of jt */
++ uint32_t save_and_restore_offset;
++ uint32_t clear_state_descriptor_offset;
++ uint32_t avail_scratch_ram_locations;
++ uint32_t reg_restore_list_size;
++ uint32_t reg_list_format_start;
++ uint32_t reg_list_format_separate_start;
++ uint32_t starting_offsets_start;
++ uint32_t reg_list_format_size_bytes; /* size of reg list format array in bytes */
++ uint32_t reg_list_format_array_offset_bytes; /* payload offset from the start of the header */
++ uint32_t reg_list_size_bytes; /* size of reg list array in bytes */
++ uint32_t reg_list_array_offset_bytes; /* payload offset from the start of the header */
++ uint32_t reg_list_format_separate_size_bytes; /* size of reg list format array in bytes */
++ uint32_t reg_list_format_separate_array_offset_bytes; /* payload offset from the start of the header */
++ uint32_t reg_list_separate_size_bytes; /* size of reg list array in bytes */
++ uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */
++};
++
++/* version_major=1, version_minor=0 */
++struct sdma_firmware_header_v1_0 {
++ struct common_firmware_header header;
++ uint32_t ucode_feature_version;
++ uint32_t ucode_change_version;
++ uint32_t jt_offset; /* jt location */
++ uint32_t jt_size; /* size of jt */
++};
++
++/* version_major=1, version_minor=1 */
++struct sdma_firmware_header_v1_1 {
++ struct sdma_firmware_header_v1_0 v1_0;
++ uint32_t digest_size;
++};
++
++/* header is fixed size */
++union amdgpu_firmware_header {
++ struct common_firmware_header common;
++ struct mc_firmware_header_v1_0 mc;
++ struct smc_firmware_header_v1_0 smc;
++ struct gfx_firmware_header_v1_0 gfx;
++ struct rlc_firmware_header_v1_0 rlc;
++ struct rlc_firmware_header_v2_0 rlc_v2_0;
++ struct sdma_firmware_header_v1_0 sdma;
++ struct sdma_firmware_header_v1_1 sdma_v1_1;
++ uint8_t raw[0x100];
++};
++
++/*
++ * fw loading support
++ */
++enum AMDGPU_UCODE_ID {
++ AMDGPU_UCODE_ID_SDMA0 = 0,
++ AMDGPU_UCODE_ID_SDMA1,
++ AMDGPU_UCODE_ID_CP_CE,
++ AMDGPU_UCODE_ID_CP_PFP,
++ AMDGPU_UCODE_ID_CP_ME,
++ AMDGPU_UCODE_ID_CP_MEC1,
++ AMDGPU_UCODE_ID_CP_MEC2,
++ AMDGPU_UCODE_ID_RLC_G,
++ AMDGPU_UCODE_ID_MAXIMUM,
++};
++
++/* engine firmware status */
++enum AMDGPU_UCODE_STATUS {
++ AMDGPU_UCODE_STATUS_INVALID,
++ AMDGPU_UCODE_STATUS_NOT_LOADED,
++ AMDGPU_UCODE_STATUS_LOADED,
++};
++
++/* conform to smu_ucode_xfer_cz.h */
++#define AMDGPU_SDMA0_UCODE_LOADED 0x00000001
++#define AMDGPU_SDMA1_UCODE_LOADED 0x00000002
++#define AMDGPU_CPCE_UCODE_LOADED 0x00000004
++#define AMDGPU_CPPFP_UCODE_LOADED 0x00000008
++#define AMDGPU_CPME_UCODE_LOADED 0x00000010
++#define AMDGPU_CPMEC1_UCODE_LOADED 0x00000020
++#define AMDGPU_CPMEC2_UCODE_LOADED 0x00000040
++#define AMDGPU_CPRLC_UCODE_LOADED 0x00000100
++
++/* amdgpu firmware info */
++struct amdgpu_firmware_info {
++ /* ucode ID */
++ enum AMDGPU_UCODE_ID ucode_id;
++ /* request_firmware */
++ const struct firmware *fw;
++ /* starting mc address */
++ uint64_t mc_addr;
++ /* kernel linear address */
++ void *kaddr;
++};
++
++void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
++void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
++void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
++void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
++void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
++int amdgpu_ucode_validate(const struct firmware *fw);
++bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
++ uint16_t hdr_major, uint16_t hdr_minor);
++int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
++int amdgpu_ucode_fini_bo(struct amdgpu_device *adev);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+new file mode 100644
+index 0000000..c03bce6
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -0,0 +1,976 @@
++/*
++ * Copyright 2011 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ * Christian König <deathsimple@vodafone.de>
++ */
++
++#include <linux/firmware.h>
++#include <linux/module.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "amdgpu.h"
++#include "amdgpu_pm.h"
++#include "amdgpu_uvd.h"
++#include "cikd.h"
++#include "uvd/uvd_4_2_d.h"
++
++/* 1 second timeout */
++#define UVD_IDLE_TIMEOUT_MS 1000
++
++/* Firmware Names */
++#ifdef CONFIG_DRM_AMDGPU_CIK
++#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
++#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
++#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
++#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
++#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
++#endif
++#define FIRMWARE_TONGA "radeon/tonga_uvd.bin"
++#define FIRMWARE_CARRIZO "radeon/carrizo_uvd.bin"
++
++/**
++ * amdgpu_uvd_cs_ctx - Command submission parser context
++ *
++ * Used for emulating virtual memory support on UVD 4.2.
++ */
++struct amdgpu_uvd_cs_ctx {
++ struct amdgpu_cs_parser *parser;
++ unsigned reg, count;
++ unsigned data0, data1;
++ unsigned idx;
++ unsigned ib_idx;
++
++ /* does the IB have a msg command */
++ bool has_msg_cmd;
++
++ /* minimum buffer sizes */
++ unsigned *buf_sizes;
++};
++
++#ifdef CONFIG_DRM_AMDGPU_CIK
++MODULE_FIRMWARE(FIRMWARE_BONAIRE);
++MODULE_FIRMWARE(FIRMWARE_KABINI);
++MODULE_FIRMWARE(FIRMWARE_KAVERI);
++MODULE_FIRMWARE(FIRMWARE_HAWAII);
++MODULE_FIRMWARE(FIRMWARE_MULLINS);
++#endif
++MODULE_FIRMWARE(FIRMWARE_TONGA);
++MODULE_FIRMWARE(FIRMWARE_CARRIZO);
++
++static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
++static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
++
++int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
++{
++ unsigned long bo_size;
++ const char *fw_name;
++ const struct common_firmware_header *hdr;
++ unsigned version_major, version_minor, family_id;
++ int i, r;
++
++ INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
++
++ switch (adev->asic_type) {
++#ifdef CONFIG_DRM_AMDGPU_CIK
++ case CHIP_BONAIRE:
++ fw_name = FIRMWARE_BONAIRE;
++ break;
++ case CHIP_KABINI:
++ fw_name = FIRMWARE_KABINI;
++ break;
++ case CHIP_KAVERI:
++ fw_name = FIRMWARE_KAVERI;
++ break;
++ case CHIP_HAWAII:
++ fw_name = FIRMWARE_HAWAII;
++ break;
++ case CHIP_MULLINS:
++ fw_name = FIRMWARE_MULLINS;
++ break;
++#endif
++ case CHIP_TONGA:
++ fw_name = FIRMWARE_TONGA;
++ break;
++ case CHIP_CARRIZO:
++ fw_name = FIRMWARE_CARRIZO;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
++ if (r) {
++ dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
++ fw_name);
++ return r;
++ }
++
++ r = amdgpu_ucode_validate(adev->uvd.fw);
++ if (r) {
++ dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
++ fw_name);
++ release_firmware(adev->uvd.fw);
++ adev->uvd.fw = NULL;
++ return r;
++ }
++
++ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
++ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
++ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
++ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
++ DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
++ version_major, version_minor, family_id);
++
++ bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
++ + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
++ r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
++ return r;
++ }
++
++ r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
++ if (r) {
++ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
++ dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
++ return r;
++ }
++
++ r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->uvd.gpu_addr);
++ if (r) {
++ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
++ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
++ dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
++ return r;
++ }
++
++ r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
++ if (r) {
++ amdgpu_bo_unpin(adev->uvd.vcpu_bo);
++ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
++ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
++ dev_err(adev->dev, "(%d) UVD map failed\n", r);
++ return r;
++ }
++
++ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
++
++ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
++ atomic_set(&adev->uvd.handles[i], 0);
++ adev->uvd.filp[i] = NULL;
++ }
++
++ /* from UVD v5.0 onward the HW addressing capacity increased to 64 bits */
++ if (!amdgpu_ip_block_version_cmp(adev, AMDGPU_IP_BLOCK_TYPE_UVD, 5, 0))
++ adev->uvd.address_64_bit = true;
++
++ return 0;
++}
++
++int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (adev->uvd.vcpu_bo == NULL)
++ return 0;
++
++ r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
++ if (!r) {
++ amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
++ amdgpu_bo_unpin(adev->uvd.vcpu_bo);
++ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
++ }
++
++ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
++
++ amdgpu_ring_fini(&adev->uvd.ring);
++
++ release_firmware(adev->uvd.fw);
++
++ return 0;
++}
++
++int amdgpu_uvd_suspend(struct amdgpu_device *adev)
++{
++ unsigned size;
++ void *ptr;
++ const struct common_firmware_header *hdr;
++ int i;
++
++ if (adev->uvd.vcpu_bo == NULL)
++ return 0;
++
++ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
++ if (atomic_read(&adev->uvd.handles[i]))
++ break;
++
++ if (i == AMDGPU_MAX_UVD_HANDLES)
++ return 0;
++
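++ /* Sessions are still open, so save everything that follows the
++  * firmware image (the UVD stack and heap allocated in sw_init) so
++  * the sessions survive suspend/resume.
++  */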
++ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
++
++ size = amdgpu_bo_size(adev->uvd.vcpu_bo);
++ size -= le32_to_cpu(hdr->ucode_size_bytes);
++
++ ptr = adev->uvd.cpu_addr;
++ ptr += le32_to_cpu(hdr->ucode_size_bytes);
++
++ adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
++ if (!adev->uvd.saved_bo)
++ return -ENOMEM;
++ memcpy(adev->uvd.saved_bo, ptr, size);
++
++ return 0;
++}
++
++int amdgpu_uvd_resume(struct amdgpu_device *adev)
++{
++ unsigned size;
++ void *ptr;
++ const struct common_firmware_header *hdr;
++ unsigned offset;
++
++ if (adev->uvd.vcpu_bo == NULL)
++ return -EINVAL;
++
++ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
++ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
++ memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
++ (adev->uvd.fw->size) - offset);
++
++ size = amdgpu_bo_size(adev->uvd.vcpu_bo);
++ size -= le32_to_cpu(hdr->ucode_size_bytes);
++ ptr = adev->uvd.cpu_addr;
++ ptr += le32_to_cpu(hdr->ucode_size_bytes);
++
++ if (adev->uvd.saved_bo != NULL) {
++ memcpy(ptr, adev->uvd.saved_bo, size);
++ kfree(adev->uvd.saved_bo);
++ adev->uvd.saved_bo = NULL;
++ } else
++ memset(ptr, 0, size);
++
++ return 0;
++}
++
++void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
++{
++ struct amdgpu_ring *ring = &adev->uvd.ring;
++ int i, r;
++
++ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
++ uint32_t handle = atomic_read(&adev->uvd.handles[i]);
++ if (handle != 0 && adev->uvd.filp[i] == filp) {
++ struct amdgpu_fence *fence;
++
++ amdgpu_uvd_note_usage(adev);
++
++ r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
++ if (r) {
++ DRM_ERROR("Error destroying UVD (%d)!\n", r);
++ continue;
++ }
++
++ amdgpu_fence_wait(fence, false);
++ amdgpu_fence_unref(&fence);
++
++ adev->uvd.filp[i] = NULL;
++ atomic_set(&adev->uvd.handles[i], 0);
++ }
++ }
++}
++
++static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
++{
++ int i;
++ for (i = 0; i < rbo->placement.num_placement; ++i) {
++ rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
++ rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
++ }
++}
++
++/**
++ * amdgpu_uvd_cs_pass1 - first parsing round
++ *
++ * @ctx: UVD parser context
++ *
++ * Make sure UVD message and feedback buffers are in VRAM and
++ * nobody is violating a 256MB boundary.
++ */
++static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
++{
++ struct amdgpu_bo_va_mapping *mapping;
++ struct amdgpu_bo *bo;
++ uint32_t cmd, lo, hi;
++ uint64_t addr;
++ int r = 0;
++
++ lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
++ hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
++ addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
++
++ mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
++ if (mapping == NULL) {
++ DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
++ return -EINVAL;
++ }
++
++ if (!ctx->parser->adev->uvd.address_64_bit) {
++ /* check if it's a message or feedback command */
++ cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
++ if (cmd == 0x0 || cmd == 0x3) {
++ /* yes, force it into VRAM */
++ uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
++ amdgpu_ttm_placement_from_domain(bo, domain);
++ }
++ amdgpu_uvd_force_into_uvd_segment(bo);
++
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
++ }
++
++ return r;
++}
++
++/**
++ * amdgpu_uvd_cs_msg_decode - handle UVD decode message
++ *
++ * @msg: pointer to message structure
++ * @buf_sizes: returned buffer sizes
++ *
++ * Peek into the decode message and calculate the necessary buffer sizes.
++ */
++static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
++{
++ unsigned stream_type = msg[4];
++ unsigned width = msg[6];
++ unsigned height = msg[7];
++ unsigned dpb_size = msg[9];
++ unsigned pitch = msg[28];
++ unsigned level = msg[57];
++
++ unsigned width_in_mb = width / 16;
++ unsigned height_in_mb = ALIGN(height / 16, 2);
++ unsigned fs_in_mb = width_in_mb * height_in_mb;
++
++ unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
++
++ image_size = width * height;
++ image_size += image_size / 2;
++ image_size = ALIGN(image_size, 1024);
++
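++ /* Worked example: a 1920x1088 H264 level-4.1 stream gives
++  * width_in_mb = 120, height_in_mb = 68 and fs_in_mb = 8160, so
++  * num_dpb_buffer = 32768 / 8160 + 1 = 5 and image_size =
++  * ALIGN(1920 * 1088 * 3 / 2, 1024) = 3133440; min_dpb_size then
++  * comes to 5 * 3133440 + 8160 * 5 * 192 + 8160 * 32 = 23761920
++  * bytes, roughly 22.7 MB.
++  */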
++ switch (stream_type) {
++ case 0: /* H264 */
++ case 7: /* H264 Perf */
++ switch(level) {
++ case 30:
++ num_dpb_buffer = 8100 / fs_in_mb;
++ break;
++ case 31:
++ num_dpb_buffer = 18000 / fs_in_mb;
++ break;
++ case 32:
++ num_dpb_buffer = 20480 / fs_in_mb;
++ break;
++ case 41:
++ num_dpb_buffer = 32768 / fs_in_mb;
++ break;
++ case 42:
++ num_dpb_buffer = 34816 / fs_in_mb;
++ break;
++ case 50:
++ num_dpb_buffer = 110400 / fs_in_mb;
++ break;
++ case 51:
++ num_dpb_buffer = 184320 / fs_in_mb;
++ break;
++ default:
++ num_dpb_buffer = 184320 / fs_in_mb;
++ break;
++ }
++ num_dpb_buffer++;
++ if (num_dpb_buffer > 17)
++ num_dpb_buffer = 17;
++
++ /* reference picture buffer */
++ min_dpb_size = image_size * num_dpb_buffer;
++
++ /* macroblock context buffer */
++ min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;
++
++ /* IT surface buffer */
++ min_dpb_size += width_in_mb * height_in_mb * 32;
++ break;
++
++ case 1: /* VC1 */
++
++ /* reference picture buffer */
++ min_dpb_size = image_size * 3;
++
++ /* CONTEXT_BUFFER */
++ min_dpb_size += width_in_mb * height_in_mb * 128;
++
++ /* IT surface buffer */
++ min_dpb_size += width_in_mb * 64;
++
++ /* DB surface buffer */
++ min_dpb_size += width_in_mb * 128;
++
++ /* BP */
++ tmp = max(width_in_mb, height_in_mb);
++ min_dpb_size += ALIGN(tmp * 7 * 16, 64);
++ break;
++
++ case 3: /* MPEG2 */
++
++ /* reference picture buffer */
++ min_dpb_size = image_size * 3;
++ break;
++
++ case 4: /* MPEG4 */
++
++ /* reference picture buffer */
++ min_dpb_size = image_size * 3;
++
++ /* CM */
++ min_dpb_size += width_in_mb * height_in_mb * 64;
++
++ /* IT surface buffer */
++ min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
++ break;
++
++ default:
++ DRM_ERROR("UVD codec not handled %d!\n", stream_type);
++ return -EINVAL;
++ }
++
++ if (width > pitch) {
++ DRM_ERROR("Invalid UVD decoding target pitch!\n");
++ return -EINVAL;
++ }
++
++ if (dpb_size < min_dpb_size) {
++ DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
++ dpb_size, min_dpb_size);
++ return -EINVAL;
++ }
++
++ buf_sizes[0x1] = dpb_size;
++ buf_sizes[0x2] = image_size;
++ return 0;
++}
++
++/**
++ * amdgpu_uvd_cs_msg - handle UVD message
++ *
++ * @ctx: UVD parser context
++ * @bo: buffer object containing the message
++ * @offset: offset into the buffer object
++ *
++ * Peek into the UVD message and extract the session id.
++ * Make sure that we don't open up too many sessions.
++ */
++static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
++ struct amdgpu_bo *bo, unsigned offset)
++{
++ struct amdgpu_device *adev = ctx->parser->adev;
++ int32_t *msg, msg_type, handle;
++ struct fence *f;
++ void *ptr;
++
++ int i, r;
++
++ if (offset & 0x3F) {
++ DRM_ERROR("UVD messages must be 64 byte aligned!\n");
++ return -EINVAL;
++ }
++
++ f = reservation_object_get_excl(bo->tbo.resv);
++ if (f) {
++ r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
++ if (r) {
++ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
++ return r;
++ }
++ }
++
++ r = amdgpu_bo_kmap(bo, &ptr);
++ if (r) {
++ DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
++ return r;
++ }
++
++ msg = ptr + offset;
++
++ msg_type = msg[1];
++ handle = msg[2];
++
++ if (handle == 0) {
++ DRM_ERROR("Invalid UVD handle!\n");
++ amdgpu_bo_kunmap(bo);
++ return -EINVAL;
++ }
++
++ if (msg_type == 1) {
++ /* it's a decode msg, calc buffer sizes */
++ r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
++ amdgpu_bo_kunmap(bo);
++ if (r)
++ return r;
++
++ } else if (msg_type == 2) {
++ /* it's a destroy msg, free the handle */
++ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
++ atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
++ amdgpu_bo_kunmap(bo);
++ return 0;
++ } else {
++ /* it's a create msg */
++ amdgpu_bo_kunmap(bo);
++
++ if (msg_type != 0) {
++ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
++ return -EINVAL;
++ }
++
++ /* it's a create msg, no special handling needed */
++ }
++
++ /* create or decode, validate the handle */
++ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
++ if (atomic_read(&adev->uvd.handles[i]) == handle)
++ return 0;
++ }
++
++ /* handle not found, try to alloc a new one */
++ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
++ if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
++ adev->uvd.filp[i] = ctx->parser->filp;
++ return 0;
++ }
++ }
++
++ DRM_ERROR("No more free UVD handles!\n");
++ return -EINVAL;
++}
++
++/**
++ * amdgpu_uvd_cs_pass2 - second parsing round
++ *
++ * @ctx: UVD parser context
++ *
++ * Patch buffer addresses, make sure buffer sizes are correct.
++ */
++static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
++{
++ struct amdgpu_bo_va_mapping *mapping;
++ struct amdgpu_bo *bo;
++ struct amdgpu_ib *ib;
++ uint32_t cmd, lo, hi;
++ uint64_t start, end;
++ uint64_t addr;
++ int r;
++
++ lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
++ hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
++ addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
++
++ mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
++ if (mapping == NULL)
++ return -EINVAL;
++
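++ /* mapping->it covers [it.start, it.last] in GPU pages; rebase the
++  * virtual address onto the BO's actual GPU offset before patching
++  * it back into the IB below.
++  */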
++ start = amdgpu_bo_gpu_offset(bo);
++
++ end = (mapping->it.last + 1 - mapping->it.start);
++ end = end * AMDGPU_GPU_PAGE_SIZE + start;
++
++ addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
++ start += addr;
++
++ ib = &ctx->parser->ibs[ctx->ib_idx];
++ ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
++ ib->ptr[ctx->data1] = start >> 32;
++
++ cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
++ if (cmd < 0x4) {
++ if ((end - start) < ctx->buf_sizes[cmd]) {
++ DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
++ (unsigned)(end - start),
++ ctx->buf_sizes[cmd]);
++ return -EINVAL;
++ }
++
++ } else if ((cmd != 0x100) && (cmd != 0x204)) {
++ DRM_ERROR("invalid UVD command %X!\n", cmd);
++ return -EINVAL;
++ }
++
++ if (!ctx->parser->adev->uvd.address_64_bit) {
++ if ((start >> 28) != ((end - 1) >> 28)) {
++ DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
++ start, end);
++ return -EINVAL;
++ }
++
++ if ((cmd == 0 || cmd == 0x3) &&
++ (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
++ DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
++ start, end);
++ return -EINVAL;
++ }
++ }
++
++ if (cmd == 0) {
++ ctx->has_msg_cmd = true;
++ r = amdgpu_uvd_cs_msg(ctx, bo, addr);
++ if (r)
++ return r;
++ } else if (!ctx->has_msg_cmd) {
++ DRM_ERROR("Message needed before other commands are send!\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_uvd_cs_reg - parse register writes
++ *
++ * @ctx: UVD parser context
++ * @cb: callback function
++ *
++ * Parse the register writes, call cb on each complete command.
++ */
++static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
++ int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
++{
++ struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
++ int i, r;
++
++ ctx->idx++;
++ for (i = 0; i <= ctx->count; ++i) {
++ unsigned reg = ctx->reg + i;
++
++ if (ctx->idx >= ib->length_dw) {
++ DRM_ERROR("Register command after end of CS!\n");
++ return -EINVAL;
++ }
++
++ switch (reg) {
++ case mmUVD_GPCOM_VCPU_DATA0:
++ ctx->data0 = ctx->idx;
++ break;
++ case mmUVD_GPCOM_VCPU_DATA1:
++ ctx->data1 = ctx->idx;
++ break;
++ case mmUVD_GPCOM_VCPU_CMD:
++ r = cb(ctx);
++ if (r)
++ return r;
++ break;
++ case mmUVD_ENGINE_CNTL:
++ break;
++ default:
++ DRM_ERROR("Invalid reg 0x%X!\n", reg);
++ return -EINVAL;
++ }
++ ctx->idx++;
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_uvd_cs_packets - parse UVD packets
++ *
++ * @ctx: UVD parser context
++ * @cb: callback function
++ *
++ * Parse the command stream packets.
++ */
++static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
++ int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
++{
++ struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
++ int r;
++
++ for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
++ uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
++ unsigned type = CP_PACKET_GET_TYPE(cmd);
++ switch (type) {
++ case PACKET_TYPE0:
++ ctx->reg = CP_PACKET0_GET_REG(cmd);
++ ctx->count = CP_PACKET_GET_COUNT(cmd);
++ r = amdgpu_uvd_cs_reg(ctx, cb);
++ if (r)
++ return r;
++ break;
++ case PACKET_TYPE2:
++ ++ctx->idx;
++ break;
++ default:
++ DRM_ERROR("Unknown packet type %d !\n", type);
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++/**
++ * amdgpu_uvd_ring_parse_cs - UVD command submission parser
++ *
++ * @parser: Command submission parser context
++ * @ib_idx: index of the indirect buffer to parse
++ *
++ * Parse the command stream, patch in addresses as necessary.
++ */
++int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
++{
++ struct amdgpu_uvd_cs_ctx ctx = {};
++ unsigned buf_sizes[] = {
++ [0x00000000] = 2048,
++ [0x00000001] = 32 * 1024 * 1024,
++ [0x00000002] = 2048 * 1152 * 3,
++ [0x00000003] = 2048,
++ };
++ struct amdgpu_ib *ib = &parser->ibs[ib_idx];
++ int r;
++
++ if (ib->length_dw % 16) {
++ DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
++ ib->length_dw);
++ return -EINVAL;
++ }
++
++ ctx.parser = parser;
++ ctx.buf_sizes = buf_sizes;
++ ctx.ib_idx = ib_idx;
++
++ /* first round, make sure the buffers are actually in the UVD segment */
++ r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
++ if (r)
++ return r;
++
++ /* second round, patch buffer addresses into the command stream */
++ r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
++ if (r)
++ return r;
++
++ if (!ctx.has_msg_cmd) {
++ DRM_ERROR("UVD-IBs need a msg command!\n");
++ return -EINVAL;
++ }
++
++ amdgpu_uvd_note_usage(ctx.parser->adev);
++
++ return 0;
++}
++
++static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
++ struct amdgpu_bo *bo,
++ struct amdgpu_fence **fence)
++{
++ struct ttm_validate_buffer tv;
++ struct ww_acquire_ctx ticket;
++ struct list_head head;
++ struct amdgpu_ib ib;
++ uint64_t addr;
++ int i, r;
++
++ memset(&tv, 0, sizeof(tv));
++ tv.bo = &bo->tbo;
++
++ INIT_LIST_HEAD(&head);
++ list_add(&tv.head, &head);
++
++ r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
++ if (r)
++ return r;
++
++ if (!bo->adev->uvd.address_64_bit) {
++ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
++ amdgpu_uvd_force_into_uvd_segment(bo);
++ }
++
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
++ if (r)
++ goto err;
++
++ r = amdgpu_ib_get(ring, NULL, 64, &ib);
++ if (r)
++ goto err;
++
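++ /* The message is submitted as three register writes: DATA0/DATA1
++  * carry the 64-bit address of the message BO and the write to CMD
++  * triggers processing (cf. amdgpu_uvd_cs_reg); PACKET2 NOPs pad the
++  * IB out to 16 dwords.
++  */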
++ addr = amdgpu_bo_gpu_offset(bo);
++ ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
++ ib.ptr[1] = addr;
++ ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
++ ib.ptr[3] = addr >> 32;
++ ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
++ ib.ptr[5] = 0;
++ for (i = 6; i < 16; ++i)
++ ib.ptr[i] = PACKET2(0);
++ ib.length_dw = 16;
++
++ r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
++ if (r)
++ goto err;
++ ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
++
++ if (fence)
++ *fence = amdgpu_fence_ref(ib.fence);
++
++ amdgpu_ib_free(ring->adev, &ib);
++ amdgpu_bo_unref(&bo);
++ return 0;
++
++err:
++ ttm_eu_backoff_reservation(&ticket, &head);
++ return r;
++}
++
++/* multiple fence commands without any stream commands in between can
++ crash the vcpu so just try to emit a dummy create/destroy msg to
++ avoid this */
++int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_fence **fence)
++{
++ struct amdgpu_device *adev = ring->adev;
++ struct amdgpu_bo *bo;
++ uint32_t *msg;
++ int r, i;
++
++ r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
++ if (r)
++ return r;
++
++ r = amdgpu_bo_reserve(bo, false);
++ if (r) {
++ amdgpu_bo_unref(&bo);
++ return r;
++ }
++
++ r = amdgpu_bo_kmap(bo, (void **)&msg);
++ if (r) {
++ amdgpu_bo_unreserve(bo);
++ amdgpu_bo_unref(&bo);
++ return r;
++ }
++
++ /* stitch together a UVD create msg */
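++ /* msg[1] is the message type (0 = create, as decoded by
++  * amdgpu_uvd_cs_msg) and msg[2] the session handle; the remaining
++  * words describe a minimal stream, 0x780 x 0x440 presumably being
++  * a 1920x1088 default.
++  */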
++ msg[0] = cpu_to_le32(0x00000de4);
++ msg[1] = cpu_to_le32(0x00000000);
++ msg[2] = cpu_to_le32(handle);
++ msg[3] = cpu_to_le32(0x00000000);
++ msg[4] = cpu_to_le32(0x00000000);
++ msg[5] = cpu_to_le32(0x00000000);
++ msg[6] = cpu_to_le32(0x00000000);
++ msg[7] = cpu_to_le32(0x00000780);
++ msg[8] = cpu_to_le32(0x00000440);
++ msg[9] = cpu_to_le32(0x00000000);
++ msg[10] = cpu_to_le32(0x01b37000);
++ for (i = 11; i < 1024; ++i)
++ msg[i] = cpu_to_le32(0x0);
++
++ amdgpu_bo_kunmap(bo);
++ amdgpu_bo_unreserve(bo);
++
++ return amdgpu_uvd_send_msg(ring, bo, fence);
++}
++
++int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_fence **fence)
++{
++ struct amdgpu_device *adev = ring->adev;
++ struct amdgpu_bo *bo;
++ uint32_t *msg;
++ int r, i;
++
++ r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
++ if (r)
++ return r;
++
++ r = amdgpu_bo_reserve(bo, false);
++ if (r) {
++ amdgpu_bo_unref(&bo);
++ return r;
++ }
++
++ r = amdgpu_bo_kmap(bo, (void **)&msg);
++ if (r) {
++ amdgpu_bo_unreserve(bo);
++ amdgpu_bo_unref(&bo);
++ return r;
++ }
++
++ /* stitch together a UVD destroy msg */
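++ /* msg[1] = 2 marks this as a destroy message (see
++  * amdgpu_uvd_cs_msg) and msg[2] carries the handle to tear down.
++  */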
++ msg[0] = cpu_to_le32(0x00000de4);
++ msg[1] = cpu_to_le32(0x00000002);
++ msg[2] = cpu_to_le32(handle);
++ msg[3] = cpu_to_le32(0x00000000);
++ for (i = 4; i < 1024; ++i)
++ msg[i] = cpu_to_le32(0x0);
++
++ amdgpu_bo_kunmap(bo);
++ amdgpu_bo_unreserve(bo);
++
++ return amdgpu_uvd_send_msg(ring, bo, fence);
++}
++
++static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
++{
++ struct amdgpu_device *adev =
++ container_of(work, struct amdgpu_device, uvd.idle_work.work);
++ unsigned i, fences, handles = 0;
++
++ fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
++
++ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
++ if (atomic_read(&adev->uvd.handles[i]))
++ ++handles;
++
++ if (fences == 0 && handles == 0) {
++ if (adev->pm.dpm_enabled) {
++ amdgpu_dpm_enable_uvd(adev, false);
++ } else {
++ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
++ }
++ } else {
++ schedule_delayed_work(&adev->uvd.idle_work,
++ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
++ }
++}
++
++static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
++{
++ bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
++ set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
++ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
++
++ if (set_clocks) {
++ if (adev->pm.dpm_enabled) {
++ amdgpu_dpm_enable_uvd(adev, true);
++ } else {
++ amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
++ }
++ }
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+new file mode 100644
+index 0000000..2255aa7
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -0,0 +1,39 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_UVD_H__
++#define __AMDGPU_UVD_H__
++
++int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
++int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
++int amdgpu_uvd_suspend(struct amdgpu_device *adev);
++int amdgpu_uvd_resume(struct amdgpu_device *adev);
++int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_fence **fence);
++int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_fence **fence);
++void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
++ struct drm_file *filp);
++int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+new file mode 100644
+index 0000000..c65d93c
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -0,0 +1,727 @@
++/*
++ * Copyright 2013 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * Authors: Christian König <christian.koenig@amd.com>
++ */
++
++#include <linux/firmware.h>
++#include <linux/module.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "amdgpu.h"
++#include "amdgpu_pm.h"
++#include "amdgpu_vce.h"
++#include "cikd.h"
++
++/* 1 second timeout */
++#define VCE_IDLE_TIMEOUT_MS 1000
++
++/* Firmware Names */
++#ifdef CONFIG_DRM_AMDGPU_CIK
++#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
++#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
++#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
++#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
++#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
++#endif
++#define FIRMWARE_TONGA "radeon/tonga_vce.bin"
++#define FIRMWARE_CARRIZO "radeon/carrizo_vce.bin"
++
++#ifdef CONFIG_DRM_AMDGPU_CIK
++MODULE_FIRMWARE(FIRMWARE_BONAIRE);
++MODULE_FIRMWARE(FIRMWARE_KABINI);
++MODULE_FIRMWARE(FIRMWARE_KAVERI);
++MODULE_FIRMWARE(FIRMWARE_HAWAII);
++MODULE_FIRMWARE(FIRMWARE_MULLINS);
++#endif
++MODULE_FIRMWARE(FIRMWARE_TONGA);
++MODULE_FIRMWARE(FIRMWARE_CARRIZO);
++
++static void amdgpu_vce_idle_work_handler(struct work_struct *work);
++
++/**
++ * amdgpu_vce_sw_init - allocate memory, load vce firmware
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * First step to get VCE online, allocate memory and load the firmware
++ */
++int amdgpu_vce_sw_init(struct amdgpu_device *adev)
++{
++ unsigned long size;
++ const char *fw_name;
++ const struct common_firmware_header *hdr;
++ unsigned ucode_version, version_major, version_minor, binary_id;
++ int i, r;
++
++ INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
++
++ switch (adev->asic_type) {
++#ifdef CONFIG_DRM_AMDGPU_CIK
++ case CHIP_BONAIRE:
++ fw_name = FIRMWARE_BONAIRE;
++ break;
++ case CHIP_KAVERI:
++ fw_name = FIRMWARE_KAVERI;
++ break;
++ case CHIP_KABINI:
++ fw_name = FIRMWARE_KABINI;
++ break;
++ case CHIP_HAWAII:
++ fw_name = FIRMWARE_HAWAII;
++ break;
++ case CHIP_MULLINS:
++ fw_name = FIRMWARE_MULLINS;
++ break;
++#endif
++ case CHIP_TONGA:
++ fw_name = FIRMWARE_TONGA;
++ break;
++ case CHIP_CARRIZO:
++ fw_name = FIRMWARE_CARRIZO;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
++ if (r) {
++ dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
++ fw_name);
++ return r;
++ }
++
++ r = amdgpu_ucode_validate(adev->vce.fw);
++ if (r) {
++ dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
++ fw_name);
++ release_firmware(adev->vce.fw);
++ adev->vce.fw = NULL;
++ return r;
++ }
++
++ hdr = (const struct common_firmware_header *)adev->vce.fw->data;
++
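++ /* VCE packs its firmware version as major[31:20].minor[19:8] with
++  * an 8-bit binary id, unlike the UVD layout in amdgpu_uvd.c.
++  */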
++ ucode_version = le32_to_cpu(hdr->ucode_version);
++ version_major = (ucode_version >> 20) & 0xfff;
++ version_minor = (ucode_version >> 8) & 0xfff;
++ binary_id = ucode_version & 0xff;
++ DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
++ version_major, version_minor, binary_id);
++ adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
++ (binary_id << 8));
++
++ /* allocate firmware, stack and heap BO */
++
++ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes)) +
++ AMDGPU_VCE_STACK_SIZE + AMDGPU_VCE_HEAP_SIZE;
++ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
++ return r;
++ }
++
++ r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
++ if (r) {
++ amdgpu_bo_unref(&adev->vce.vcpu_bo);
++ dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
++ return r;
++ }
++
++ r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->vce.gpu_addr);
++ amdgpu_bo_unreserve(adev->vce.vcpu_bo);
++ if (r) {
++ amdgpu_bo_unref(&adev->vce.vcpu_bo);
++ dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
++ return r;
++ }
++
++ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
++ atomic_set(&adev->vce.handles[i], 0);
++ adev->vce.filp[i] = NULL;
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_vce_sw_fini - free memory
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Last step on VCE teardown, free firmware memory
++ */
++int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
++{
++ if (adev->vce.vcpu_bo == NULL)
++ return 0;
++
++ amdgpu_bo_unref(&adev->vce.vcpu_bo);
++
++ amdgpu_ring_fini(&adev->vce.ring[0]);
++ amdgpu_ring_fini(&adev->vce.ring[1]);
++
++ release_firmware(adev->vce.fw);
++
++ return 0;
++}
++
++/**
++ * amdgpu_vce_suspend - check whether VCE can be suspended
++ *
++ * @adev: amdgpu_device pointer
++ *
++ */
++int amdgpu_vce_suspend(struct amdgpu_device *adev)
++{
++ int i;
++
++ if (adev->vce.vcpu_bo == NULL)
++ return 0;
++
++ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
++ if (atomic_read(&adev->vce.handles[i]))
++ break;
++
++ if (i == AMDGPU_MAX_VCE_HANDLES)
++ return 0;
++
++ /* TODO: suspending running encoding sessions isn't supported */
++ return -EINVAL;
++}
++
++/**
++ * amdgpu_vce_resume - re-upload the VCE firmware
++ *
++ * @adev: amdgpu_device pointer
++ *
++ */
++int amdgpu_vce_resume(struct amdgpu_device *adev)
++{
++ void *cpu_addr;
++ const struct common_firmware_header *hdr;
++ unsigned offset;
++ int r;
++
++ if (adev->vce.vcpu_bo == NULL)
++ return -EINVAL;
++
++ r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
++ if (r) {
++ dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
++ return r;
++ }
++
++ r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
++ if (r) {
++ amdgpu_bo_unreserve(adev->vce.vcpu_bo);
++ dev_err(adev->dev, "(%d) VCE map failed\n", r);
++ return r;
++ }
++
++ hdr = (const struct common_firmware_header *)adev->vce.fw->data;
++ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
++ memcpy(cpu_addr, (adev->vce.fw->data) + offset,
++ (adev->vce.fw->size) - offset);
++
++ amdgpu_bo_kunmap(adev->vce.vcpu_bo);
++
++ amdgpu_bo_unreserve(adev->vce.vcpu_bo);
++
++ return 0;
++}
++
++/**
++ * amdgpu_vce_idle_work_handler - power off VCE
++ *
++ * @work: pointer to work structure
++ *
++ * Power off VCE when it's not used any more.
++ */
++static void amdgpu_vce_idle_work_handler(struct work_struct *work)
++{
++ struct amdgpu_device *adev =
++ container_of(work, struct amdgpu_device, vce.idle_work.work);
++
++ if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
++ (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
++ if (adev->pm.dpm_enabled) {
++ amdgpu_dpm_enable_vce(adev, false);
++ } else {
++ amdgpu_asic_set_vce_clocks(adev, 0, 0);
++ }
++ } else {
++ schedule_delayed_work(&adev->vce.idle_work,
++ msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
++ }
++}
++
++/**
++ * amdgpu_vce_note_usage - power up VCE
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Make sure VCE is powered up when we want to use it
++ */
++static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
++{
++ bool streams_changed = false;
++ bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
++ set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
++ msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
++
++ if (adev->pm.dpm_enabled) {
++ /* XXX figure out if the streams changed */
++ streams_changed = false;
++ }
++
++ if (set_clocks || streams_changed) {
++ if (adev->pm.dpm_enabled) {
++ amdgpu_dpm_enable_vce(adev, true);
++ } else {
++ amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
++ }
++ }
++}
++
++/**
++ * amdgpu_vce_free_handles - free still open VCE handles
++ *
++ * @adev: amdgpu_device pointer
++ * @filp: drm file pointer
++ *
++ * Close all VCE handles still open by this file pointer
++ */
++void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
++{
++ struct amdgpu_ring *ring = &adev->vce.ring[0];
++ int i, r;
++ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
++ uint32_t handle = atomic_read(&adev->vce.handles[i]);
++ if (!handle || adev->vce.filp[i] != filp)
++ continue;
++
++ amdgpu_vce_note_usage(adev);
++
++ r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
++ if (r)
++ DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
++
++ adev->vce.filp[i] = NULL;
++ atomic_set(&adev->vce.handles[i], 0);
++ }
++}
++
++/**
++ * amdgpu_vce_get_create_msg - generate a VCE create msg
++ *
++ * @ring: ring we should submit the msg to
++ * @handle: VCE session handle to use
++ * @fence: optional fence to return
++ *
++ * Open up a stream for HW test
++ */
++int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_fence **fence)
++{
++ const unsigned ib_size_dw = 1024;
++ struct amdgpu_ib ib;
++ uint64_t dummy;
++ int i, r;
++
++ r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
++ return r;
++ }
++
++ dummy = ib.gpu_addr + 1024;
++
++ /* stitch together a VCE create msg */
++ ib.length_dw = 0;
++ ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
++ ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
++ ib.ptr[ib.length_dw++] = handle;
++
++ ib.ptr[ib.length_dw++] = 0x00000030; /* len */
++ ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
++ ib.ptr[ib.length_dw++] = 0x00000000;
++ ib.ptr[ib.length_dw++] = 0x00000042;
++ ib.ptr[ib.length_dw++] = 0x0000000a;
++ ib.ptr[ib.length_dw++] = 0x00000001;
++ ib.ptr[ib.length_dw++] = 0x00000080;
++ ib.ptr[ib.length_dw++] = 0x00000060;
++ ib.ptr[ib.length_dw++] = 0x00000100;
++ ib.ptr[ib.length_dw++] = 0x00000100;
++ ib.ptr[ib.length_dw++] = 0x0000000c;
++ ib.ptr[ib.length_dw++] = 0x00000000;
++
++ ib.ptr[ib.length_dw++] = 0x00000014; /* len */
++ ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
++ ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
++ ib.ptr[ib.length_dw++] = dummy;
++ ib.ptr[ib.length_dw++] = 0x00000001;
++
++ for (i = ib.length_dw; i < ib_size_dw; ++i)
++ ib.ptr[i] = 0x0;
++
++ r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
++ }
++
++ if (fence)
++ *fence = amdgpu_fence_ref(ib.fence);
++
++ amdgpu_ib_free(ring->adev, &ib);
++
++ return r;
++}
++
++/**
++ * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
++ *
++ * @ring: ring we should submit the msg to
++ * @handle: VCE session handle to use
++ * @fence: optional fence to return
++ *
++ * Close up a stream for HW test or if userspace failed to do so
++ */
++int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_fence **fence)
++{
++ const unsigned ib_size_dw = 1024;
++ struct amdgpu_ib ib;
++ uint64_t dummy;
++ int i, r;
++
++ r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
++ return r;
++ }
++
++ dummy = ib.gpu_addr + 1024;
++
++ /* stitch together a VCE destroy msg */
++ ib.length_dw = 0;
++ ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
++ ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
++ ib.ptr[ib.length_dw++] = handle;
++
++ ib.ptr[ib.length_dw++] = 0x00000014; /* len */
++ ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
++ ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
++ ib.ptr[ib.length_dw++] = dummy;
++ ib.ptr[ib.length_dw++] = 0x00000001;
++
++ ib.ptr[ib.length_dw++] = 0x00000008; /* len */
++ ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
++
++ for (i = ib.length_dw; i < ib_size_dw; ++i)
++ ib.ptr[i] = 0x0;
++
++ r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
++ }
++
++ if (fence)
++ *fence = amdgpu_fence_ref(ib.fence);
++
++ amdgpu_ib_free(ring->adev, &ib);
++
++ return r;
++}
++
++/**
++ * amdgpu_vce_cs_reloc - command submission relocation
++ *
++ * @p: parser context
++ * @ib_idx: index of the indirect buffer containing the relocation
++ * @lo: address of lower dword
++ * @hi: address of higher dword
++ *
++ * Patch relocation inside command stream with real buffer address
++ */
++int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi)
++{
++ struct amdgpu_bo_va_mapping *mapping;
++ struct amdgpu_ib *ib = &p->ibs[ib_idx];
++ struct amdgpu_bo *bo;
++ uint64_t addr;
++
++ addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
++ ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
++
++ mapping = amdgpu_cs_find_mapping(p, addr, &bo);
++ if (mapping == NULL) {
++ DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n",
++ addr, lo, hi);
++ return -EINVAL;
++ }
++
++ addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
++ addr += amdgpu_bo_gpu_offset(bo);
++
++ ib->ptr[lo] = addr & 0xFFFFFFFF;
++ ib->ptr[hi] = addr >> 32;
++
++ return 0;
++}
++
++/**
++ * amdgpu_vce_cs_parse - parse and validate the command stream
++ *
++ * @p: parser context
++ * @ib_idx: index of the indirect buffer to parse
++ *
++ */
++int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
++{
++ uint32_t handle = 0;
++ bool destroy = false;
++ int i, r, idx = 0;
++ struct amdgpu_ib *ib = &p->ibs[ib_idx];
++
++ amdgpu_vce_note_usage(p->adev);
++
++ while (idx < ib->length_dw) {
++ uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
++ uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
++
++ if ((len < 8) || (len & 3)) {
++ DRM_ERROR("invalid VCE command length (%d)!\n", len);
++ return -EINVAL;
++ }
++
++ switch (cmd) {
++ case 0x00000001: // session
++ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
++ break;
++
++ case 0x00000002: // task info
++ case 0x01000001: // create
++ case 0x04000001: // config extension
++ case 0x04000002: // pic control
++ case 0x04000005: // rate control
++ case 0x04000007: // motion estimation
++ case 0x04000008: // rdo
++ case 0x04000009: // vui
++ case 0x05000002: // auxiliary buffer
++ break;
++
++ case 0x03000001: // encode
++ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9);
++ if (r)
++ return r;
++
++ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11);
++ if (r)
++ return r;
++ break;
++
++ case 0x02000001: // destroy
++ destroy = true;
++ break;
++
++ case 0x05000001: // context buffer
++ case 0x05000004: // video bitstream buffer
++ case 0x05000005: // feedback buffer
++ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2);
++ if (r)
++ return r;
++ break;
++
++ default:
++ DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
++ return -EINVAL;
++ }
++
++ idx += len / 4;
++ }
++
++ if (destroy) {
++ /* IB contains a destroy msg, free the handle */
++ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
++ atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
++
++ return 0;
++ }
++
++ /* create or encode, validate the handle */
++ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
++ if (atomic_read(&p->adev->vce.handles[i]) == handle)
++ return 0;
++ }
++
++ /* handle not found, try to alloc a new one */
++ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
++ if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
++ p->adev->vce.filp[i] = p->filp;
++ return 0;
++ }
++ }
++
++ DRM_ERROR("No more free VCE handles!\n");
++
++ return -EINVAL;
++}
++
++/**
++ * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
++ *
++ * @ring: engine to use
++ * @semaphore: address of semaphore
++ * @emit_wait: true=emit wait, false=emit signal
++ *
++ */
++bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
++ struct amdgpu_semaphore *semaphore,
++ bool emit_wait)
++{
++ uint64_t addr = semaphore->gpu_addr;
++
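++ /* The 8-byte aligned semaphore address is split into two 20-bit
++  * fields: bits 22:3 in the first dword and bits 42:23 in the second.
++  */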
++ amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
++ amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
++ amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
++ amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
++ if (!emit_wait)
++ amdgpu_ring_write(ring, VCE_CMD_END);
++
++ return true;
++}
++
++/**
++ * amdgpu_vce_ring_emit_ib - execute indirect buffer
++ *
++ * @ring: engine to use
++ * @ib: the IB to execute
++ *
++ */
++void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
++{
++ amdgpu_ring_write(ring, VCE_CMD_IB);
++ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
++ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
++ amdgpu_ring_write(ring, ib->length_dw);
++}
++
++/**
++ * amdgpu_vce_ring_emit_fence - add a fence command to the ring
++ *
++ * @ring: engine to use
++ * @addr: GPU address to write the sequence number to
++ * @seq: sequence number to emit
++ * @write64bits: 64-bit fence writes are not supported, must be false
++ *
++ */
++void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
++ bool write64bits)
++{
++ WARN_ON(write64bits);
++
++ amdgpu_ring_write(ring, VCE_CMD_FENCE);
++ amdgpu_ring_write(ring, addr);
++ amdgpu_ring_write(ring, upper_32_bits(addr));
++ amdgpu_ring_write(ring, seq);
++ amdgpu_ring_write(ring, VCE_CMD_TRAP);
++ amdgpu_ring_write(ring, VCE_CMD_END);
++}
++
++/**
++ * amdgpu_vce_ring_test_ring - test if VCE ring is working
++ *
++ * @ring: the engine to test on
++ *
++ */
++int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++ uint32_t rptr = amdgpu_ring_get_rptr(ring);
++ unsigned i;
++ int r;
++
++ r = amdgpu_ring_lock(ring, 16);
++ if (r) {
++ DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
++ ring->idx, r);
++ return r;
++ }
++ amdgpu_ring_write(ring, VCE_CMD_END);
++ amdgpu_ring_unlock_commit(ring);
++
++ for (i = 0; i < adev->usec_timeout; i++) {
++ if (amdgpu_ring_get_rptr(ring) != rptr)
++ break;
++ DRM_UDELAY(1);
++ }
++
++ if (i < adev->usec_timeout) {
++ DRM_INFO("ring test on %d succeeded in %d usecs\n",
++ ring->idx, i);
++ } else {
++ DRM_ERROR("amdgpu: ring %d test failed\n",
++ ring->idx);
++ r = -ETIMEDOUT;
++ }
++
++ return r;
++}
++
++/**
++ * amdgpu_vce_ring_test_ib - test if VCE IBs are working
++ *
++ * @ring: the engine to test on
++ *
++ */
++int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
++{
++ struct amdgpu_fence *fence = NULL;
++ int r;
++
++ r = amdgpu_vce_get_create_msg(ring, 1, NULL);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
++ goto error;
++ }
++
++ r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
++ if (r) {
++ DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
++ goto error;
++ }
++
++ r = amdgpu_fence_wait(fence, false);
++ if (r) {
++ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
++ } else {
++ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
++ }
++error:
++ amdgpu_fence_unref(&fence);
++ return r;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+new file mode 100644
+index 0000000..b9411e4
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+@@ -0,0 +1,47 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __AMDGPU_VCE_H__
++#define __AMDGPU_VCE_H__
++
++int amdgpu_vce_sw_init(struct amdgpu_device *adev);
++int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
++int amdgpu_vce_suspend(struct amdgpu_device *adev);
++int amdgpu_vce_resume(struct amdgpu_device *adev);
++int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_fence **fence);
++int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
++ struct amdgpu_fence **fence);
++void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
++int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi);
++int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
++bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
++ struct amdgpu_semaphore *semaphore,
++ bool emit_wait);
++void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
++void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
++ bool write64bit);
++int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
++int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+new file mode 100644
+index 0000000..1cc01fb
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -0,0 +1,1248 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Jerome Glisse.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_trace.h"
++
++/*
++ * GPUVM
++ * GPUVM is similar to the legacy gart on older asics; however,
++ * rather than there being a single global gart table
++ * for the entire GPU, there are multiple VM page tables active
++ * at any given time. The VM page tables can contain a mix of
++ * vram pages and system memory pages, and system memory pages
++ * can be mapped as snooped (cached system pages) or unsnooped
++ * (uncached system pages).
++ * Each VM has an ID associated with it and there is a page table
++ * associated with each VMID. When executing a command buffer,
++ * the kernel tells the ring what VMID to use for that command
++ * buffer. VMIDs are allocated dynamically as commands are submitted.
++ * The userspace drivers maintain their own address space and the kernel
++ * sets up their page tables accordingly when they submit their
++ * command buffers and a VMID is assigned.
++ * Cayman/Trinity support up to 8 active VMs at any given time;
++ * SI supports 16.
++ */
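++/*
++ * For illustration (hypothetical numbers): if amdgpu_vm_block_size were 9,
++ * each page table would cover 1 << 9 = 512 GPU pages (2MB with 4KB pages)
++ * and a GPU virtual page number vpn would decompose as:
++ *
++ *   pde_idx = vpn >> amdgpu_vm_block_size;      (selects the page table)
++ *   pte_idx = vpn & (AMDGPU_VM_PTE_COUNT - 1);  (selects the entry in it)
++ */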
++
++/**
++ * amdgpu_vm_num_pdes - return the number of page directory entries
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Calculate the number of page directory entries (cayman+).
++ */
++static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
++{
++ return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
++}
++
++/**
++ * amdgpu_vm_directory_size - returns the size of the page directory in bytes
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * Calculate the size of the page directory in bytes (cayman+).
++ */
++static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
++{
++ return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
++}
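++
++/*
++ * Worked example (hypothetical numbers): with max_pfn = 1 << 20 (4GB of
++ * GPU address space in 4KB pages) and amdgpu_vm_block_size = 9, this
++ * yields 1 << 11 = 2048 PDEs, i.e. a 2048 * 8 = 16KB page directory.
++ */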
++
++/**
++ * amdgpu_vm_get_bos - add the vm BOs to a validation list
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: vm providing the BOs
++ * @head: head of validation list
++ *
++ * Add the page directory to the list of BOs to
++ * validate for command submission (cayman+).
++ */
++struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
++ struct list_head *head)
++{
++ struct amdgpu_bo_list_entry *list;
++ unsigned i, idx;
++
++ list = drm_malloc_ab(vm->max_pde_used + 2,
++ sizeof(struct amdgpu_bo_list_entry));
++ if (!list)
++ return NULL;
++
++	/* add the vm page directory to the list */
++ list[0].robj = vm->page_directory;
++ list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
++ list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
++ list[0].priority = 0;
++ list[0].tv.bo = &vm->page_directory->tbo;
++ list[0].tv.shared = true;
++ list_add(&list[0].tv.head, head);
++
++ for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
++ if (!vm->page_tables[i].bo)
++ continue;
++
++ list[idx].robj = vm->page_tables[i].bo;
++ list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
++ list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
++ list[idx].priority = 0;
++ list[idx].tv.bo = &list[idx].robj->tbo;
++ list[idx].tv.shared = true;
++ list_add(&list[idx++].tv.head, head);
++ }
++
++ return list;
++}
++
++/**
++ * amdgpu_vm_grab_id - allocate the next free VMID
++ *
++ * @ring: ring we want to submit job to
++ * @vm: vm to allocate id for
++ *
++ * Allocate an id for the vm (cayman+).
++ * Returns the fence we need to sync to (if any).
++ *
++ * Global and local mutex must be locked!
++ */
++struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
++ struct amdgpu_vm *vm)
++{
++ struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
++ struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
++ struct amdgpu_device *adev = ring->adev;
++
++ unsigned choices[2] = {};
++ unsigned i;
++
++ /* check if the id is still valid */
++ if (vm_id->id && vm_id->last_id_use &&
++ vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
++ return NULL;
++
++	/* we definitely need to flush */
++ vm_id->pd_gpu_addr = ~0ll;
++
++ /* skip over VMID 0, since it is the system VM */
++ for (i = 1; i < adev->vm_manager.nvm; ++i) {
++ struct amdgpu_fence *fence = adev->vm_manager.active[i];
++
++ if (fence == NULL) {
++ /* found a free one */
++ vm_id->id = i;
++ trace_amdgpu_vm_grab_id(i, ring->idx);
++ return NULL;
++ }
++
++ if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
++ best[fence->ring->idx] = fence;
++ choices[fence->ring == ring ? 0 : 1] = i;
++ }
++ }
++
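++	/* prefer reusing an id whose last use was on this ring (choices[0]):
++	 * that avoids waiting on a fence from a foreign ring; ids last used
++	 * on another ring (choices[1]) are only a fallback */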
++ for (i = 0; i < 2; ++i) {
++ if (choices[i]) {
++ vm_id->id = choices[i];
++ trace_amdgpu_vm_grab_id(choices[i], ring->idx);
++ return adev->vm_manager.active[choices[i]];
++ }
++ }
++
++ /* should never happen */
++ BUG();
++ return NULL;
++}
++
++/**
++ * amdgpu_vm_flush - hardware flush the vm
++ *
++ * @ring: ring to use for flush
++ * @vm: vm we want to flush
++ * @updates: last vm update that we waited for
++ *
++ * Flush the vm (cayman+).
++ *
++ * Global and local mutex must be locked!
++ */
++void amdgpu_vm_flush(struct amdgpu_ring *ring,
++ struct amdgpu_vm *vm,
++ struct amdgpu_fence *updates)
++{
++ uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
++ struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
++
++ if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
++ amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) {
++
++ trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
++ amdgpu_fence_unref(&vm_id->flushed_updates);
++ vm_id->flushed_updates = amdgpu_fence_ref(updates);
++ vm_id->pd_gpu_addr = pd_addr;
++ amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
++ }
++}
++
++/**
++ * amdgpu_vm_fence - remember fence for vm
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: vm we want to fence
++ * @fence: fence to remember
++ *
++ * Fence the vm (cayman+).
++ * Set the fence used to protect page table and id.
++ *
++ * Global and local mutex must be locked!
++ */
++void amdgpu_vm_fence(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
++ struct amdgpu_fence *fence)
++{
++ unsigned ridx = fence->ring->idx;
++ unsigned vm_id = vm->ids[ridx].id;
++
++ amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
++ adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);
++
++ amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
++ vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
++}
++
++/**
++ * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
++ *
++ * @vm: requested vm
++ * @bo: requested buffer object
++ *
++ * Find @bo inside the requested vm (cayman+).
++ * Search the @bo's vm list for the requested vm
++ * Returns the found bo_va or NULL if none is found
++ *
++ * Object has to be reserved!
++ */
++struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
++ struct amdgpu_bo *bo)
++{
++ struct amdgpu_bo_va *bo_va;
++
++ list_for_each_entry(bo_va, &bo->va, bo_list) {
++ if (bo_va->vm == vm) {
++ return bo_va;
++ }
++ }
++ return NULL;
++}
++
++/**
++ * amdgpu_vm_update_pages - helper to call the right asic function
++ *
++ * @adev: amdgpu_device pointer
++ * @ib: indirect buffer to fill with commands
++ * @pe: addr of the page entry
++ * @addr: dst addr to write into pe
++ * @count: number of page entries to update
++ * @incr: increase next addr by incr bytes
++ * @flags: hw access flags
++ * @gtt_flags: GTT hw access flags
++ *
++ * Traces the parameters and calls the right asic functions
++ * to setup the page table using the DMA.
++ */
++static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
++ struct amdgpu_ib *ib,
++ uint64_t pe, uint64_t addr,
++ unsigned count, uint32_t incr,
++ uint32_t flags, uint32_t gtt_flags)
++{
++ trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
++
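++	/* three strategies: PTEs for system pages whose flags match the GTT
++	 * exactly are copied straight out of the GART table, scattered system
++	 * pages and very short runs are written inline, and long contiguous
++	 * runs use the cheaper set_pte_pde path */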
++ if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
++ uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
++ amdgpu_vm_copy_pte(adev, ib, pe, src, count);
++
++ } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
++ amdgpu_vm_write_pte(adev, ib, pe, addr,
++ count, incr, flags);
++
++ } else {
++ amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
++ count, incr, flags);
++ }
++}
++
++/**
++ * amdgpu_vm_clear_bo - initially clear the page dir/table
++ *
++ * @adev: amdgpu_device pointer
++ * @bo: bo to clear
++ */
++static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
++ struct amdgpu_bo *bo)
++{
++ struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
++ struct amdgpu_ib ib;
++ unsigned entries;
++ uint64_t addr;
++ int r;
++
++ r = amdgpu_bo_reserve(bo, false);
++ if (r)
++ return r;
++
++ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
++ if (r)
++ goto error_unreserve;
++
++ addr = amdgpu_bo_gpu_offset(bo);
++ entries = amdgpu_bo_size(bo) / 8;
++
++ r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
++ if (r)
++ goto error_unreserve;
++
++ ib.length_dw = 0;
++
++ amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
++ amdgpu_vm_pad_ib(adev, &ib);
++ WARN_ON(ib.length_dw > 64);
++
++ r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
++ if (r)
++ goto error_free;
++
++ amdgpu_bo_fence(bo, ib.fence, false);
++
++error_free:
++ amdgpu_ib_free(adev, &ib);
++
++error_unreserve:
++ amdgpu_bo_unreserve(bo);
++ return r;
++}
++
++/**
++ * amdgpu_vm_map_gart - get the physical address of a gart page
++ *
++ * @adev: amdgpu_device pointer
++ * @addr: the unmapped addr
++ *
++ * Look up the physical address of the page that the pte resolves
++ * to (cayman+).
++ * Returns the physical address of the page.
++ */
++uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
++{
++ uint64_t result;
++
++ /* page table offset */
++ result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
++
++	/* in case cpu page size != gpu page size */
++ result |= addr & (~PAGE_MASK);
++
++ return result;
++}
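++
++/*
++ * Example: with a 4KB PAGE_SIZE, addr = 0x12345 resolves via
++ * pages_addr[0x12] (the backing system page) OR'ed with the in-page
++ * offset 0x345.
++ */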
++
++/**
++ * amdgpu_vm_update_page_directory - make sure that page directory is valid
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ *
++ * Allocates new page tables if necessary
++ * and updates the page directory (cayman+).
++ * Returns 0 for success, error for failure.
++ *
++ * Global and local mutex must be locked!
++ */
++int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm)
++{
++ struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
++ struct amdgpu_bo *pd = vm->page_directory;
++ uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
++ uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
++ uint64_t last_pde = ~0, last_pt = ~0;
++ unsigned count = 0, pt_idx, ndw;
++ struct amdgpu_ib ib;
++ int r;
++
++ /* padding, etc. */
++ ndw = 64;
++
++ /* assume the worst case */
++ ndw += vm->max_pde_used * 6;
++
++ /* update too big for an IB */
++ if (ndw > 0xfffff)
++ return -ENOMEM;
++
++ r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
++ if (r)
++ return r;
++ ib.length_dw = 0;
++
++ /* walk over the address space and update the page directory */
++ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
++ struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
++ uint64_t pde, pt;
++
++ if (bo == NULL)
++ continue;
++
++ pt = amdgpu_bo_gpu_offset(bo);
++ if (vm->page_tables[pt_idx].addr == pt)
++ continue;
++ vm->page_tables[pt_idx].addr = pt;
++
++ pde = pd_addr + pt_idx * 8;
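++		/* coalesce runs: only flush the accumulated update when
++		 * either the PDE addresses or the page table addresses stop
++		 * being contiguous with the current run */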
++ if (((last_pde + 8 * count) != pde) ||
++ ((last_pt + incr * count) != pt)) {
++
++ if (count) {
++ amdgpu_vm_update_pages(adev, &ib, last_pde,
++ last_pt, count, incr,
++ AMDGPU_PTE_VALID, 0);
++ }
++
++ count = 1;
++ last_pde = pde;
++ last_pt = pt;
++ } else {
++ ++count;
++ }
++ }
++
++ if (count)
++ amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
++ incr, AMDGPU_PTE_VALID, 0);
++
++ if (ib.length_dw != 0) {
++ amdgpu_vm_pad_ib(adev, &ib);
++ amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
++ WARN_ON(ib.length_dw > ndw);
++ r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
++ if (r) {
++ amdgpu_ib_free(adev, &ib);
++ return r;
++ }
++ amdgpu_bo_fence(pd, ib.fence, false);
++ }
++ amdgpu_ib_free(adev, &ib);
++
++ return 0;
++}
++
++/**
++ * amdgpu_vm_frag_ptes - add fragment information to PTEs
++ *
++ * @adev: amdgpu_device pointer
++ * @ib: IB for the update
++ * @pe_start: first PTE to handle
++ * @pe_end: last PTE to handle
++ * @addr: addr those PTEs should point to
++ * @flags: hw mapping flags
++ * @gtt_flags: GTT hw mapping flags
++ *
++ * Global and local mutex must be locked!
++ */
++static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
++ struct amdgpu_ib *ib,
++ uint64_t pe_start, uint64_t pe_end,
++ uint64_t addr, uint32_t flags,
++ uint32_t gtt_flags)
++{
++ /**
++ * The MC L1 TLB supports variable sized pages, based on a fragment
++ * field in the PTE. When this field is set to a non-zero value, page
++ * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
++ * flags are considered valid for all PTEs within the fragment range
++ * and corresponding mappings are assumed to be physically contiguous.
++ *
++ * The L1 TLB can store a single PTE for the whole fragment,
++ * significantly increasing the space available for translation
++ * caching. This leads to large improvements in throughput when the
++ * TLB is under pressure.
++ *
++ * The L2 TLB distributes small and large fragments into two
++ * asymmetric partitions. The large fragment cache is significantly
++ * larger. Thus, we try to use large fragments wherever possible.
++ * Userspace can support this by aligning virtual base address and
++ * allocation size to the fragment size.
++ */
++
++ /* SI and newer are optimized for 64KB */
++ uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
++ uint64_t frag_align = 0x80;
++
++ uint64_t frag_start = ALIGN(pe_start, frag_align);
++ uint64_t frag_end = pe_end & ~(frag_align - 1);
++
++ unsigned count;
++
++	/* system pages are not physically contiguous */
++ if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
++ (frag_start >= frag_end)) {
++
++ count = (pe_end - pe_start) / 8;
++ amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
++ AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
++ return;
++ }
++
++ /* handle the 4K area at the beginning */
++ if (pe_start != frag_start) {
++ count = (frag_start - pe_start) / 8;
++ amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
++ AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
++ addr += AMDGPU_GPU_PAGE_SIZE * count;
++ }
++
++ /* handle the area in the middle */
++ count = (frag_end - frag_start) / 8;
++ amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
++ AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
++ gtt_flags);
++
++ /* handle the 4K area at the end */
++ if (frag_end != pe_end) {
++ addr += AMDGPU_GPU_PAGE_SIZE * count;
++ count = (pe_end - frag_end) / 8;
++ amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
++ AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
++ }
++}
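++
++/*
++ * Worked example (hypothetical numbers): with frag_align = 0x80, one
++ * fragment spans 0x80 bytes of PTEs = 16 entries = 64KB of mappings.
++ * For pe_start = 0x1040 and pe_end = 0x11c0: frag_start = 0x1080 and
++ * frag_end = 0x1180, so 8 PTEs are written 4K-sized at the head, 32 PTEs
++ * get the 64KB fragment flags, and 8 PTEs are written 4K-sized at the tail.
++ */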
++
++/**
++ * amdgpu_vm_update_ptes - make sure that page tables are valid
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ * @ib: IB for the update
++ * @start: start of GPU address range
++ * @end: end of GPU address range
++ * @dst: destination address to map to
++ * @flags: mapping flags
++ * @gtt_flags: GTT hw mapping flags
++ *
++ * Update the page tables in the range @start - @end (cayman+).
++ *
++ * Global and local mutex must be locked!
++ */
++static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
++ struct amdgpu_ib *ib,
++ uint64_t start, uint64_t end,
++ uint64_t dst, uint32_t flags,
++ uint32_t gtt_flags)
++{
++ uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
++ uint64_t last_pte = ~0, last_dst = ~0;
++ unsigned count = 0;
++ uint64_t addr;
++
++ /* walk over the address space and update the page tables */
++ for (addr = start; addr < end; ) {
++ uint64_t pt_idx = addr >> amdgpu_vm_block_size;
++ struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
++ unsigned nptes;
++ uint64_t pte;
++ int r;
++
++ amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
++ AMDGPU_FENCE_OWNER_VM);
++ r = reservation_object_reserve_shared(pt->tbo.resv);
++ if (r)
++ return r;
++
++ if ((addr & ~mask) == (end & ~mask))
++ nptes = end - addr;
++ else
++ nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
++
++ pte = amdgpu_bo_gpu_offset(pt);
++ pte += (addr & mask) * 8;
++
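++		/* same coalescing trick as the page directory update: batch
++		 * physically contiguous PTE runs into one frag_ptes call */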
++ if ((last_pte + 8 * count) != pte) {
++
++ if (count) {
++ amdgpu_vm_frag_ptes(adev, ib, last_pte,
++ last_pte + 8 * count,
++ last_dst, flags,
++ gtt_flags);
++ }
++
++ count = nptes;
++ last_pte = pte;
++ last_dst = dst;
++ } else {
++ count += nptes;
++ }
++
++ addr += nptes;
++ dst += nptes * AMDGPU_GPU_PAGE_SIZE;
++ }
++
++ if (count) {
++ amdgpu_vm_frag_ptes(adev, ib, last_pte,
++ last_pte + 8 * count,
++ last_dst, flags, gtt_flags);
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_vm_fence_pts - fence page tables after an update
++ *
++ * @vm: requested vm
++ * @start: start of GPU address range
++ * @end: end of GPU address range
++ * @fence: fence to use
++ *
++ * Fence the page tables in the range @start - @end (cayman+).
++ *
++ * Global and local mutex must be locked!
++ */
++static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
++ uint64_t start, uint64_t end,
++ struct amdgpu_fence *fence)
++{
++ unsigned i;
++
++ start >>= amdgpu_vm_block_size;
++ end >>= amdgpu_vm_block_size;
++
++ for (i = start; i <= end; ++i)
++ amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
++}
++
++/**
++ * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ * @mapping: mapped range and flags to use for the update
++ * @addr: addr to set the area to
++ * @gtt_flags: flags as they are used for GTT
++ * @fence: optional resulting fence
++ *
++ * Fill in the page table entries for @mapping.
++ * Returns 0 for success, -EINVAL for failure.
++ *
++ * Object has to be reserved and the mutex must be locked!
++ */
++static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
++ struct amdgpu_bo_va_mapping *mapping,
++ uint64_t addr, uint32_t gtt_flags,
++ struct amdgpu_fence **fence)
++{
++ struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
++ unsigned nptes, ncmds, ndw;
++ uint32_t flags = gtt_flags;
++ struct amdgpu_ib ib;
++ int r;
++
++	/* normally, bo_va->flags only contains the READABLE and WRITEABLE
++	 * bits, but filter the flags here anyway just in case
++	 */
++ if (!(mapping->flags & AMDGPU_PTE_READABLE))
++ flags &= ~AMDGPU_PTE_READABLE;
++ if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
++ flags &= ~AMDGPU_PTE_WRITEABLE;
++
++ trace_amdgpu_vm_bo_update(mapping);
++
++ nptes = mapping->it.last - mapping->it.start + 1;
++
++ /*
++ * reserve space for one command every (1 << BLOCK_SIZE)
++ * entries or 2k dwords (whatever is smaller)
++ */
++ ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
++
++ /* padding, etc. */
++ ndw = 64;
++
++ if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
++ /* only copy commands needed */
++ ndw += ncmds * 7;
++
++ } else if (flags & AMDGPU_PTE_SYSTEM) {
++ /* header for write data commands */
++ ndw += ncmds * 4;
++
++ /* body of write data command */
++ ndw += nptes * 2;
++
++ } else {
++ /* set page commands needed */
++ ndw += ncmds * 10;
++
++ /* two extra commands for begin/end of fragment */
++ ndw += 2 * 10;
++ }
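++
++	/*
++	 * Sizing example (hypothetical numbers): for nptes = 1 << 20 and
++	 * amdgpu_vm_block_size = 11, ncmds = (1 << 9) + 1 = 513, so the
++	 * copy-only path above reserves 64 + 513 * 7 = 3655 dwords.
++	 */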
++
++ /* update too big for an IB */
++ if (ndw > 0xfffff)
++ return -ENOMEM;
++
++ r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
++ if (r)
++ return r;
++ ib.length_dw = 0;
++
++ if (!(flags & AMDGPU_PTE_VALID)) {
++ unsigned i;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ struct amdgpu_fence *f = vm->ids[i].last_id_use;
++ amdgpu_sync_fence(&ib.sync, f);
++ }
++ }
++
++ r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
++ mapping->it.last + 1, addr + mapping->offset,
++ flags, gtt_flags);
++
++ if (r) {
++ amdgpu_ib_free(adev, &ib);
++ return r;
++ }
++
++ amdgpu_vm_pad_ib(adev, &ib);
++ WARN_ON(ib.length_dw > ndw);
++
++ r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
++ if (r) {
++ amdgpu_ib_free(adev, &ib);
++ return r;
++ }
++ amdgpu_vm_fence_pts(vm, mapping->it.start,
++ mapping->it.last + 1, ib.fence);
++ if (fence) {
++ amdgpu_fence_unref(fence);
++ *fence = amdgpu_fence_ref(ib.fence);
++ }
++ amdgpu_ib_free(adev, &ib);
++
++ return 0;
++}
++
++/**
++ * amdgpu_vm_bo_update - update all BO mappings in the vm page table
++ *
++ * @adev: amdgpu_device pointer
++ * @bo_va: requested BO and VM object
++ * @mem: ttm mem
++ *
++ * Fill in the page table entries for @bo_va.
++ * Returns 0 for success, -EINVAL for failure.
++ *
++ * Object has to be reserved and the mutex must be locked!
++ */
++int amdgpu_vm_bo_update(struct amdgpu_device *adev,
++ struct amdgpu_bo_va *bo_va,
++ struct ttm_mem_reg *mem)
++{
++ struct amdgpu_vm *vm = bo_va->vm;
++ struct amdgpu_bo_va_mapping *mapping;
++ uint32_t flags;
++ uint64_t addr;
++ int r;
++
++ if (mem) {
++ addr = mem->start << PAGE_SHIFT;
++ if (mem->mem_type != TTM_PL_TT)
++ addr += adev->vm_manager.vram_base_offset;
++ } else {
++ addr = 0;
++ }
++
++ if (addr == bo_va->addr)
++ return 0;
++
++ flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
++
++ list_for_each_entry(mapping, &bo_va->mappings, list) {
++ r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
++ flags, &bo_va->last_pt_update);
++ if (r)
++ return r;
++ }
++
++ bo_va->addr = addr;
++ spin_lock(&vm->status_lock);
++ list_del_init(&bo_va->vm_status);
++ spin_unlock(&vm->status_lock);
++
++ return 0;
++}
++
++/**
++ * amdgpu_vm_clear_freed - clear freed BOs in the PT
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ *
++ * Make sure all freed BOs are cleared in the PT.
++ * Returns 0 for success.
++ *
++ * PTs have to be reserved and mutex must be locked!
++ */
++int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm)
++{
++ struct amdgpu_bo_va_mapping *mapping;
++ int r;
++
++ while (!list_empty(&vm->freed)) {
++ mapping = list_first_entry(&vm->freed,
++ struct amdgpu_bo_va_mapping, list);
++ list_del(&mapping->list);
++
++ r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
++ kfree(mapping);
++ if (r)
++ return r;
++	}
++	return 0;
++}
++
++/**
++ * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ *
++ * Make sure all invalidated BOs are cleared in the PT.
++ * Returns 0 for success.
++ *
++ * PTs have to be reserved and mutex must be locked!
++ */
++int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm)
++{
++ struct amdgpu_bo_va *bo_va;
++ int r;
++
++ spin_lock(&vm->status_lock);
++ while (!list_empty(&vm->invalidated)) {
++ bo_va = list_first_entry(&vm->invalidated,
++ struct amdgpu_bo_va, vm_status);
++ spin_unlock(&vm->status_lock);
++
++ r = amdgpu_vm_bo_update(adev, bo_va, NULL);
++ if (r)
++ return r;
++
++ spin_lock(&vm->status_lock);
++ }
++ spin_unlock(&vm->status_lock);
++
++ return 0;
++}
++
++/**
++ * amdgpu_vm_bo_add - add a bo to a specific vm
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ * @bo: amdgpu buffer object
++ *
++ * Add @bo into the requested vm (cayman+).
++ * Add @bo to the list of bos associated with the vm
++ * Returns newly added bo_va or NULL for failure
++ *
++ * Object has to be reserved!
++ */
++struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
++ struct amdgpu_bo *bo)
++{
++ struct amdgpu_bo_va *bo_va;
++
++ bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
++ if (bo_va == NULL) {
++ return NULL;
++ }
++ bo_va->vm = vm;
++ bo_va->bo = bo;
++ bo_va->addr = 0;
++ bo_va->ref_count = 1;
++ INIT_LIST_HEAD(&bo_va->bo_list);
++ INIT_LIST_HEAD(&bo_va->mappings);
++ INIT_LIST_HEAD(&bo_va->vm_status);
++
++ mutex_lock(&vm->mutex);
++ list_add_tail(&bo_va->bo_list, &bo->va);
++ mutex_unlock(&vm->mutex);
++
++ return bo_va;
++}
++
++/**
++ * amdgpu_vm_bo_map - map bo inside a vm
++ *
++ * @adev: amdgpu_device pointer
++ * @bo_va: bo_va to store the address
++ * @saddr: where to map the BO
++ * @offset: requested offset in the BO
++ * @flags: attributes of pages (read/write/valid/etc.)
++ *
++ * Add a mapping of the BO at the specified addr into the VM.
++ * Returns 0 for success, error for failure.
++ *
++ * Object has to be reserved and gets unreserved by this function!
++ */
++int amdgpu_vm_bo_map(struct amdgpu_device *adev,
++ struct amdgpu_bo_va *bo_va,
++ uint64_t saddr, uint64_t offset,
++ uint64_t size, uint32_t flags)
++{
++ struct amdgpu_bo_va_mapping *mapping;
++ struct amdgpu_vm *vm = bo_va->vm;
++ struct interval_tree_node *it;
++ unsigned last_pfn, pt_idx;
++ uint64_t eaddr;
++ int r;
++
++ /* make sure object fit at this offset */
++ eaddr = saddr + size;
++ if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
++ amdgpu_bo_unreserve(bo_va->bo);
++ return -EINVAL;
++ }
++
++ last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
++ if (last_pfn > adev->vm_manager.max_pfn) {
++ dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
++ last_pfn, adev->vm_manager.max_pfn);
++ amdgpu_bo_unreserve(bo_va->bo);
++ return -EINVAL;
++ }
++
++ mutex_lock(&vm->mutex);
++
++ saddr /= AMDGPU_GPU_PAGE_SIZE;
++ eaddr /= AMDGPU_GPU_PAGE_SIZE;
++
++ it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
++ if (it) {
++ struct amdgpu_bo_va_mapping *tmp;
++ tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
++ /* bo and tmp overlap, invalid addr */
++ dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
++ "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
++ tmp->it.start, tmp->it.last + 1);
++ amdgpu_bo_unreserve(bo_va->bo);
++ r = -EINVAL;
++ goto error_unlock;
++ }
++
++ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
++ if (!mapping) {
++ amdgpu_bo_unreserve(bo_va->bo);
++ r = -ENOMEM;
++ goto error_unlock;
++ }
++
++ INIT_LIST_HEAD(&mapping->list);
++ mapping->it.start = saddr;
++ mapping->it.last = eaddr - 1;
++ mapping->offset = offset;
++ mapping->flags = flags;
++
++ list_add(&mapping->list, &bo_va->mappings);
++ interval_tree_insert(&mapping->it, &vm->va);
++
++ /* Make sure the page tables are allocated */
++ saddr >>= amdgpu_vm_block_size;
++ eaddr >>= amdgpu_vm_block_size;
++
++ BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
++
++ if (eaddr > vm->max_pde_used)
++ vm->max_pde_used = eaddr;
++
++ amdgpu_bo_unreserve(bo_va->bo);
++
++ /* walk over the address space and allocate the page tables */
++ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
++ struct amdgpu_bo *pt;
++
++ if (vm->page_tables[pt_idx].bo)
++ continue;
++
++ /* drop mutex to allocate and clear page table */
++ mutex_unlock(&vm->mutex);
++
++ r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
++ AMDGPU_GPU_PAGE_SIZE, true,
++ AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
++ if (r)
++ goto error_free;
++
++ r = amdgpu_vm_clear_bo(adev, pt);
++ if (r) {
++ amdgpu_bo_unref(&pt);
++ goto error_free;
++ }
++
++		/* acquire the mutex again */
++ mutex_lock(&vm->mutex);
++ if (vm->page_tables[pt_idx].bo) {
++ /* someone else allocated the pt in the meantime */
++ mutex_unlock(&vm->mutex);
++ amdgpu_bo_unref(&pt);
++ mutex_lock(&vm->mutex);
++ continue;
++ }
++
++ vm->page_tables[pt_idx].addr = 0;
++ vm->page_tables[pt_idx].bo = pt;
++ }
++
++ mutex_unlock(&vm->mutex);
++ return 0;
++
++error_free:
++ mutex_lock(&vm->mutex);
++ list_del(&mapping->list);
++ interval_tree_remove(&mapping->it, &vm->va);
++ kfree(mapping);
++
++error_unlock:
++ mutex_unlock(&vm->mutex);
++ return r;
++}
++
++/**
++ * amdgpu_vm_bo_unmap - remove bo mapping from vm
++ *
++ * @adev: amdgpu_device pointer
++ * @bo_va: bo_va to remove the address from
++ * @saddr: where the BO is mapped
++ *
++ * Remove a mapping of the BO at the specified addr from the VM.
++ * Returns 0 for success, error for failure.
++ *
++ * Object has to be reserved and gets unreserved by this function!
++ */
++int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
++ struct amdgpu_bo_va *bo_va,
++ uint64_t saddr)
++{
++ struct amdgpu_bo_va_mapping *mapping;
++ struct amdgpu_vm *vm = bo_va->vm;
++
++ list_for_each_entry(mapping, &bo_va->mappings, list) {
++ if (mapping->it.start == saddr)
++ break;
++ }
++
++ if (&mapping->list == &bo_va->mappings) {
++ amdgpu_bo_unreserve(bo_va->bo);
++ return -ENOENT;
++ }
++
++ mutex_lock(&vm->mutex);
++ list_del(&mapping->list);
++ interval_tree_remove(&mapping->it, &vm->va);
++
++ if (bo_va->addr) {
++ /* clear the old address */
++ list_add(&mapping->list, &vm->freed);
++ } else {
++ kfree(mapping);
++ }
++ mutex_unlock(&vm->mutex);
++ amdgpu_bo_unreserve(bo_va->bo);
++
++ return 0;
++}
++
++/**
++ * amdgpu_vm_bo_rmv - remove a bo from a specific vm
++ *
++ * @adev: amdgpu_device pointer
++ * @bo_va: requested bo_va
++ *
++ * Remove @bo_va->bo from the requested vm (cayman+).
++ *
++ * Object has to be reserved!
++ */
++void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
++ struct amdgpu_bo_va *bo_va)
++{
++ struct amdgpu_bo_va_mapping *mapping, *next;
++ struct amdgpu_vm *vm = bo_va->vm;
++
++ list_del(&bo_va->bo_list);
++
++ mutex_lock(&vm->mutex);
++
++ spin_lock(&vm->status_lock);
++ list_del(&bo_va->vm_status);
++ spin_unlock(&vm->status_lock);
++
++ list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
++ list_del(&mapping->list);
++ interval_tree_remove(&mapping->it, &vm->va);
++ if (bo_va->addr)
++ list_add(&mapping->list, &vm->freed);
++ else
++ kfree(mapping);
++ }
++ amdgpu_fence_unref(&bo_va->last_pt_update);
++ kfree(bo_va);
++
++ mutex_unlock(&vm->mutex);
++}
++
++/**
++ * amdgpu_vm_bo_invalidate - mark the bo as invalid
++ *
++ * @adev: amdgpu_device pointer
++ * @bo: amdgpu buffer object
++ *
++ * Mark @bo as invalid (cayman+).
++ */
++void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
++ struct amdgpu_bo *bo)
++{
++ struct amdgpu_bo_va *bo_va;
++
++ list_for_each_entry(bo_va, &bo->va, bo_list) {
++ if (bo_va->addr) {
++ spin_lock(&bo_va->vm->status_lock);
++ list_del(&bo_va->vm_status);
++ list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
++ spin_unlock(&bo_va->vm->status_lock);
++ }
++ }
++}
++
++/**
++ * amdgpu_vm_init - initialize a vm instance
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ *
++ * Init @vm fields (cayman+).
++ */
++int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
++{
++ const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
++ AMDGPU_VM_PTE_COUNT * 8);
++ unsigned pd_size, pd_entries, pts_size;
++ int i, r;
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ vm->ids[i].id = 0;
++ vm->ids[i].flushed_updates = NULL;
++ vm->ids[i].last_id_use = NULL;
++ }
++ mutex_init(&vm->mutex);
++ vm->va = RB_ROOT;
++ spin_lock_init(&vm->status_lock);
++ INIT_LIST_HEAD(&vm->invalidated);
++ INIT_LIST_HEAD(&vm->freed);
++
++ pd_size = amdgpu_vm_directory_size(adev);
++ pd_entries = amdgpu_vm_num_pdes(adev);
++
++ /* allocate page table array */
++ pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
++ vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
++ if (vm->page_tables == NULL) {
++ DRM_ERROR("Cannot allocate memory for page table array\n");
++ return -ENOMEM;
++ }
++
++ r = amdgpu_bo_create(adev, pd_size, align, true,
++ AMDGPU_GEM_DOMAIN_VRAM, 0,
++ NULL, &vm->page_directory);
++ if (r)
++ return r;
++
++ r = amdgpu_vm_clear_bo(adev, vm->page_directory);
++ if (r) {
++ amdgpu_bo_unref(&vm->page_directory);
++ vm->page_directory = NULL;
++ return r;
++ }
++
++ return 0;
++}
++
++/**
++ * amdgpu_vm_fini - tear down a vm instance
++ *
++ * @adev: amdgpu_device pointer
++ * @vm: requested vm
++ *
++ * Tear down @vm (cayman+).
++ * Unbind the VM and remove all bos from the vm bo list
++ */
++void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
++{
++ struct amdgpu_bo_va_mapping *mapping, *tmp;
++ int i;
++
++ if (!RB_EMPTY_ROOT(&vm->va)) {
++ dev_err(adev->dev, "still active bo inside vm\n");
++ }
++ rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
++ list_del(&mapping->list);
++ interval_tree_remove(&mapping->it, &vm->va);
++ kfree(mapping);
++ }
++ list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
++ list_del(&mapping->list);
++ kfree(mapping);
++ }
++
++ for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
++ amdgpu_bo_unref(&vm->page_tables[i].bo);
++ kfree(vm->page_tables);
++
++ amdgpu_bo_unref(&vm->page_directory);
++
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ amdgpu_fence_unref(&vm->ids[i].flushed_updates);
++ amdgpu_fence_unref(&vm->ids[i].last_id_use);
++ }
++
++ mutex_destroy(&vm->mutex);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/amdgpu/atom-bits.h
+new file mode 100644
+index 0000000..e8fae5c
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atom-bits.h
+@@ -0,0 +1,48 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Stanislaw Skowronek
++ */
++
++#ifndef ATOM_BITS_H
++#define ATOM_BITS_H
++
++static inline uint8_t get_u8(void *bios, int ptr)
++{
++ return ((unsigned char *)bios)[ptr];
++}
++#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
++#define CU8(ptr) get_u8(ctx->bios, (ptr))
++static inline uint16_t get_u16(void *bios, int ptr)
++{
++	return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
++}
++#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
++#define CU16(ptr) get_u16(ctx->bios, (ptr))
++static inline uint32_t get_u32(void *bios, int ptr)
++{
++ return get_u16(bios, ptr)|(((uint32_t)get_u16(bios, ptr+2))<<16);
++}
++#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
++#define CU32(ptr) get_u32(ctx->bios, (ptr))
++#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
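++
++/*
++ * Example (BIOS images are little-endian): if bios[ptr..ptr+3] holds
++ * 0x78 0x56 0x34 0x12, get_u16() returns 0x5678 and get_u32() returns
++ * 0x12345678.
++ */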
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/amdgpu/atom-names.h
+new file mode 100644
+index 0000000..6f907a5
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atom-names.h
+@@ -0,0 +1,100 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Stanislaw Skowronek
++ */
++
++#ifndef ATOM_NAMES_H
++#define ATOM_NAMES_H
++
++#include "atom.h"
++
++#ifdef ATOM_DEBUG
++
++#define ATOM_OP_NAMES_CNT 123
++static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
++"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
++"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
++"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
++"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
++"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
++"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
++"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
++"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
++"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
++"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
++"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
++"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
++"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
++"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
++"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
++"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
++"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
++"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
++"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
++"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
++"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
++"DEBUG", "CTB_DS",
++};
++
++#define ATOM_TABLE_NAMES_CNT 74
++static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
++"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
++"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
++"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
++"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
++"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
++"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
++"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
++"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
++"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
++"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
++"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
++"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
++"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
++"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
++"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
++"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
++"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
++"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
++"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
++"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
++"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
++"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
++"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
++"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
++"MemoryDeviceInit", "EnableYUV",
++};
++
++#define ATOM_IO_NAMES_CNT 5
++static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
++"MM", "PLL", "MC", "PCIE", "PCIE PORT",
++};
++
++#else
++
++#define ATOM_OP_NAMES_CNT 0
++#define ATOM_TABLE_NAMES_CNT 0
++#define ATOM_IO_NAMES_CNT 0
++
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/amdgpu/atom-types.h
+new file mode 100644
+index 0000000..1125b86
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atom-types.h
+@@ -0,0 +1,42 @@
++/*
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Dave Airlie
++ */
++
++#ifndef ATOM_TYPES_H
++#define ATOM_TYPES_H
++
++/* sync atom types to kernel types */
++
++typedef uint16_t USHORT;
++typedef uint32_t ULONG;
++typedef uint8_t UCHAR;
++
++
++#ifndef ATOM_BIG_ENDIAN
++#if defined(__BIG_ENDIAN)
++#define ATOM_BIG_ENDIAN 1
++#else
++#define ATOM_BIG_ENDIAN 0
++#endif
++#endif
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+new file mode 100644
+index 0000000..a0346a9
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -0,0 +1,1408 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Stanislaw Skowronek
++ */
++
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <asm/unaligned.h>
++
++#define ATOM_DEBUG
++
++#include "atom.h"
++#include "atom-names.h"
++#include "atom-bits.h"
++#include "amdgpu.h"
++
++#define ATOM_COND_ABOVE 0
++#define ATOM_COND_ABOVEOREQUAL 1
++#define ATOM_COND_ALWAYS 2
++#define ATOM_COND_BELOW 3
++#define ATOM_COND_BELOWOREQUAL 4
++#define ATOM_COND_EQUAL 5
++#define ATOM_COND_NOTEQUAL 6
++
++#define ATOM_PORT_ATI 0
++#define ATOM_PORT_PCI 1
++#define ATOM_PORT_SYSIO 2
++
++#define ATOM_UNIT_MICROSEC 0
++#define ATOM_UNIT_MILLISEC 1
++
++#define PLL_INDEX 2
++#define PLL_DATA 3
++
++typedef struct {
++ struct atom_context *ctx;
++ uint32_t *ps, *ws;
++ int ps_shift;
++ uint16_t start;
++ unsigned last_jump;
++ unsigned long last_jump_jiffies;
++ bool abort;
++} atom_exec_context;
++
++int amdgpu_atom_debug = 0;
++static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
++int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
++
++static uint32_t atom_arg_mask[8] =
++ { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
++0xFF000000 };
++static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
++
++static int atom_dst_to_src[8][4] = {
++ /* translate destination alignment field to the source alignment encoding */
++ {0, 0, 0, 0},
++ {1, 2, 3, 0},
++ {1, 2, 3, 0},
++ {1, 2, 3, 0},
++ {4, 5, 6, 7},
++ {4, 5, 6, 7},
++ {4, 5, 6, 7},
++ {4, 5, 6, 7},
++};
++static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
++
++static int debug_depth = 0;
++#ifdef ATOM_DEBUG
++static void debug_print_spaces(int n)
++{
++ while (n--)
++ printk(" ");
++}
++
++#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
++#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
++#else
++#define DEBUG(...) do { } while (0)
++#define SDEBUG(...) do { } while (0)
++#endif
++
++static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
++ uint32_t index, uint32_t data)
++{
++ uint32_t temp = 0xCDCDCDCD;
++
++ while (1)
++ switch (CU8(base)) {
++ case ATOM_IIO_NOP:
++ base++;
++ break;
++ case ATOM_IIO_READ:
++ temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
++ base += 3;
++ break;
++ case ATOM_IIO_WRITE:
++ ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
++ base += 3;
++ break;
++ case ATOM_IIO_CLEAR:
++ temp &=
++ ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
++ CU8(base + 2));
++ base += 3;
++ break;
++ case ATOM_IIO_SET:
++ temp |=
++ (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
++ 2);
++ base += 3;
++ break;
++ case ATOM_IIO_MOVE_INDEX:
++ temp &=
++ ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
++ CU8(base + 3));
++ temp |=
++ ((index >> CU8(base + 2)) &
++ (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
++ 3);
++ base += 4;
++ break;
++ case ATOM_IIO_MOVE_DATA:
++ temp &=
++ ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
++ CU8(base + 3));
++ temp |=
++ ((data >> CU8(base + 2)) &
++ (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
++ 3);
++ base += 4;
++ break;
++		case ATOM_IIO_MOVE_ATTR:
++			temp &=
++			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
++			      CU8(base + 3));
++			temp |=
++			    ((ctx->io_attr >> CU8(base + 2)) &
++			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
++			    CU8(base + 3);
++			base += 4;
++			break;
++ case ATOM_IIO_END:
++ return temp;
++ default:
++ printk(KERN_INFO "Unknown IIO opcode.\n");
++ return 0;
++ }
++}
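++
++/*
++ * An IIO table entry is a tiny byte-coded program, e.g. (illustrative):
++ *
++ *   ATOM_IIO_READ  <port>    temp = ioreg_read(port)
++ *   ATOM_IIO_CLEAR <w> <s>   clear w bits of temp at bit position s
++ *   ATOM_IIO_MOVE_INDEX ...  merge bits of the caller's index into temp
++ *   ATOM_IIO_END             return temp
++ */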
++
++static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
++ int *ptr, uint32_t *saved, int print)
++{
++ uint32_t idx, val = 0xCDCDCDCD, align, arg;
++ struct atom_context *gctx = ctx->ctx;
++ arg = attr & 7;
++ align = (attr >> 3) & 7;
++ switch (arg) {
++ case ATOM_ARG_REG:
++ idx = U16(*ptr);
++ (*ptr) += 2;
++ if (print)
++ DEBUG("REG[0x%04X]", idx);
++ idx += gctx->reg_block;
++ switch (gctx->io_mode) {
++ case ATOM_IO_MM:
++ val = gctx->card->reg_read(gctx->card, idx);
++ break;
++ case ATOM_IO_PCI:
++ printk(KERN_INFO
++ "PCI registers are not implemented.\n");
++ return 0;
++ case ATOM_IO_SYSIO:
++ printk(KERN_INFO
++ "SYSIO registers are not implemented.\n");
++ return 0;
++ default:
++ if (!(gctx->io_mode & 0x80)) {
++ printk(KERN_INFO "Bad IO mode.\n");
++ return 0;
++ }
++ if (!gctx->iio[gctx->io_mode & 0x7F]) {
++ printk(KERN_INFO
++ "Undefined indirect IO read method %d.\n",
++ gctx->io_mode & 0x7F);
++ return 0;
++ }
++ val =
++ atom_iio_execute(gctx,
++ gctx->iio[gctx->io_mode & 0x7F],
++ idx, 0);
++ }
++ break;
++ case ATOM_ARG_PS:
++ idx = U8(*ptr);
++ (*ptr)++;
++ /* get_unaligned_le32 avoids unaligned accesses from atombios
++ * tables, noticed on a DEC Alpha. */
++ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
++ if (print)
++ DEBUG("PS[0x%02X,0x%04X]", idx, val);
++ break;
++ case ATOM_ARG_WS:
++ idx = U8(*ptr);
++ (*ptr)++;
++ if (print)
++ DEBUG("WS[0x%02X]", idx);
++ switch (idx) {
++ case ATOM_WS_QUOTIENT:
++ val = gctx->divmul[0];
++ break;
++ case ATOM_WS_REMAINDER:
++ val = gctx->divmul[1];
++ break;
++ case ATOM_WS_DATAPTR:
++ val = gctx->data_block;
++ break;
++ case ATOM_WS_SHIFT:
++ val = gctx->shift;
++ break;
++ case ATOM_WS_OR_MASK:
++ val = 1 << gctx->shift;
++ break;
++ case ATOM_WS_AND_MASK:
++ val = ~(1 << gctx->shift);
++ break;
++ case ATOM_WS_FB_WINDOW:
++ val = gctx->fb_base;
++ break;
++ case ATOM_WS_ATTRIBUTES:
++ val = gctx->io_attr;
++ break;
++ case ATOM_WS_REGPTR:
++ val = gctx->reg_block;
++ break;
++ default:
++ val = ctx->ws[idx];
++ }
++ break;
++ case ATOM_ARG_ID:
++ idx = U16(*ptr);
++ (*ptr) += 2;
++ if (print) {
++ if (gctx->data_block)
++ DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
++ else
++ DEBUG("ID[0x%04X]", idx);
++ }
++ val = U32(idx + gctx->data_block);
++ break;
++ case ATOM_ARG_FB:
++ idx = U8(*ptr);
++ (*ptr)++;
++ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
++ DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
++ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
++ val = 0;
++ } else
++ val = gctx->scratch[(gctx->fb_base / 4) + idx];
++ if (print)
++ DEBUG("FB[0x%02X]", idx);
++ break;
++ case ATOM_ARG_IMM:
++ switch (align) {
++ case ATOM_SRC_DWORD:
++ val = U32(*ptr);
++ (*ptr) += 4;
++ if (print)
++ DEBUG("IMM 0x%08X\n", val);
++ return val;
++ case ATOM_SRC_WORD0:
++ case ATOM_SRC_WORD8:
++ case ATOM_SRC_WORD16:
++ val = U16(*ptr);
++ (*ptr) += 2;
++ if (print)
++ DEBUG("IMM 0x%04X\n", val);
++ return val;
++ case ATOM_SRC_BYTE0:
++ case ATOM_SRC_BYTE8:
++ case ATOM_SRC_BYTE16:
++ case ATOM_SRC_BYTE24:
++ val = U8(*ptr);
++ (*ptr)++;
++ if (print)
++ DEBUG("IMM 0x%02X\n", val);
++ return val;
++ }
++ return 0;
++ case ATOM_ARG_PLL:
++ idx = U8(*ptr);
++ (*ptr)++;
++ if (print)
++ DEBUG("PLL[0x%02X]", idx);
++ val = gctx->card->pll_read(gctx->card, idx);
++ break;
++ case ATOM_ARG_MC:
++ idx = U8(*ptr);
++ (*ptr)++;
++ if (print)
++ DEBUG("MC[0x%02X]", idx);
++ val = gctx->card->mc_read(gctx->card, idx);
++ break;
++ }
++ if (saved)
++ *saved = val;
++ val &= atom_arg_mask[align];
++ val >>= atom_arg_shift[align];
++ if (print)
++ switch (align) {
++ case ATOM_SRC_DWORD:
++ DEBUG(".[31:0] -> 0x%08X\n", val);
++ break;
++ case ATOM_SRC_WORD0:
++ DEBUG(".[15:0] -> 0x%04X\n", val);
++ break;
++ case ATOM_SRC_WORD8:
++ DEBUG(".[23:8] -> 0x%04X\n", val);
++ break;
++ case ATOM_SRC_WORD16:
++ DEBUG(".[31:16] -> 0x%04X\n", val);
++ break;
++ case ATOM_SRC_BYTE0:
++ DEBUG(".[7:0] -> 0x%02X\n", val);
++ break;
++ case ATOM_SRC_BYTE8:
++ DEBUG(".[15:8] -> 0x%02X\n", val);
++ break;
++ case ATOM_SRC_BYTE16:
++ DEBUG(".[23:16] -> 0x%02X\n", val);
++ break;
++ case ATOM_SRC_BYTE24:
++ DEBUG(".[31:24] -> 0x%02X\n", val);
++ break;
++ }
++ return val;
++}
++
++static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
++{
++ uint32_t align = (attr >> 3) & 7, arg = attr & 7;
++ switch (arg) {
++ case ATOM_ARG_REG:
++ case ATOM_ARG_ID:
++ (*ptr) += 2;
++ break;
++ case ATOM_ARG_PLL:
++ case ATOM_ARG_MC:
++ case ATOM_ARG_PS:
++ case ATOM_ARG_WS:
++ case ATOM_ARG_FB:
++ (*ptr)++;
++ break;
++ case ATOM_ARG_IMM:
++ switch (align) {
++ case ATOM_SRC_DWORD:
++ (*ptr) += 4;
++ return;
++ case ATOM_SRC_WORD0:
++ case ATOM_SRC_WORD8:
++ case ATOM_SRC_WORD16:
++ (*ptr) += 2;
++ return;
++ case ATOM_SRC_BYTE0:
++ case ATOM_SRC_BYTE8:
++ case ATOM_SRC_BYTE16:
++ case ATOM_SRC_BYTE24:
++ (*ptr)++;
++ return;
++ }
++ return;
++ }
++}
++
++static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
++{
++ return atom_get_src_int(ctx, attr, ptr, NULL, 1);
++}
++
++static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
++{
++ uint32_t val = 0xCDCDCDCD;
++
++ switch (align) {
++ case ATOM_SRC_DWORD:
++ val = U32(*ptr);
++ (*ptr) += 4;
++ break;
++ case ATOM_SRC_WORD0:
++ case ATOM_SRC_WORD8:
++ case ATOM_SRC_WORD16:
++ val = U16(*ptr);
++ (*ptr) += 2;
++ break;
++ case ATOM_SRC_BYTE0:
++ case ATOM_SRC_BYTE8:
++ case ATOM_SRC_BYTE16:
++ case ATOM_SRC_BYTE24:
++ val = U8(*ptr);
++ (*ptr)++;
++ break;
++ }
++ return val;
++}
++
++static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
++ int *ptr, uint32_t *saved, int print)
++{
++ return atom_get_src_int(ctx,
++ arg | atom_dst_to_src[(attr >> 3) &
++ 7][(attr >> 6) & 3] << 3,
++ ptr, saved, print);
++}
++
++static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
++{
++ atom_skip_src_int(ctx,
++ arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
++ 3] << 3, ptr);
++}
++
++static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
++ int *ptr, uint32_t val, uint32_t saved)
++{
++ uint32_t align =
++ atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
++ val, idx;
++ struct atom_context *gctx = ctx->ctx;
++ old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
++ val <<= atom_arg_shift[align];
++ val &= atom_arg_mask[align];
++ saved &= ~atom_arg_mask[align];
++ val |= saved;
++ switch (arg) {
++ case ATOM_ARG_REG:
++ idx = U16(*ptr);
++ (*ptr) += 2;
++ DEBUG("REG[0x%04X]", idx);
++ idx += gctx->reg_block;
++ switch (gctx->io_mode) {
++ case ATOM_IO_MM:
++ if (idx == 0)
++ gctx->card->reg_write(gctx->card, idx,
++ val << 2);
++ else
++ gctx->card->reg_write(gctx->card, idx, val);
++ break;
++ case ATOM_IO_PCI:
++ printk(KERN_INFO
++ "PCI registers are not implemented.\n");
++ return;
++ case ATOM_IO_SYSIO:
++ printk(KERN_INFO
++ "SYSIO registers are not implemented.\n");
++ return;
++ default:
++ if (!(gctx->io_mode & 0x80)) {
++ printk(KERN_INFO "Bad IO mode.\n");
++ return;
++ }
++			if (!gctx->iio[gctx->io_mode & 0x7F]) {
++ printk(KERN_INFO
++ "Undefined indirect IO write method %d.\n",
++ gctx->io_mode & 0x7F);
++ return;
++ }
++			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F],
++ idx, val);
++ }
++ break;
++ case ATOM_ARG_PS:
++ idx = U8(*ptr);
++ (*ptr)++;
++ DEBUG("PS[0x%02X]", idx);
++ ctx->ps[idx] = cpu_to_le32(val);
++ break;
++ case ATOM_ARG_WS:
++ idx = U8(*ptr);
++ (*ptr)++;
++ DEBUG("WS[0x%02X]", idx);
++ switch (idx) {
++ case ATOM_WS_QUOTIENT:
++ gctx->divmul[0] = val;
++ break;
++ case ATOM_WS_REMAINDER:
++ gctx->divmul[1] = val;
++ break;
++ case ATOM_WS_DATAPTR:
++ gctx->data_block = val;
++ break;
++ case ATOM_WS_SHIFT:
++ gctx->shift = val;
++ break;
++ case ATOM_WS_OR_MASK:
++ case ATOM_WS_AND_MASK:
++ break;
++ case ATOM_WS_FB_WINDOW:
++ gctx->fb_base = val;
++ break;
++ case ATOM_WS_ATTRIBUTES:
++ gctx->io_attr = val;
++ break;
++ case ATOM_WS_REGPTR:
++ gctx->reg_block = val;
++ break;
++ default:
++ ctx->ws[idx] = val;
++ }
++ break;
++ case ATOM_ARG_FB:
++ idx = U8(*ptr);
++ (*ptr)++;
++ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
++ DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
++ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
++ } else
++ gctx->scratch[(gctx->fb_base / 4) + idx] = val;
++ DEBUG("FB[0x%02X]", idx);
++ break;
++ case ATOM_ARG_PLL:
++ idx = U8(*ptr);
++ (*ptr)++;
++ DEBUG("PLL[0x%02X]", idx);
++ gctx->card->pll_write(gctx->card, idx, val);
++ break;
++ case ATOM_ARG_MC:
++ idx = U8(*ptr);
++ (*ptr)++;
++ DEBUG("MC[0x%02X]", idx);
++ gctx->card->mc_write(gctx->card, idx, val);
++ return;
++ }
++ switch (align) {
++ case ATOM_SRC_DWORD:
++ DEBUG(".[31:0] <- 0x%08X\n", old_val);
++ break;
++ case ATOM_SRC_WORD0:
++ DEBUG(".[15:0] <- 0x%04X\n", old_val);
++ break;
++ case ATOM_SRC_WORD8:
++ DEBUG(".[23:8] <- 0x%04X\n", old_val);
++ break;
++ case ATOM_SRC_WORD16:
++ DEBUG(".[31:16] <- 0x%04X\n", old_val);
++ break;
++ case ATOM_SRC_BYTE0:
++ DEBUG(".[7:0] <- 0x%02X\n", old_val);
++ break;
++ case ATOM_SRC_BYTE8:
++ DEBUG(".[15:8] <- 0x%02X\n", old_val);
++ break;
++ case ATOM_SRC_BYTE16:
++ DEBUG(".[23:16] <- 0x%02X\n", old_val);
++ break;
++ case ATOM_SRC_BYTE24:
++ DEBUG(".[31:24] <- 0x%02X\n", old_val);
++ break;
++ }
++}
++
++static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src, saved;
++ int dptr = *ptr;
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ SDEBUG(" src: ");
++ src = atom_get_src(ctx, attr, ptr);
++ dst += src;
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src, saved;
++ int dptr = *ptr;
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ SDEBUG(" src: ");
++ src = atom_get_src(ctx, attr, ptr);
++ dst &= src;
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
++{
++	printk(KERN_INFO "ATOM BIOS beeped!\n");
++}
++
++static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
++{
++ int idx = U8((*ptr)++);
++ int r = 0;
++
++ if (idx < ATOM_TABLE_NAMES_CNT)
++ SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
++ else
++ SDEBUG(" table: %d\n", idx);
++ if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
++ r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
++ if (r) {
++ ctx->abort = true;
++ }
++}
++
++static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t saved;
++ int dptr = *ptr;
++ attr &= 0x38;
++ attr |= atom_def_dst[attr >> 3] << 6;
++ atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
++}
++
++static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src;
++ SDEBUG(" src1: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
++ SDEBUG(" src2: ");
++ src = atom_get_src(ctx, attr, ptr);
++ ctx->ctx->cs_equal = (dst == src);
++ ctx->ctx->cs_above = (dst > src);
++ SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
++ ctx->ctx->cs_above ? "GT" : "LE");
++}
++
++static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
++{
++ unsigned count = U8((*ptr)++);
++ SDEBUG(" count: %d\n", count);
++ if (arg == ATOM_UNIT_MICROSEC)
++ udelay(count);
++ else if (!drm_can_sleep())
++ mdelay(count);
++ else
++ msleep(count);
++}
++
++static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src;
++ SDEBUG(" src1: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
++ SDEBUG(" src2: ");
++ src = atom_get_src(ctx, attr, ptr);
++ if (src != 0) {
++ ctx->ctx->divmul[0] = dst / src;
++ ctx->ctx->divmul[1] = dst % src;
++ } else {
++ ctx->ctx->divmul[0] = 0;
++ ctx->ctx->divmul[1] = 0;
++ }
++}
++
++static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
++{
++ /* functionally, a nop */
++}
++
++static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
++{
++ int execute = 0, target = U16(*ptr);
++ unsigned long cjiffies;
++
++ (*ptr) += 2;
++ switch (arg) {
++ case ATOM_COND_ABOVE:
++ execute = ctx->ctx->cs_above;
++ break;
++ case ATOM_COND_ABOVEOREQUAL:
++ execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
++ break;
++ case ATOM_COND_ALWAYS:
++ execute = 1;
++ break;
++ case ATOM_COND_BELOW:
++ execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
++ break;
++ case ATOM_COND_BELOWOREQUAL:
++ execute = !ctx->ctx->cs_above;
++ break;
++ case ATOM_COND_EQUAL:
++ execute = ctx->ctx->cs_equal;
++ break;
++ case ATOM_COND_NOTEQUAL:
++ execute = !ctx->ctx->cs_equal;
++ break;
++ }
++ if (arg != ATOM_COND_ALWAYS)
++ SDEBUG(" taken: %s\n", execute ? "yes" : "no");
++ SDEBUG(" target: 0x%04X\n", target);
++ if (execute) {
++ if (ctx->last_jump == (ctx->start + target)) {
++ cjiffies = jiffies;
++ if (time_after(cjiffies, ctx->last_jump_jiffies)) {
++ cjiffies -= ctx->last_jump_jiffies;
++ if ((jiffies_to_msecs(cjiffies) > 5000)) {
++					DRM_ERROR("atombios stuck in loop for more than 5 secs, aborting\n");
++ ctx->abort = true;
++ }
++ } else {
++				/* jiffies wrapped around; just wait a little longer */
++ ctx->last_jump_jiffies = jiffies;
++ }
++ } else {
++ ctx->last_jump = ctx->start + target;
++ ctx->last_jump_jiffies = jiffies;
++ }
++ *ptr = ctx->start + target;
++ }
++}
++
++static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, mask, src, saved;
++ int dptr = *ptr;
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
++ SDEBUG(" mask: 0x%08x", mask);
++ SDEBUG(" src: ");
++ src = atom_get_src(ctx, attr, ptr);
++ dst &= mask;
++ dst |= src;
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t src, saved;
++ int dptr = *ptr;
++ if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
++ atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
++ else {
++ atom_skip_dst(ctx, arg, attr, ptr);
++ saved = 0xCDCDCDCD;
++ }
++ SDEBUG(" src: ");
++ src = atom_get_src(ctx, attr, ptr);
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, src, saved);
++}
++
++static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src;
++ SDEBUG(" src1: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
++ SDEBUG(" src2: ");
++ src = atom_get_src(ctx, attr, ptr);
++ ctx->ctx->divmul[0] = dst * src;
++}
++
++static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
++{
++ /* nothing */
++}
++
++static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src, saved;
++ int dptr = *ptr;
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ SDEBUG(" src: ");
++ src = atom_get_src(ctx, attr, ptr);
++ dst |= src;
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t val = U8((*ptr)++);
++ SDEBUG("POST card output: 0x%02X\n", val);
++}
++
++static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
++{
++ printk(KERN_INFO "unimplemented!\n");
++}
++
++static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
++{
++ printk(KERN_INFO "unimplemented!\n");
++}
++
++static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
++{
++ printk(KERN_INFO "unimplemented!\n");
++}
++
++static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
++{
++ int idx = U8(*ptr);
++ (*ptr)++;
++ SDEBUG(" block: %d\n", idx);
++ if (!idx)
++ ctx->ctx->data_block = 0;
++ else if (idx == 255)
++ ctx->ctx->data_block = ctx->start;
++ else
++ ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
++ SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
++}
++
++static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ SDEBUG(" fb_base: ");
++ ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
++}
++
++static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
++{
++ int port;
++ switch (arg) {
++ case ATOM_PORT_ATI:
++ port = U16(*ptr);
++ if (port < ATOM_IO_NAMES_CNT)
++ SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
++ else
++ SDEBUG(" port: %d\n", port);
++ if (!port)
++ ctx->ctx->io_mode = ATOM_IO_MM;
++ else
++ ctx->ctx->io_mode = ATOM_IO_IIO | port;
++ (*ptr) += 2;
++ break;
++ case ATOM_PORT_PCI:
++ ctx->ctx->io_mode = ATOM_IO_PCI;
++ (*ptr)++;
++ break;
++ case ATOM_PORT_SYSIO:
++ ctx->ctx->io_mode = ATOM_IO_SYSIO;
++ (*ptr)++;
++ break;
++ }
++}
++
++static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
++{
++ ctx->ctx->reg_block = U16(*ptr);
++ (*ptr) += 2;
++ SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
++}
++
++static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++), shift;
++ uint32_t saved, dst;
++ int dptr = *ptr;
++ attr &= 0x38;
++ attr |= atom_def_dst[attr >> 3] << 6;
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
++ SDEBUG(" shift: %d\n", shift);
++ dst <<= shift;
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++), shift;
++ uint32_t saved, dst;
++ int dptr = *ptr;
++ attr &= 0x38;
++ attr |= atom_def_dst[attr >> 3] << 6;
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
++ SDEBUG(" shift: %d\n", shift);
++ dst >>= shift;
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++), shift;
++ uint32_t saved, dst;
++ int dptr = *ptr;
++ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++	/* op needs the full dst value */
++ dst = saved;
++ shift = atom_get_src(ctx, attr, ptr);
++ SDEBUG(" shift: %d\n", shift);
++ dst <<= shift;
++ dst &= atom_arg_mask[dst_align];
++ dst >>= atom_arg_shift[dst_align];
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++), shift;
++ uint32_t saved, dst;
++ int dptr = *ptr;
++ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++	/* op needs the full dst value */
++ dst = saved;
++ shift = atom_get_src(ctx, attr, ptr);
++ SDEBUG(" shift: %d\n", shift);
++ dst >>= shift;
++ dst &= atom_arg_mask[dst_align];
++ dst >>= atom_arg_shift[dst_align];
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src, saved;
++ int dptr = *ptr;
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ SDEBUG(" src: ");
++ src = atom_get_src(ctx, attr, ptr);
++ dst -= src;
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t src, val, target;
++ SDEBUG(" switch: ");
++ src = atom_get_src(ctx, attr, ptr);
++ while (U16(*ptr) != ATOM_CASE_END)
++ if (U8(*ptr) == ATOM_CASE_MAGIC) {
++ (*ptr)++;
++ SDEBUG(" case: ");
++			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
++ target = U16(*ptr);
++ if (val == src) {
++ SDEBUG(" target: %04X\n", target);
++ *ptr = ctx->start + target;
++ return;
++ }
++ (*ptr) += 2;
++ } else {
++ printk(KERN_INFO "Bad case.\n");
++ return;
++ }
++ (*ptr) += 2;
++}
++
++static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src;
++ SDEBUG(" src1: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
++ SDEBUG(" src2: ");
++ src = atom_get_src(ctx, attr, ptr);
++ ctx->ctx->cs_equal = ((dst & src) == 0);
++ SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
++}
++
++static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
++{
++ uint8_t attr = U8((*ptr)++);
++ uint32_t dst, src, saved;
++ int dptr = *ptr;
++ SDEBUG(" dst: ");
++ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++ SDEBUG(" src: ");
++ src = atom_get_src(ctx, attr, ptr);
++ dst ^= src;
++ SDEBUG(" dst: ");
++ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
++{
++ printk(KERN_INFO "unimplemented!\n");
++}
++
++static struct {
++ void (*func) (atom_exec_context *, int *, int);
++ int arg;
++} opcode_table[ATOM_OP_CNT] = {
++	{ NULL, 0 },
++	{ atom_op_move, ATOM_ARG_REG },
++	{ atom_op_move, ATOM_ARG_PS },
++	{ atom_op_move, ATOM_ARG_WS },
++	{ atom_op_move, ATOM_ARG_FB },
++	{ atom_op_move, ATOM_ARG_PLL },
++	{ atom_op_move, ATOM_ARG_MC },
++	{ atom_op_and, ATOM_ARG_REG },
++	{ atom_op_and, ATOM_ARG_PS },
++	{ atom_op_and, ATOM_ARG_WS },
++	{ atom_op_and, ATOM_ARG_FB },
++	{ atom_op_and, ATOM_ARG_PLL },
++	{ atom_op_and, ATOM_ARG_MC },
++	{ atom_op_or, ATOM_ARG_REG },
++	{ atom_op_or, ATOM_ARG_PS },
++	{ atom_op_or, ATOM_ARG_WS },
++	{ atom_op_or, ATOM_ARG_FB },
++	{ atom_op_or, ATOM_ARG_PLL },
++	{ atom_op_or, ATOM_ARG_MC },
++	{ atom_op_shift_left, ATOM_ARG_REG },
++	{ atom_op_shift_left, ATOM_ARG_PS },
++	{ atom_op_shift_left, ATOM_ARG_WS },
++	{ atom_op_shift_left, ATOM_ARG_FB },
++	{ atom_op_shift_left, ATOM_ARG_PLL },
++	{ atom_op_shift_left, ATOM_ARG_MC },
++	{ atom_op_shift_right, ATOM_ARG_REG },
++	{ atom_op_shift_right, ATOM_ARG_PS },
++	{ atom_op_shift_right, ATOM_ARG_WS },
++	{ atom_op_shift_right, ATOM_ARG_FB },
++	{ atom_op_shift_right, ATOM_ARG_PLL },
++	{ atom_op_shift_right, ATOM_ARG_MC },
++	{ atom_op_mul, ATOM_ARG_REG },
++	{ atom_op_mul, ATOM_ARG_PS },
++	{ atom_op_mul, ATOM_ARG_WS },
++	{ atom_op_mul, ATOM_ARG_FB },
++	{ atom_op_mul, ATOM_ARG_PLL },
++	{ atom_op_mul, ATOM_ARG_MC },
++	{ atom_op_div, ATOM_ARG_REG },
++	{ atom_op_div, ATOM_ARG_PS },
++	{ atom_op_div, ATOM_ARG_WS },
++	{ atom_op_div, ATOM_ARG_FB },
++	{ atom_op_div, ATOM_ARG_PLL },
++	{ atom_op_div, ATOM_ARG_MC },
++	{ atom_op_add, ATOM_ARG_REG },
++	{ atom_op_add, ATOM_ARG_PS },
++	{ atom_op_add, ATOM_ARG_WS },
++	{ atom_op_add, ATOM_ARG_FB },
++	{ atom_op_add, ATOM_ARG_PLL },
++	{ atom_op_add, ATOM_ARG_MC },
++	{ atom_op_sub, ATOM_ARG_REG },
++	{ atom_op_sub, ATOM_ARG_PS },
++	{ atom_op_sub, ATOM_ARG_WS },
++	{ atom_op_sub, ATOM_ARG_FB },
++	{ atom_op_sub, ATOM_ARG_PLL },
++	{ atom_op_sub, ATOM_ARG_MC },
++	{ atom_op_setport, ATOM_PORT_ATI },
++	{ atom_op_setport, ATOM_PORT_PCI },
++	{ atom_op_setport, ATOM_PORT_SYSIO },
++	{ atom_op_setregblock, 0 },
++	{ atom_op_setfbbase, 0 },
++	{ atom_op_compare, ATOM_ARG_REG },
++	{ atom_op_compare, ATOM_ARG_PS },
++	{ atom_op_compare, ATOM_ARG_WS },
++	{ atom_op_compare, ATOM_ARG_FB },
++	{ atom_op_compare, ATOM_ARG_PLL },
++	{ atom_op_compare, ATOM_ARG_MC },
++	{ atom_op_switch, 0 },
++	{ atom_op_jump, ATOM_COND_ALWAYS },
++	{ atom_op_jump, ATOM_COND_EQUAL },
++	{ atom_op_jump, ATOM_COND_BELOW },
++	{ atom_op_jump, ATOM_COND_ABOVE },
++	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
++	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
++	{ atom_op_jump, ATOM_COND_NOTEQUAL },
++	{ atom_op_test, ATOM_ARG_REG },
++	{ atom_op_test, ATOM_ARG_PS },
++	{ atom_op_test, ATOM_ARG_WS },
++	{ atom_op_test, ATOM_ARG_FB },
++	{ atom_op_test, ATOM_ARG_PLL },
++	{ atom_op_test, ATOM_ARG_MC },
++	{ atom_op_delay, ATOM_UNIT_MILLISEC },
++	{ atom_op_delay, ATOM_UNIT_MICROSEC },
++	{ atom_op_calltable, 0 },
++	{ atom_op_repeat, 0 },
++	{ atom_op_clear, ATOM_ARG_REG },
++	{ atom_op_clear, ATOM_ARG_PS },
++	{ atom_op_clear, ATOM_ARG_WS },
++	{ atom_op_clear, ATOM_ARG_FB },
++	{ atom_op_clear, ATOM_ARG_PLL },
++	{ atom_op_clear, ATOM_ARG_MC },
++	{ atom_op_nop, 0 },
++	{ atom_op_eot, 0 },
++	{ atom_op_mask, ATOM_ARG_REG },
++	{ atom_op_mask, ATOM_ARG_PS },
++	{ atom_op_mask, ATOM_ARG_WS },
++	{ atom_op_mask, ATOM_ARG_FB },
++	{ atom_op_mask, ATOM_ARG_PLL },
++	{ atom_op_mask, ATOM_ARG_MC },
++	{ atom_op_postcard, 0 },
++	{ atom_op_beep, 0 },
++	{ atom_op_savereg, 0 },
++	{ atom_op_restorereg, 0 },
++	{ atom_op_setdatablock, 0 },
++	{ atom_op_xor, ATOM_ARG_REG },
++	{ atom_op_xor, ATOM_ARG_PS },
++	{ atom_op_xor, ATOM_ARG_WS },
++	{ atom_op_xor, ATOM_ARG_FB },
++	{ atom_op_xor, ATOM_ARG_PLL },
++	{ atom_op_xor, ATOM_ARG_MC },
++	{ atom_op_shl, ATOM_ARG_REG },
++	{ atom_op_shl, ATOM_ARG_PS },
++	{ atom_op_shl, ATOM_ARG_WS },
++	{ atom_op_shl, ATOM_ARG_FB },
++	{ atom_op_shl, ATOM_ARG_PLL },
++	{ atom_op_shl, ATOM_ARG_MC },
++	{ atom_op_shr, ATOM_ARG_REG },
++	{ atom_op_shr, ATOM_ARG_PS },
++	{ atom_op_shr, ATOM_ARG_WS },
++	{ atom_op_shr, ATOM_ARG_FB },
++	{ atom_op_shr, ATOM_ARG_PLL },
++	{ atom_op_shr, ATOM_ARG_MC },
++	{ atom_op_debug, 0 },
++};
++
++static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
++{
++ int base = CU16(ctx->cmd_table + 4 + 2 * index);
++ int len, ws, ps, ptr;
++ unsigned char op;
++ atom_exec_context ectx;
++ int ret = 0;
++
++ if (!base)
++ return -EINVAL;
++
++ len = CU16(base + ATOM_CT_SIZE_PTR);
++ ws = CU8(base + ATOM_CT_WS_PTR);
++ ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
++ ptr = base + ATOM_CT_CODE_PTR;
++
++ SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
++
++ ectx.ctx = ctx;
++ ectx.ps_shift = ps / 4;
++ ectx.start = base;
++ ectx.ps = params;
++ ectx.abort = false;
++ ectx.last_jump = 0;
++ if (ws)
++ ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
++ else
++ ectx.ws = NULL;
++
++ debug_depth++;
++ while (1) {
++ op = CU8(ptr++);
++ if (op < ATOM_OP_NAMES_CNT)
++ SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
++ else
++ SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
++ if (ectx.abort) {
++ DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
++ base, len, ws, ps, ptr - 1);
++ ret = -EINVAL;
++ goto free;
++ }
++
++ if (op < ATOM_OP_CNT && op > 0)
++ opcode_table[op].func(&ectx, &ptr,
++ opcode_table[op].arg);
++ else
++ break;
++
++ if (op == ATOM_OP_EOT)
++ break;
++ }
++ debug_depth--;
++ SDEBUG("<<\n");
++
++free:
++	kfree(ectx.ws);	/* kfree(NULL) is a no-op */
++ return ret;
++}
++
++int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
++{
++ int r;
++
++ mutex_lock(&ctx->mutex);
++ /* reset data block */
++ ctx->data_block = 0;
++ /* reset reg block */
++ ctx->reg_block = 0;
++ /* reset fb window */
++ ctx->fb_base = 0;
++ /* reset io mode */
++ ctx->io_mode = ATOM_IO_MM;
++ /* reset divmul */
++ ctx->divmul[0] = 0;
++ ctx->divmul[1] = 0;
++ r = amdgpu_atom_execute_table_locked(ctx, index, params);
++ mutex_unlock(&ctx->mutex);
++ return r;
++}
++
++static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
++
++static void atom_index_iio(struct atom_context *ctx, int base)
++{
++	/* one u16 table offset for each of the 256 possible indirect IO ports */
++	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
++ if (!ctx->iio)
++ return;
++ while (CU8(base) == ATOM_IIO_START) {
++ ctx->iio[CU8(base + 1)] = base + 2;
++ base += 2;
++ while (CU8(base) != ATOM_IIO_END)
++ base += atom_iio_len[CU8(base)];
++ base += 3;
++ }
++}
++
++struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
++{
++ int base;
++ struct atom_context *ctx =
++ kzalloc(sizeof(struct atom_context), GFP_KERNEL);
++ char *str;
++ char name[512];
++ int i;
++
++ if (!ctx)
++ return NULL;
++
++ ctx->card = card;
++ ctx->bios = bios;
++
++ if (CU16(0) != ATOM_BIOS_MAGIC) {
++ printk(KERN_INFO "Invalid BIOS magic.\n");
++ kfree(ctx);
++ return NULL;
++ }
++ if (strncmp
++ (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
++ strlen(ATOM_ATI_MAGIC))) {
++ printk(KERN_INFO "Invalid ATI magic.\n");
++ kfree(ctx);
++ return NULL;
++ }
++
++ base = CU16(ATOM_ROM_TABLE_PTR);
++ if (strncmp
++ (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
++ strlen(ATOM_ROM_MAGIC))) {
++ printk(KERN_INFO "Invalid ATOM magic.\n");
++ kfree(ctx);
++ return NULL;
++ }
++
++ ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
++ ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
++ atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
++ if (!ctx->iio) {
++ amdgpu_atom_destroy(ctx);
++ return NULL;
++ }
++
++ str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
++ while (*str && ((*str == '\n') || (*str == '\r')))
++ str++;
++ /* name string isn't always 0 terminated */
++ for (i = 0; i < 511; i++) {
++ name[i] = str[i];
++ if (name[i] < '.' || name[i] > 'z') {
++ name[i] = 0;
++ break;
++ }
++ }
++ printk(KERN_INFO "ATOM BIOS: %s\n", name);
++
++ return ctx;
++}
++
++int amdgpu_atom_asic_init(struct atom_context *ctx)
++{
++ int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
++ uint32_t ps[16];
++ int ret;
++
++	memset(ps, 0, sizeof(ps));
++
++ ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
++ ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
++ if (!ps[0] || !ps[1])
++ return 1;
++
++ if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
++ return 1;
++ ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
++ if (ret)
++ return ret;
++
++	memset(ps, 0, sizeof(ps));
++
++ return ret;
++}
++
++void amdgpu_atom_destroy(struct atom_context *ctx)
++{
++ kfree(ctx->iio);
++ kfree(ctx);
++}
++
++bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
++				   uint16_t *size, uint8_t *frev, uint8_t *crev,
++				   uint16_t *data_start)
++{
++ int offset = index * 2 + 4;
++ int idx = CU16(ctx->data_table + offset);
++ u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
++
++ if (!mdt[index])
++ return false;
++
++ if (size)
++ *size = CU16(idx);
++ if (frev)
++ *frev = CU8(idx + 2);
++ if (crev)
++ *crev = CU8(idx + 3);
++ *data_start = idx;
++ return true;
++}
++
++bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index,
++				  uint8_t *frev, uint8_t *crev)
++{
++ int offset = index * 2 + 4;
++ int idx = CU16(ctx->cmd_table + offset);
++ u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
++
++ if (!mct[index])
++ return false;
++
++ if (frev)
++ *frev = CU8(idx + 2);
++ if (crev)
++ *crev = CU8(idx + 3);
++ return true;
++}
++
++int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx)
++{
++ int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
++ uint16_t data_offset;
++ int usage_bytes = 0;
++ struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
++
++ if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
++ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
++
++ DRM_DEBUG("atom firmware requested %08x %dkb\n",
++ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
++ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
++
++ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
++ }
++ ctx->scratch_size_bytes = 0;
++ if (usage_bytes == 0)
++ usage_bytes = 20 * 1024;
++ /* allocate some scratch memory */
++ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
++ if (!ctx->scratch)
++ return -ENOMEM;
++ ctx->scratch_size_bytes = usage_bytes;
++ return 0;
++}
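
For orientation, the interpreter above is driven through a small lifecycle: fill a struct card_info with register accessors, parse the BIOS image, allocate scratch, run ASIC_Init, and destroy the context on teardown. A minimal sketch follows, assuming a hypothetical MMIO mapping (my_mmio_base), BIOS pointer (bios_image) and wrapper (example_atom_bringup); a real driver fills in every card_info callback and keeps the context in its device structure.

#include <linux/io.h>
#include <linux/mutex.h>
#include "atom.h"

/* Hedged sketch: my_mmio_base and example_atom_bringup are hypothetical. */
static void __iomem *my_mmio_base;

static uint32_t my_reg_read(struct card_info *card, uint32_t reg)
{
	return readl(my_mmio_base + (reg << 2));	/* regs are dword-indexed */
}

static void my_reg_write(struct card_info *card, uint32_t reg, uint32_t val)
{
	writel(val, my_mmio_base + (reg << 2));
}

static int example_atom_bringup(struct drm_device *dev, void *bios_image)
{
	static struct card_info info;
	struct atom_context *actx;
	int r;

	info.dev = dev;
	info.reg_read = my_reg_read;
	info.reg_write = my_reg_write;
	/* ioreg_*, mc_* and pll_* hooks omitted; fill them all in practice */

	actx = amdgpu_atom_parse(&info, bios_image);
	if (!actx)
		return -EINVAL;
	mutex_init(&actx->mutex);	/* the parser leaves the mutex to the caller */

	r = amdgpu_atom_allocate_fb_scratch(actx);
	if (r) {
		amdgpu_atom_destroy(actx);
		return r;
	}
	return amdgpu_atom_asic_init(actx);	/* runs the ASIC_Init command table */
}
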
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
+new file mode 100644
+index 0000000..09d0f82
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atom.h
+@@ -0,0 +1,159 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Stanislaw Skowronek
++ */
++
++#ifndef ATOM_H
++#define ATOM_H
++
++#include <linux/types.h>
++#include <drm/drmP.h>
++
++#define ATOM_BIOS_MAGIC 0xAA55
++#define ATOM_ATI_MAGIC_PTR 0x30
++#define ATOM_ATI_MAGIC " 761295520"
++#define ATOM_ROM_TABLE_PTR 0x48
++
++#define ATOM_ROM_MAGIC "ATOM"
++#define ATOM_ROM_MAGIC_PTR 4
++
++#define ATOM_ROM_MSG_PTR 0x10
++#define ATOM_ROM_CMD_PTR 0x1E
++#define ATOM_ROM_DATA_PTR 0x20
++
++#define ATOM_CMD_INIT 0
++#define ATOM_CMD_SETSCLK 0x0A
++#define ATOM_CMD_SETMCLK 0x0B
++#define ATOM_CMD_SETPCLK 0x0C
++#define ATOM_CMD_SPDFANCNTL 0x39
++
++#define ATOM_DATA_FWI_PTR 0xC
++#define ATOM_DATA_IIO_PTR 0x32
++
++#define ATOM_FWI_DEFSCLK_PTR 8
++#define ATOM_FWI_DEFMCLK_PTR 0xC
++#define ATOM_FWI_MAXSCLK_PTR 0x24
++#define ATOM_FWI_MAXMCLK_PTR 0x28
++
++#define ATOM_CT_SIZE_PTR 0
++#define ATOM_CT_WS_PTR 4
++#define ATOM_CT_PS_PTR 5
++#define ATOM_CT_PS_MASK 0x7F
++#define ATOM_CT_CODE_PTR 6
++
++#define ATOM_OP_CNT 123
++#define ATOM_OP_EOT 91
++
++#define ATOM_CASE_MAGIC 0x63
++#define ATOM_CASE_END 0x5A5A
++
++#define ATOM_ARG_REG 0
++#define ATOM_ARG_PS 1
++#define ATOM_ARG_WS 2
++#define ATOM_ARG_FB 3
++#define ATOM_ARG_ID 4
++#define ATOM_ARG_IMM 5
++#define ATOM_ARG_PLL 6
++#define ATOM_ARG_MC 7
++
++#define ATOM_SRC_DWORD 0
++#define ATOM_SRC_WORD0 1
++#define ATOM_SRC_WORD8 2
++#define ATOM_SRC_WORD16 3
++#define ATOM_SRC_BYTE0 4
++#define ATOM_SRC_BYTE8 5
++#define ATOM_SRC_BYTE16 6
++#define ATOM_SRC_BYTE24 7
++
++#define ATOM_WS_QUOTIENT 0x40
++#define ATOM_WS_REMAINDER 0x41
++#define ATOM_WS_DATAPTR 0x42
++#define ATOM_WS_SHIFT 0x43
++#define ATOM_WS_OR_MASK 0x44
++#define ATOM_WS_AND_MASK 0x45
++#define ATOM_WS_FB_WINDOW 0x46
++#define ATOM_WS_ATTRIBUTES 0x47
++#define ATOM_WS_REGPTR 0x48
++
++#define ATOM_IIO_NOP 0
++#define ATOM_IIO_START 1
++#define ATOM_IIO_READ 2
++#define ATOM_IIO_WRITE 3
++#define ATOM_IIO_CLEAR 4
++#define ATOM_IIO_SET 5
++#define ATOM_IIO_MOVE_INDEX 6
++#define ATOM_IIO_MOVE_ATTR 7
++#define ATOM_IIO_MOVE_DATA 8
++#define ATOM_IIO_END 9
++
++#define ATOM_IO_MM 0
++#define ATOM_IO_PCI 1
++#define ATOM_IO_SYSIO 2
++#define ATOM_IO_IIO 0x80
++
++struct card_info {
++ struct drm_device *dev;
++ void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
++ uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
++ void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
++ uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
++ void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
++ uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
++ void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
++ uint32_t (* pll_read)(struct card_info *, uint32_t); /* filled by driver */
++};
++
++struct atom_context {
++ struct card_info *card;
++ struct mutex mutex;
++ void *bios;
++ uint32_t cmd_table, data_table;
++ uint16_t *iio;
++
++ uint16_t data_block;
++ uint32_t fb_base;
++ uint32_t divmul[2];
++ uint16_t io_attr;
++ uint16_t reg_block;
++ uint8_t shift;
++ int cs_equal, cs_above;
++ int io_mode;
++ uint32_t *scratch;
++ int scratch_size_bytes;
++};
++
++extern int amdgpu_atom_debug;
++
++struct atom_context *amdgpu_atom_parse(struct card_info *, void *);
++int amdgpu_atom_execute_table(struct atom_context *, int, uint32_t *);
++int amdgpu_atom_asic_init(struct atom_context *);
++void amdgpu_atom_destroy(struct atom_context *);
++bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
++ uint8_t *frev, uint8_t *crev, uint16_t *data_start);
++bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index,
++ uint8_t *frev, uint8_t *crev);
++int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx);
++#include "atom-types.h"
++#include "atombios.h"
++#include "ObjectID.h"
++
++#endif
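
A common pattern with the helpers declared above is to version-check a table before interpreting it, since table layouts grow across revisions. A hedged sketch, where actx is a context from amdgpu_atom_parse() and the frev gate is illustrative rather than mandated by this patch:

/* Hedged sketch: the revision accepted below is illustrative. */
static void example_read_table(struct atom_context *actx)
{
	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
	uint16_t size, data_offset;
	uint8_t frev, crev;

	if (!amdgpu_atom_parse_data_header(actx, index, &size, &frev,
					   &crev, &data_offset))
		return;		/* table absent in this BIOS */

	if (frev == 1) {
		/* cast actx->bios + data_offset to the _ATOM_* struct
		 * matching frev/crev before reading any field
		 */
	}
}
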
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+new file mode 100644
+index 0000000..49aa350
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+@@ -0,0 +1,807 @@
++/*
++ * Copyright 2007-8 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ */
++#include <drm/drmP.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/amdgpu_drm.h>
++#include <drm/drm_fixed.h>
++#include "amdgpu.h"
++#include "atom.h"
++#include "atom-bits.h"
++#include "atombios_encoders.h"
++#include "amdgpu_atombios.h"
++#include "amdgpu_pll.h"
++#include "amdgpu_connectors.h"
++
++void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ SET_CRTC_OVERSCAN_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
++ int a1, a2;
++
++ memset(&args, 0, sizeof(args));
++
++ args.ucCRTC = amdgpu_crtc->crtc_id;
++
++ switch (amdgpu_crtc->rmx_type) {
++ case RMX_CENTER:
++ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
++ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
++ args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
++ args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
++ break;
++ case RMX_ASPECT:
++ a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
++ a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
++
++ if (a1 > a2) {
++ args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
++ args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
++ } else if (a2 > a1) {
++ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
++ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
++ }
++ break;
++ case RMX_FULL:
++ default:
++ args.usOverscanRight = cpu_to_le16(amdgpu_crtc->h_border);
++ args.usOverscanLeft = cpu_to_le16(amdgpu_crtc->h_border);
++ args.usOverscanBottom = cpu_to_le16(amdgpu_crtc->v_border);
++ args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border);
++ break;
++ }
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ ENABLE_SCALER_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
++
++ memset(&args, 0, sizeof(args));
++
++ args.ucScaler = amdgpu_crtc->crtc_id;
++
++ switch (amdgpu_crtc->rmx_type) {
++ case RMX_FULL:
++ args.ucEnable = ATOM_SCALER_EXPANSION;
++ break;
++ case RMX_CENTER:
++ args.ucEnable = ATOM_SCALER_CENTER;
++ break;
++ case RMX_ASPECT:
++ args.ucEnable = ATOM_SCALER_EXPANSION;
++ break;
++ default:
++ args.ucEnable = ATOM_SCALER_DISABLE;
++ break;
++ }
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ int index =
++ GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
++ ENABLE_CRTC_PS_ALLOCATION args;
++
++ memset(&args, 0, sizeof(args));
++
++ args.ucCRTC = amdgpu_crtc->crtc_id;
++ args.ucEnable = lock;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
++ ENABLE_CRTC_PS_ALLOCATION args;
++
++ memset(&args, 0, sizeof(args));
++
++ args.ucCRTC = amdgpu_crtc->crtc_id;
++ args.ucEnable = state;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
++ BLANK_CRTC_PS_ALLOCATION args;
++
++ memset(&args, 0, sizeof(args));
++
++ args.ucCRTC = amdgpu_crtc->crtc_id;
++ args.ucBlanking = state;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
++ ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
++
++ memset(&args, 0, sizeof(args));
++
++ args.ucDispPipeId = amdgpu_crtc->crtc_id;
++ args.ucEnable = state;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
++{
++ int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
++ ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
++
++ memset(&args, 0, sizeof(args));
++
++ args.ucEnable = ATOM_INIT;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
++ struct drm_display_mode *mode)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
++ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
++ u16 misc = 0;
++
++ memset(&args, 0, sizeof(args));
++ args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (amdgpu_crtc->h_border * 2));
++ args.usH_Blanking_Time =
++ cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (amdgpu_crtc->h_border * 2));
++ args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (amdgpu_crtc->v_border * 2));
++ args.usV_Blanking_Time =
++ cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (amdgpu_crtc->v_border * 2));
++ args.usH_SyncOffset =
++ cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + amdgpu_crtc->h_border);
++ args.usH_SyncWidth =
++ cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
++ args.usV_SyncOffset =
++ cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + amdgpu_crtc->v_border);
++ args.usV_SyncWidth =
++ cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
++ args.ucH_Border = amdgpu_crtc->h_border;
++ args.ucV_Border = amdgpu_crtc->v_border;
++
++ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
++ misc |= ATOM_VSYNC_POLARITY;
++ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
++ misc |= ATOM_HSYNC_POLARITY;
++ if (mode->flags & DRM_MODE_FLAG_CSYNC)
++ misc |= ATOM_COMPOSITESYNC;
++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++ misc |= ATOM_INTERLACE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_DOUBLE_CLOCK_MODE;
++
++ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
++ args.ucCRTC = amdgpu_crtc->crtc_id;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++union atom_enable_ss {
++ ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
++ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
++ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
++};
++
++static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev,
++ int enable,
++ int pll_id,
++ int crtc_id,
++ struct amdgpu_atom_ss *ss)
++{
++ unsigned i;
++ int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
++ union atom_enable_ss args;
++
++ if (enable) {
++ /* Don't mess with SS if percentage is 0 or external ss.
++		 * SS was already disabled earlier, and disabling it
++ * again can cause display problems if the pll is already
++ * programmed.
++ */
++ if (ss->percentage == 0)
++ return;
++ if (ss->type & ATOM_EXTERNAL_SS_MASK)
++ return;
++ } else {
++ for (i = 0; i < adev->mode_info.num_crtc; i++) {
++ if (adev->mode_info.crtcs[i] &&
++ adev->mode_info.crtcs[i]->enabled &&
++ i != crtc_id &&
++ pll_id == adev->mode_info.crtcs[i]->pll_id) {
++				/* another crtc is using this pll; don't turn
++				 * off spread spectrum, as that might blank
++				 * the display on the active crtc
++				 */
++ return;
++ }
++ }
++ }
++
++ memset(&args, 0, sizeof(args));
++
++ args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
++ args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
++ switch (pll_id) {
++ case ATOM_PPLL1:
++ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
++ break;
++ case ATOM_PPLL2:
++ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
++ break;
++ case ATOM_DCPLL:
++ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
++ break;
++ case ATOM_PPLL_INVALID:
++ return;
++ }
++ args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
++ args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
++ args.v3.ucEnable = enable;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++union adjust_pixel_clock {
++ ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
++ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
++};
++
++static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
++ struct drm_display_mode *mode)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct drm_encoder *encoder = amdgpu_crtc->encoder;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
++ u32 adjusted_clock = mode->clock;
++ int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++ u32 dp_clock = mode->clock;
++ u32 clock = mode->clock;
++ int bpc = amdgpu_crtc->bpc;
++ bool is_duallink = amdgpu_dig_monitor_is_duallink(encoder, mode->clock);
++ union adjust_pixel_clock args;
++ u8 frev, crev;
++ int index;
++
++ amdgpu_crtc->pll_flags = AMDGPU_PLL_USE_FRAC_FB_DIV;
++
++ if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
++ (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
++ if (connector) {
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector =
++ amdgpu_connector->con_priv;
++
++ dp_clock = dig_connector->dp_clock;
++ }
++ }
++
++ /* use recommended ref_div for ss */
++ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++ if (amdgpu_crtc->ss_enabled) {
++ if (amdgpu_crtc->ss.refdiv) {
++ amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV;
++ amdgpu_crtc->pll_reference_div = amdgpu_crtc->ss.refdiv;
++ amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
++ }
++ }
++ }
++
++ /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
++ if (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
++ adjusted_clock = mode->clock * 2;
++ if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
++ amdgpu_crtc->pll_flags |= AMDGPU_PLL_PREFER_CLOSEST_LOWER;
++ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
++ amdgpu_crtc->pll_flags |= AMDGPU_PLL_IS_LCD;
++
++ /* adjust pll for deep color modes */
++ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++ switch (bpc) {
++ case 8:
++ default:
++ break;
++ case 10:
++ clock = (clock * 5) / 4;
++ break;
++ case 12:
++ clock = (clock * 3) / 2;
++ break;
++ case 16:
++ clock = clock * 2;
++ break;
++ }
++ }
++
++ /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
++ * accordingly based on the encoder/transmitter to work around
++ * special hw requirements.
++ */
++ index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
++ &crev))
++ return adjusted_clock;
++
++ memset(&args, 0, sizeof(args));
++
++ switch (frev) {
++ case 1:
++ switch (crev) {
++ case 1:
++ case 2:
++ args.v1.usPixelClock = cpu_to_le16(clock / 10);
++ args.v1.ucTransmitterID = amdgpu_encoder->encoder_id;
++ args.v1.ucEncodeMode = encoder_mode;
++ if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage)
++ args.v1.ucConfig |=
++ ADJUST_DISPLAY_CONFIG_SS_ENABLE;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context,
++ index, (uint32_t *)&args);
++ adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
++ break;
++ case 3:
++ args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
++ args.v3.sInput.ucTransmitterID = amdgpu_encoder->encoder_id;
++ args.v3.sInput.ucEncodeMode = encoder_mode;
++ args.v3.sInput.ucDispPllConfig = 0;
++ if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage)
++ args.v3.sInput.ucDispPllConfig |=
++ DISPPLL_CONFIG_SS_ENABLE;
++ if (ENCODER_MODE_IS_DP(encoder_mode)) {
++ args.v3.sInput.ucDispPllConfig |=
++ DISPPLL_CONFIG_COHERENT_MODE;
++ /* 16200 or 27000 */
++ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
++ } else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
++ if (dig->coherent_mode)
++ args.v3.sInput.ucDispPllConfig |=
++ DISPPLL_CONFIG_COHERENT_MODE;
++ if (is_duallink)
++ args.v3.sInput.ucDispPllConfig |=
++ DISPPLL_CONFIG_DUAL_LINK;
++ }
++ if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
++ ENCODER_OBJECT_ID_NONE)
++ args.v3.sInput.ucExtTransmitterID =
++ amdgpu_encoder_get_dp_bridge_encoder_id(encoder);
++ else
++ args.v3.sInput.ucExtTransmitterID = 0;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context,
++ index, (uint32_t *)&args);
++ adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
++ if (args.v3.sOutput.ucRefDiv) {
++ amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
++ amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV;
++ amdgpu_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
++ }
++ if (args.v3.sOutput.ucPostDiv) {
++ amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
++ amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_POST_DIV;
++ amdgpu_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
++ return adjusted_clock;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
++ return adjusted_clock;
++ }
++
++ return adjusted_clock;
++}
++
++union set_pixel_clock {
++ SET_PIXEL_CLOCK_PS_ALLOCATION base;
++ PIXEL_CLOCK_PARAMETERS v1;
++ PIXEL_CLOCK_PARAMETERS_V2 v2;
++ PIXEL_CLOCK_PARAMETERS_V3 v3;
++ PIXEL_CLOCK_PARAMETERS_V5 v5;
++ PIXEL_CLOCK_PARAMETERS_V6 v6;
++};
++
++/* on DCE5, make sure the voltage is high enough to support the
++ * required disp clk.
++ */
++void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
++ u32 dispclk)
++{
++ u8 frev, crev;
++ int index;
++ union set_pixel_clock args;
++
++ memset(&args, 0, sizeof(args));
++
++ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
++ &crev))
++ return;
++
++ switch (frev) {
++ case 1:
++ switch (crev) {
++ case 5:
++ /* if the default dcpll clock is specified,
++ * SetPixelClock provides the dividers
++ */
++ args.v5.ucCRTC = ATOM_CRTC_INVALID;
++ args.v5.usPixelClock = cpu_to_le16(dispclk);
++ args.v5.ucPpll = ATOM_DCPLL;
++ break;
++ case 6:
++ /* if the default dcpll clock is specified,
++ * SetPixelClock provides the dividers
++ */
++ args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
++ args.v6.ucPpll = ATOM_EXT_PLL1;
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
++ return;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
++ return;
++ }
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
++{
++	/* for DP the pixel clock is sourced from a PPLL only when an
++	 * internal PLL (below ATOM_EXT_PLL1) is in use
++	 */
++	if (ENCODER_MODE_IS_DP(encoder_mode))
++		return pll_id < ATOM_EXT_PLL1;
++	return true;
++}
++
++void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
++ u32 crtc_id,
++ int pll_id,
++ u32 encoder_mode,
++ u32 encoder_id,
++ u32 clock,
++ u32 ref_div,
++ u32 fb_div,
++ u32 frac_fb_div,
++ u32 post_div,
++ int bpc,
++ bool ss_enabled,
++ struct amdgpu_atom_ss *ss)
++{
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ u8 frev, crev;
++ int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
++ union set_pixel_clock args;
++
++ memset(&args, 0, sizeof(args));
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
++ &crev))
++ return;
++
++ switch (frev) {
++ case 1:
++ switch (crev) {
++ case 1:
++ if (clock == ATOM_DISABLE)
++ return;
++ args.v1.usPixelClock = cpu_to_le16(clock / 10);
++ args.v1.usRefDiv = cpu_to_le16(ref_div);
++ args.v1.usFbDiv = cpu_to_le16(fb_div);
++ args.v1.ucFracFbDiv = frac_fb_div;
++ args.v1.ucPostDiv = post_div;
++ args.v1.ucPpll = pll_id;
++ args.v1.ucCRTC = crtc_id;
++ args.v1.ucRefDivSrc = 1;
++ break;
++ case 2:
++ args.v2.usPixelClock = cpu_to_le16(clock / 10);
++ args.v2.usRefDiv = cpu_to_le16(ref_div);
++ args.v2.usFbDiv = cpu_to_le16(fb_div);
++ args.v2.ucFracFbDiv = frac_fb_div;
++ args.v2.ucPostDiv = post_div;
++ args.v2.ucPpll = pll_id;
++ args.v2.ucCRTC = crtc_id;
++ args.v2.ucRefDivSrc = 1;
++ break;
++ case 3:
++ args.v3.usPixelClock = cpu_to_le16(clock / 10);
++ args.v3.usRefDiv = cpu_to_le16(ref_div);
++ args.v3.usFbDiv = cpu_to_le16(fb_div);
++ args.v3.ucFracFbDiv = frac_fb_div;
++ args.v3.ucPostDiv = post_div;
++ args.v3.ucPpll = pll_id;
++ if (crtc_id == ATOM_CRTC2)
++ args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
++ else
++ args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
++ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
++ args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
++ args.v3.ucTransmitterId = encoder_id;
++ args.v3.ucEncoderMode = encoder_mode;
++ break;
++ case 5:
++ args.v5.ucCRTC = crtc_id;
++ args.v5.usPixelClock = cpu_to_le16(clock / 10);
++ args.v5.ucRefDiv = ref_div;
++ args.v5.usFbDiv = cpu_to_le16(fb_div);
++ args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
++ args.v5.ucPostDiv = post_div;
++ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
++ if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) &&
++ (pll_id < ATOM_EXT_PLL1))
++ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
++ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++ switch (bpc) {
++ case 8:
++ default:
++ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
++ break;
++ case 10:
++ /* yes this is correct, the atom define is wrong */
++ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
++ break;
++ case 12:
++ /* yes this is correct, the atom define is wrong */
++ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
++ break;
++ }
++ }
++ args.v5.ucTransmitterID = encoder_id;
++ args.v5.ucEncoderMode = encoder_mode;
++ args.v5.ucPpll = pll_id;
++ break;
++ case 6:
++ args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
++ args.v6.ucRefDiv = ref_div;
++ args.v6.usFbDiv = cpu_to_le16(fb_div);
++ args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
++ args.v6.ucPostDiv = post_div;
++ args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
++ if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) &&
++ (pll_id < ATOM_EXT_PLL1) &&
++ !is_pixel_clock_source_from_pll(encoder_mode, pll_id))
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
++ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++ switch (bpc) {
++ case 8:
++ default:
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
++ break;
++ case 10:
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
++ break;
++ case 12:
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
++ break;
++ case 16:
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
++ break;
++ }
++ }
++ args.v6.ucTransmitterID = encoder_id;
++ args.v6.ucEncoderMode = encoder_mode;
++ args.v6.ucPpll = pll_id;
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
++ return;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
++ return;
++ }
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
++ struct drm_display_mode *mode)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder =
++ to_amdgpu_encoder(amdgpu_crtc->encoder);
++ int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
++
++ amdgpu_crtc->bpc = 8;
++ amdgpu_crtc->ss_enabled = false;
++
++ if ((amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
++ (amdgpu_encoder_get_dp_bridge_encoder_id(amdgpu_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
++ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
++ struct drm_connector *connector =
++ amdgpu_get_connector_for_encoder(amdgpu_crtc->encoder);
++ struct amdgpu_connector *amdgpu_connector =
++ to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector =
++ amdgpu_connector->con_priv;
++ int dp_clock;
++
++ /* Assign mode clock for hdmi deep color max clock limit check */
++ amdgpu_connector->pixelclock_for_modeset = mode->clock;
++ amdgpu_crtc->bpc = amdgpu_connector_get_monitor_bpc(connector);
++
++ switch (encoder_mode) {
++ case ATOM_ENCODER_MODE_DP_MST:
++ case ATOM_ENCODER_MODE_DP:
++ /* DP/eDP */
++ dp_clock = dig_connector->dp_clock / 10;
++ amdgpu_crtc->ss_enabled =
++ amdgpu_atombios_get_asic_ss_info(adev, &amdgpu_crtc->ss,
++ ASIC_INTERNAL_SS_ON_DP,
++ dp_clock);
++ break;
++ case ATOM_ENCODER_MODE_LVDS:
++ amdgpu_crtc->ss_enabled =
++ amdgpu_atombios_get_asic_ss_info(adev,
++ &amdgpu_crtc->ss,
++ dig->lcd_ss_id,
++ mode->clock / 10);
++ break;
++ case ATOM_ENCODER_MODE_DVI:
++ amdgpu_crtc->ss_enabled =
++ amdgpu_atombios_get_asic_ss_info(adev,
++ &amdgpu_crtc->ss,
++ ASIC_INTERNAL_SS_ON_TMDS,
++ mode->clock / 10);
++ break;
++ case ATOM_ENCODER_MODE_HDMI:
++ amdgpu_crtc->ss_enabled =
++ amdgpu_atombios_get_asic_ss_info(adev,
++ &amdgpu_crtc->ss,
++ ASIC_INTERNAL_SS_ON_HDMI,
++ mode->clock / 10);
++ break;
++ default:
++ break;
++ }
++ }
++
++ /* adjust pixel clock as needed */
++ amdgpu_crtc->adjusted_clock = amdgpu_atombios_crtc_adjust_pll(crtc, mode);
++
++ return 0;
++}
++
++void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
++{
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder =
++ to_amdgpu_encoder(amdgpu_crtc->encoder);
++ u32 pll_clock = mode->clock;
++ u32 clock = mode->clock;
++ u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
++ struct amdgpu_pll *pll;
++ int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
++
++ /* pass the actual clock to amdgpu_atombios_crtc_program_pll for HDMI */
++ if ((encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
++ (amdgpu_crtc->bpc > 8))
++ clock = amdgpu_crtc->adjusted_clock;
++
++ switch (amdgpu_crtc->pll_id) {
++ case ATOM_PPLL1:
++ pll = &adev->clock.ppll[0];
++ break;
++ case ATOM_PPLL2:
++ pll = &adev->clock.ppll[1];
++ break;
++ case ATOM_PPLL0:
++ case ATOM_PPLL_INVALID:
++ default:
++ pll = &adev->clock.ppll[2];
++ break;
++ }
++
++ /* update pll params */
++ pll->flags = amdgpu_crtc->pll_flags;
++ pll->reference_div = amdgpu_crtc->pll_reference_div;
++ pll->post_div = amdgpu_crtc->pll_post_div;
++
++ amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock,
++ &fb_div, &frac_fb_div, &ref_div, &post_div);
++
++ amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id,
++ amdgpu_crtc->crtc_id, &amdgpu_crtc->ss);
++
++ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
++ encoder_mode, amdgpu_encoder->encoder_id, clock,
++ ref_div, fb_div, frac_fb_div, post_div,
++ amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
++
++ if (amdgpu_crtc->ss_enabled) {
++ /* calculate ss amount and step size */
++ u32 step_size;
++ u32 amount = (((fb_div * 10) + frac_fb_div) *
++ (u32)amdgpu_crtc->ss.percentage) /
++ (100 * (u32)amdgpu_crtc->ss.percentage_divider);
++ amdgpu_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
++ amdgpu_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
++ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
++ if (amdgpu_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
++ step_size = (4 * amount * ref_div * ((u32)amdgpu_crtc->ss.rate * 2048)) /
++ (125 * 25 * pll->reference_freq / 100);
++ else
++ step_size = (2 * amount * ref_div * ((u32)amdgpu_crtc->ss.rate * 2048)) /
++ (125 * 25 * pll->reference_freq / 100);
++ amdgpu_crtc->ss.step = step_size;
++
++ amdgpu_atombios_crtc_program_ss(adev, ATOM_ENABLE, amdgpu_crtc->pll_id,
++ amdgpu_crtc->crtc_id, &amdgpu_crtc->ss);
++ }
++}
++
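
As a concrete check of the HDMI deep-color scaling in amdgpu_atombios_crtc_adjust_pll above, a 1080p60 mode with a 148500 kHz pixel clock needs a proportionally faster TMDS clock at higher depths. Worked numbers, as a small sketch:

/* Worked numbers for the deep-color cases above (clocks in kHz) */
u32 clock = 148500;			/* 1080p60 pixel clock */
u32 tmds_10bpc = (clock * 5) / 4;	/* 185625 kHz: 10 bpc is 5/4 */
u32 tmds_12bpc = (clock * 3) / 2;	/* 222750 kHz: 12 bpc is 3/2 */
u32 tmds_16bpc = clock * 2;		/* 297000 kHz: 16 bpc is 2x */
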
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h
+new file mode 100644
+index 0000000..c670833
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h
+@@ -0,0 +1,58 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __ATOMBIOS_CRTC_H__
++#define __ATOMBIOS_CRTC_H__
++
++void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode);
++void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc);
++void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock);
++void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state);
++void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state);
++void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state);
++void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev);
++void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
++ struct drm_display_mode *mode);
++void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
++ u32 dispclk);
++void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
++ u32 crtc_id,
++ int pll_id,
++ u32 encoder_mode,
++ u32 encoder_id,
++ u32 clock,
++ u32 ref_div,
++ u32 fb_div,
++ u32 frac_fb_div,
++ u32 post_div,
++ int bpc,
++ bool ss_enabled,
++ struct amdgpu_atom_ss *ss);
++int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
++ struct drm_display_mode *mode);
++void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc,
++ struct drm_display_mode *mode);
++
++#endif
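
The declarations above are consumed by the DCE display code added later in this series. A hedged sketch of the typical mode-set ordering follows; crtc, mode and adjusted_mode are assumed caller-provided, and the authoritative sequence lives in the dce crtc helpers, not here:

/* Hedged sketch of a typical crtc mode-set flow using these helpers */
amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);	/* hold double-buffered regs */
amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode);
amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
amdgpu_atombios_crtc_scaler_setup(crtc);
amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);	/* latch the new state */
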
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+new file mode 100644
+index 0000000..e00b8ad
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+@@ -0,0 +1,774 @@
++/*
++ * Copyright 2007-8 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ * Jerome Glisse
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++
++#include "atom.h"
++#include "atom-bits.h"
++#include "atombios_encoders.h"
++#include "atombios_dp.h"
++#include "amdgpu_connectors.h"
++#include "amdgpu_atombios.h"
++#include <drm/drm_dp_helper.h>
++
++/* move these to drm_dp_helper.c/h */
++#define DP_LINK_CONFIGURATION_SIZE 9
++#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
++
++static char *voltage_names[] = {
++ "0.4V", "0.6V", "0.8V", "1.2V"
++};
++static char *pre_emph_names[] = {
++ "0dB", "3.5dB", "6dB", "9.5dB"
++};
++
++/***** amdgpu AUX functions *****/
++
++union aux_channel_transaction {
++ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
++ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
++};
++
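++/* Run a DP AUX transaction via the ProcessAuxChannelTransaction atom
++ * table.  The request is staged in the atom scratch buffer (the table
++ * reads it at byte offset 4 and returns reply data at byte offset 20,
++ * matching lpAuxRequest/lpDataOut below); ucReplyStatus reports the
++ * result: 1 = timeout, 2 = flags not zero, 3 = error.
++ */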
++static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
++ u8 *send, int send_bytes,
++ u8 *recv, int recv_size,
++ u8 delay, u8 *ack)
++{
++ struct drm_device *dev = chan->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ union aux_channel_transaction args;
++ int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
++ unsigned char *base;
++ int recv_bytes;
++ int r = 0;
++
++ memset(&args, 0, sizeof(args));
++
++ mutex_lock(&chan->mutex);
++
++ base = (unsigned char *)(adev->mode_info.atom_context->scratch + 1);
++
++ amdgpu_atombios_copy_swap(base, send, send_bytes, true);
++
++ args.v2.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
++ args.v2.lpDataOut = cpu_to_le16((u16)(16 + 4));
++ args.v2.ucDataOutLen = 0;
++ args.v2.ucChannelID = chan->rec.i2c_id;
++ args.v2.ucDelay = delay / 10;
++ args.v2.ucHPD_ID = chan->rec.hpd;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ *ack = args.v2.ucReplyStatus;
++
++ /* timeout */
++ if (args.v2.ucReplyStatus == 1) {
++ DRM_DEBUG_KMS("dp_aux_ch timeout\n");
++ r = -ETIMEDOUT;
++ goto done;
++ }
++
++ /* flags not zero */
++ if (args.v2.ucReplyStatus == 2) {
++ DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
++ r = -EIO;
++ goto done;
++ }
++
++ /* error */
++ if (args.v2.ucReplyStatus == 3) {
++ DRM_DEBUG_KMS("dp_aux_ch error\n");
++ r = -EIO;
++ goto done;
++ }
++
++ recv_bytes = args.v1.ucDataOutLen;
++ if (recv_bytes > recv_size)
++ recv_bytes = recv_size;
++
++ if (recv && recv_size)
++ amdgpu_atombios_copy_swap(recv, base + 16, recv_bytes, false);
++
++ r = recv_bytes;
++done:
++ mutex_unlock(&chan->mutex);
++
++ return r;
++}
++
++#define BARE_ADDRESS_SIZE 3
++#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
++
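++/* The atom table expects a 4-byte request header in tx_buf: AUX address
++ * low/high bytes, the request type in the high nibble of byte 2, and
++ * (length - 1) in byte 3, whose high nibble also carries the total
++ * transfer size.
++ */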
++static ssize_t
++amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
++{
++ struct amdgpu_i2c_chan *chan =
++ container_of(aux, struct amdgpu_i2c_chan, aux);
++ int ret;
++ u8 tx_buf[20];
++ size_t tx_size;
++ u8 ack, delay = 0;
++
++ if (WARN_ON(msg->size > 16))
++ return -E2BIG;
++
++ tx_buf[0] = msg->address & 0xff;
++ tx_buf[1] = msg->address >> 8;
++ tx_buf[2] = msg->request << 4;
++ tx_buf[3] = msg->size ? (msg->size - 1) : 0;
++
++ switch (msg->request & ~DP_AUX_I2C_MOT) {
++ case DP_AUX_NATIVE_WRITE:
++ case DP_AUX_I2C_WRITE:
++ /* tx_size needs to be 4 even for bare address packets since the atom
++ * table needs the info in tx_buf[3].
++ */
++ tx_size = HEADER_SIZE + msg->size;
++ if (msg->size == 0)
++ tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
++ else
++ tx_buf[3] |= tx_size << 4;
++ memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
++ ret = amdgpu_atombios_dp_process_aux_ch(chan,
++ tx_buf, tx_size, NULL, 0, delay, &ack);
++ if (ret >= 0)
++ /* Return payload size. */
++ ret = msg->size;
++ break;
++ case DP_AUX_NATIVE_READ:
++ case DP_AUX_I2C_READ:
++ /* tx_size needs to be 4 even for bare address packets since the atom
++ * table needs the info in tx_buf[3].
++ */
++ tx_size = HEADER_SIZE;
++ if (msg->size == 0)
++ tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
++ else
++ tx_buf[3] |= tx_size << 4;
++ ret = amdgpu_atombios_dp_process_aux_ch(chan,
++ tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ if (ret >= 0)
++ msg->reply = ack >> 4;
++
++ return ret;
++}
++
++void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
++{
++ int ret;
++
++ amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
++ amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
++ amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
++ ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
++ if (!ret)
++ amdgpu_connector->ddc_bus->has_aux = true;
++
++ WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
++}
++
++/***** general DP utility functions *****/
++
++#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
++#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3
++
++static void amdgpu_atombios_dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
++ int lane_count,
++ u8 train_set[4])
++{
++ u8 v = 0;
++ u8 p = 0;
++ int lane;
++
++ for (lane = 0; lane < lane_count; lane++) {
++ u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
++ u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
++
++ DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
++ lane,
++ voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
++ pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
++
++ if (this_v > v)
++ v = this_v;
++ if (this_p > p)
++ p = this_p;
++ }
++
++ if (v >= DP_VOLTAGE_MAX)
++ v |= DP_TRAIN_MAX_SWING_REACHED;
++
++ if (p >= DP_PRE_EMPHASIS_MAX)
++ p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
++
++ DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
++ voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
++ pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
++
++ for (lane = 0; lane < 4; lane++)
++ train_set[lane] = v | p;
++}
++
++/* convert bits per color (from the EDID) to bits per pixel;
++ * a bpc of 0 means unknown, so fall back to 24 bpp
++ */
++static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
++{
++ if (bpc == 0)
++ return 24;
++ else
++ return bpc * 3;
++}
++
++/* get the max pix clock supported by the link rate and lane num */
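++/* e.g. 4 lanes at 270000 kHz and 24 bpp: (270000 * 4 * 8) / 24 = 360000 kHz */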
++static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
++ int lane_num,
++ int bpp)
++{
++ return (link_rate * lane_num * 8) / bpp;
++}
++
++/***** amdgpu specific DP functions *****/
++
++/* First find the minimum lane count that can carry the pixel clock at a
++ * low link rate (prefer the low rate), then cap it at the maximum lane
++ * count the DP sink supports.
++ */
++static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
++ u8 dpcd[DP_DPCD_SIZE],
++ int pix_clock)
++{
++ int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
++ int max_link_rate = drm_dp_max_link_rate(dpcd);
++ int max_lane_num = drm_dp_max_lane_count(dpcd);
++ int lane_num;
++ int max_dp_pix_clock;
++
++ for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
++ max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
++ if (pix_clock <= max_dp_pix_clock)
++ break;
++ }
++
++ return lane_num;
++}
++
++static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
++ u8 dpcd[DP_DPCD_SIZE],
++ int pix_clock)
++{
++ int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
++ int lane_num, max_pix_clock;
++
++ if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
++ ENCODER_OBJECT_ID_NUTMEG)
++ return 270000;
++
++ lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
++ max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
++ if (pix_clock <= max_pix_clock)
++ return 162000;
++ max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
++ if (pix_clock <= max_pix_clock)
++ return 270000;
++ if (amdgpu_connector_is_dp12_capable(connector)) {
++ max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
++ if (pix_clock <= max_pix_clock)
++ return 540000;
++ }
++
++ return drm_dp_max_link_rate(dpcd);
++}
++
++static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
++ int action, int dp_clock,
++ u8 ucconfig, u8 lane_num)
++{
++ DP_ENCODER_SERVICE_PARAMETERS args;
++ int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
++
++ memset(&args, 0, sizeof(args));
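++ /* atom tables take clocks in 10 kHz units */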
++ args.ucLinkClock = dp_clock / 10;
++ args.ucConfig = ucconfig;
++ args.ucAction = action;
++ args.ucLaneNum = lane_num;
++ args.ucStatus = 0;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++ return args.ucStatus;
++}
++
++u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
++{
++ struct drm_device *dev = amdgpu_connector->base.dev;
++ struct amdgpu_device *adev = dev->dev_private;
++
++ return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
++ amdgpu_connector->ddc_bus->rec.i2c_id, 0);
++}
++
++static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connector)
++{
++ struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
++ u8 buf[3];
++
++ if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
++ return;
++
++ if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
++ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
++ buf[0], buf[1], buf[2]);
++
++ if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
++ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
++ buf[0], buf[1], buf[2]);
++}
++
++int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
++{
++ struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
++ u8 msg[DP_DPCD_SIZE];
++ int ret, i;
++
++ ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
++ DP_DPCD_SIZE);
++ if (ret > 0) {
++ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
++ DRM_DEBUG_KMS("DPCD: ");
++ for (i = 0; i < DP_DPCD_SIZE; i++)
++ DRM_DEBUG_KMS("%02x ", msg[i]);
++ DRM_DEBUG_KMS("\n");
++
++ amdgpu_atombios_dp_probe_oui(amdgpu_connector);
++
++ return 0;
++ }
++ dig_connector->dpcd[0] = 0;
++ return -EINVAL;
++}
++
++int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
++ struct drm_connector *connector)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector;
++ int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
++ u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
++ u8 tmp;
++
++ if (!amdgpu_connector->con_priv)
++ return panel_mode;
++
++ dig_connector = amdgpu_connector->con_priv;
++
++ if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
++ /* DP bridge chips */
++ if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
++ DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
++ if (tmp & 1)
++ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
++ else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
++ (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
++ panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
++ else
++ panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
++ }
++ } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
++ /* eDP */
++ if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
++ DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
++ if (tmp & 1)
++ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
++ }
++ }
++
++ return panel_mode;
++}
++
++void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
++ const struct drm_display_mode *mode)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector;
++
++ if (!amdgpu_connector->con_priv)
++ return;
++ dig_connector = amdgpu_connector->con_priv;
++
++ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
++ dig_connector->dp_clock =
++ amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
++ dig_connector->dp_lane_count =
++ amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
++ }
++}
++
++int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector;
++ int dp_clock;
++
++ if (!amdgpu_connector->con_priv)
++ return MODE_CLOCK_HIGH;
++ dig_connector = amdgpu_connector->con_priv;
++
++ dp_clock =
++ amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
++
++ if ((dp_clock == 540000) &&
++ (!amdgpu_connector_is_dp12_capable(connector)))
++ return MODE_CLOCK_HIGH;
++
++ return MODE_OK;
++}
++
++bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector)
++{
++ u8 link_status[DP_LINK_STATUS_SIZE];
++ struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
++
++ if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
++ <= 0)
++ return false;
++ if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
++ return false;
++ return true;
++}
++
++void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
++ u8 power_state)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector;
++
++ if (!amdgpu_connector->con_priv)
++ return;
++
++ dig_connector = amdgpu_connector->con_priv;
++
++ /* power up/down the sink */
++ if (dig_connector->dpcd[0] >= 0x11) {
++ drm_dp_dpcd_writeb(&amdgpu_connector->ddc_bus->aux,
++ DP_SET_POWER, power_state);
++ usleep_range(1000, 2000);
++ }
++}
++
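++/* DP link training runs in two phases: clock recovery on training
++ * pattern 1, then channel equalization on pattern 2 (or pattern 3 when
++ * the sink supports TPS3), re-reading the sink's requested voltage
++ * swing / pre-emphasis after each pass.  The struct below carries the
++ * shared state between the _init/_cr/_ce/_finish helpers.
++ */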
++struct amdgpu_atombios_dp_link_train_info {
++ struct amdgpu_device *adev;
++ struct drm_encoder *encoder;
++ struct drm_connector *connector;
++ int dp_clock;
++ int dp_lane_count;
++ bool tp3_supported;
++ u8 dpcd[DP_RECEIVER_CAP_SIZE];
++ u8 train_set[4];
++ u8 link_status[DP_LINK_STATUS_SIZE];
++ u8 tries;
++ struct drm_dp_aux *aux;
++};
++
++static void
++amdgpu_atombios_dp_update_vs_emph(struct amdgpu_atombios_dp_link_train_info *dp_info)
++{
++ /* set the initial vs/emph on the source */
++ amdgpu_atombios_encoder_setup_dig_transmitter(dp_info->encoder,
++ ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
++ 0, dp_info->train_set[0]); /* sets all lanes at once */
++
++ /* set the vs/emph on the sink */
++ drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
++ dp_info->train_set, dp_info->dp_lane_count);
++}
++
++static void
++amdgpu_atombios_dp_set_tp(struct amdgpu_atombios_dp_link_train_info *dp_info, int tp)
++{
++ int rtp = 0;
++
++ /* set training pattern on the source */
++ switch (tp) {
++ case DP_TRAINING_PATTERN_1:
++ rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
++ break;
++ case DP_TRAINING_PATTERN_2:
++ rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
++ break;
++ case DP_TRAINING_PATTERN_3:
++ rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
++ break;
++ }
++ amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder, rtp, 0);
++
++ /* enable training pattern on the sink */
++ drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
++}
++
++static int
++amdgpu_atombios_dp_link_train_init(struct amdgpu_atombios_dp_link_train_info *dp_info)
++{
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(dp_info->encoder);
++ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
++ u8 tmp;
++
++ /* power up the sink */
++ amdgpu_atombios_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);
++
++ /* possibly enable downspread on the sink */
++ if (dp_info->dpcd[3] & 0x1)
++ drm_dp_dpcd_writeb(dp_info->aux,
++ DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
++ else
++ drm_dp_dpcd_writeb(dp_info->aux,
++ DP_DOWNSPREAD_CTRL, 0);
++
++ if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
++ drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
++
++ /* set the lane count on the sink */
++ tmp = dp_info->dp_lane_count;
++ if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
++ tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
++ drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);
++
++ /* set the link rate on the sink */
++ tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
++ drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);
++
++ /* start training on the source */
++ amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
++ ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
++
++ /* disable the training pattern on the sink */
++ drm_dp_dpcd_writeb(dp_info->aux,
++ DP_TRAINING_PATTERN_SET,
++ DP_TRAINING_PATTERN_DISABLE);
++
++ return 0;
++}
++
++static int
++amdgpu_atombios_dp_link_train_finish(struct amdgpu_atombios_dp_link_train_info *dp_info)
++{
++ udelay(400);
++
++ /* disable the training pattern on the sink */
++ drm_dp_dpcd_writeb(dp_info->aux,
++ DP_TRAINING_PATTERN_SET,
++ DP_TRAINING_PATTERN_DISABLE);
++
++ /* disable the training pattern on the source */
++ amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
++ ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
++
++ return 0;
++}
++
++static int
++amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_info)
++{
++ bool clock_recovery;
++ u8 voltage;
++ int i;
++
++ amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
++ memset(dp_info->train_set, 0, 4);
++ amdgpu_atombios_dp_update_vs_emph(dp_info);
++
++ udelay(400);
++
++ /* clock recovery loop */
++ clock_recovery = false;
++ dp_info->tries = 0;
++ voltage = 0xff;
++ while (1) {
++ drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
++
++ if (drm_dp_dpcd_read_link_status(dp_info->aux,
++ dp_info->link_status) <= 0) {
++ DRM_ERROR("displayport link status failed\n");
++ break;
++ }
++
++ if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
++ clock_recovery = true;
++ break;
++ }
++
++ for (i = 0; i < dp_info->dp_lane_count; i++) {
++ if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
++ break;
++ }
++ if (i == dp_info->dp_lane_count) {
++ DRM_ERROR("clock recovery reached max voltage\n");
++ break;
++ }
++
++ if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++ ++dp_info->tries;
++ if (dp_info->tries == 5) {
++ DRM_ERROR("clock recovery tried 5 times\n");
++ break;
++ }
++ } else
++ dp_info->tries = 0;
++
++ voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
++
++ /* Compute new train_set as requested by sink */
++ amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
++ dp_info->train_set);
++
++ amdgpu_atombios_dp_update_vs_emph(dp_info);
++ }
++ if (!clock_recovery) {
++ DRM_ERROR("clock recovery failed\n");
++ return -1;
++ } else {
++ DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
++ dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
++ (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
++ DP_TRAIN_PRE_EMPHASIS_SHIFT);
++ return 0;
++ }
++}
++
++static int
++amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_info)
++{
++ bool channel_eq;
++
++ if (dp_info->tp3_supported)
++ amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
++ else
++ amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
++
++ /* channel equalization loop */
++ dp_info->tries = 0;
++ channel_eq = false;
++ while (1) {
++ drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
++
++ if (drm_dp_dpcd_read_link_status(dp_info->aux,
++ dp_info->link_status) <= 0) {
++ DRM_ERROR("displayport link status failed\n");
++ break;
++ }
++
++ if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
++ channel_eq = true;
++ break;
++ }
++
++ /* Give up after more than 5 tries */
++ if (dp_info->tries > 5) {
++ DRM_ERROR("channel eq failed: 5 tries\n");
++ break;
++ }
++
++ /* Compute new train_set as requested by sink */
++ amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
++ dp_info->train_set);
++
++ amdgpu_atombios_dp_update_vs_emph(dp_info);
++ dp_info->tries++;
++ }
++
++ if (!channel_eq) {
++ DRM_ERROR("channel eq failed\n");
++ return -1;
++ } else {
++ DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
++ dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
++ (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
++ >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
++ return 0;
++ }
++}
++
++void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
++ struct drm_connector *connector)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_encoder_atom_dig *dig;
++ struct amdgpu_connector *amdgpu_connector;
++ struct amdgpu_connector_atom_dig *dig_connector;
++ struct amdgpu_atombios_dp_link_train_info dp_info;
++ u8 tmp;
++
++ if (!amdgpu_encoder->enc_priv)
++ return;
++ dig = amdgpu_encoder->enc_priv;
++
++ amdgpu_connector = to_amdgpu_connector(connector);
++ if (!amdgpu_connector->con_priv)
++ return;
++ dig_connector = amdgpu_connector->con_priv;
++
++ if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
++ (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
++ return;
++
++ if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
++ == 1) {
++ if (tmp & DP_TPS3_SUPPORTED)
++ dp_info.tp3_supported = true;
++ else
++ dp_info.tp3_supported = false;
++ } else {
++ dp_info.tp3_supported = false;
++ }
++
++ memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
++ dp_info.adev = adev;
++ dp_info.encoder = encoder;
++ dp_info.connector = connector;
++ dp_info.dp_lane_count = dig_connector->dp_lane_count;
++ dp_info.dp_clock = dig_connector->dp_clock;
++ dp_info.aux = &amdgpu_connector->ddc_bus->aux;
++
++ if (amdgpu_atombios_dp_link_train_init(&dp_info))
++ goto done;
++ if (amdgpu_atombios_dp_link_train_cr(&dp_info))
++ goto done;
++ if (amdgpu_atombios_dp_link_train_ce(&dp_info))
++ goto done;
++done:
++ if (amdgpu_atombios_dp_link_train_finish(&dp_info))
++ return;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
+new file mode 100644
+index 0000000..f59d85e
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
+@@ -0,0 +1,42 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __ATOMBIOS_DP_H__
++#define __ATOMBIOS_DP_H__
++
++void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector);
++u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector);
++int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector);
++int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
++ struct drm_connector *connector);
++void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
++ const struct drm_display_mode *mode);
++int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
++ struct drm_display_mode *mode);
++bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector);
++void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
++ u8 power_state);
++void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
++ struct drm_connector *connector);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+new file mode 100644
+index 0000000..ae8caca
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+@@ -0,0 +1,2066 @@
++/*
++ * Copyright 2007-11 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ * Alex Deucher
++ */
++#include <drm/drmP.h>
++#include <drm/drm_crtc_helper.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "amdgpu_connectors.h"
++#include "atom.h"
++#include "atombios_encoders.h"
++#include "atombios_dp.h"
++#include <linux/backlight.h>
++#include "bif/bif_4_1_d.h"
++
++static u8
++amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
++{
++ u8 backlight_level;
++ u32 bios_2_scratch;
++
++ bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
++
++ backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
++ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
++
++ return backlight_level;
++}
++
++static void
++amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
++ u8 backlight_level)
++{
++ u32 bios_2_scratch;
++
++ bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
++
++ bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
++ bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
++ ATOM_S2_CURRENT_BL_LEVEL_MASK);
++
++ WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
++}
++
++u8
++amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
++{
++ struct drm_device *dev = amdgpu_encoder->base.dev;
++ struct amdgpu_device *adev = dev->dev_private;
++
++ if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
++ return 0;
++
++ return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
++}
++
++void
++amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
++ u8 level)
++{
++ struct drm_encoder *encoder = &amdgpu_encoder->base;
++ struct drm_device *dev = amdgpu_encoder->base.dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder_atom_dig *dig;
++
++ if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
++ return;
++
++ if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
++ amdgpu_encoder->enc_priv) {
++ dig = amdgpu_encoder->enc_priv;
++ dig->backlight_level = level;
++ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, dig->backlight_level);
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ if (dig->backlight_level == 0)
++ amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
++ ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
++ else {
++ amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
++ ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
++ amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
++ ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
++ }
++ break;
++ default:
++ break;
++ }
++ }
++}
++
++#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
++
++static u8 amdgpu_atombios_encoder_backlight_level(struct backlight_device *bd)
++{
++ u8 level;
++
++ /* Convert brightness to hardware level */
++ if (bd->props.brightness < 0)
++ level = 0;
++ else if (bd->props.brightness > AMDGPU_MAX_BL_LEVEL)
++ level = AMDGPU_MAX_BL_LEVEL;
++ else
++ level = bd->props.brightness;
++
++ return level;
++}
++
++static int amdgpu_atombios_encoder_update_backlight_status(struct backlight_device *bd)
++{
++ struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
++ struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
++
++ amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder,
++ amdgpu_atombios_encoder_backlight_level(bd));
++
++ return 0;
++}
++
++static int
++amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd)
++{
++ struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
++ struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
++ struct drm_device *dev = amdgpu_encoder->base.dev;
++ struct amdgpu_device *adev = dev->dev_private;
++
++ return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
++}
++
++static const struct backlight_ops amdgpu_atombios_encoder_backlight_ops = {
++ .get_brightness = amdgpu_atombios_encoder_get_backlight_brightness,
++ .update_status = amdgpu_atombios_encoder_update_backlight_status,
++};
++
++void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder,
++ struct drm_connector *drm_connector)
++{
++ struct drm_device *dev = amdgpu_encoder->base.dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct backlight_device *bd;
++ struct backlight_properties props;
++ struct amdgpu_backlight_privdata *pdata;
++ struct amdgpu_encoder_atom_dig *dig;
++ u8 backlight_level;
++ char bl_name[16];
++
++ /* Mac laptops with multiple GPUs use the gmux driver for backlight
++ * so don't register a backlight device
++ */
++ if ((adev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
++ (adev->pdev->device == 0x6741))
++ return;
++
++ if (!amdgpu_encoder->enc_priv)
++ return;
++
++ if (!adev->is_atom_bios)
++ return;
++
++ if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
++ return;
++
++ pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL);
++ if (!pdata) {
++ DRM_ERROR("Memory allocation failed\n");
++ goto error;
++ }
++
++ memset(&props, 0, sizeof(props));
++ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
++ props.type = BACKLIGHT_RAW;
++ snprintf(bl_name, sizeof(bl_name),
++ "amdgpu_bl%d", dev->primary->index);
++ bd = backlight_device_register(bl_name, drm_connector->kdev,
++ pdata, &amdgpu_atombios_encoder_backlight_ops, &props);
++ if (IS_ERR(bd)) {
++ DRM_ERROR("Backlight registration failed\n");
++ goto error;
++ }
++
++ pdata->encoder = amdgpu_encoder;
++
++ backlight_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
++
++ dig = amdgpu_encoder->enc_priv;
++ dig->bl_dev = bd;
++
++ bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd);
++ bd->props.power = FB_BLANK_UNBLANK;
++ backlight_update_status(bd);
++
++ DRM_INFO("amdgpu atom DIG backlight initialized\n");
++
++ return;
++
++error:
++ kfree(pdata);
++ return;
++}
++
++void
++amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
++{
++ struct drm_device *dev = amdgpu_encoder->base.dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct backlight_device *bd = NULL;
++ struct amdgpu_encoder_atom_dig *dig;
++
++ if (!amdgpu_encoder->enc_priv)
++ return;
++
++ if (!adev->is_atom_bios)
++ return;
++
++ if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
++ return;
++
++ dig = amdgpu_encoder->enc_priv;
++ bd = dig->bl_dev;
++ dig->bl_dev = NULL;
++
++ if (bd) {
++ struct amdgpu_backlight_privdata *pdata;
++
++ pdata = bl_get_data(bd);
++ backlight_device_unregister(bd);
++ kfree(pdata);
++
++ DRM_INFO("amdgpu atom LVDS backlight unloaded\n");
++ }
++}
++
++#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
++
++void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *encoder,
++					    struct drm_connector *drm_connector)
++{
++}
++
++void amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *encoder)
++{
++}
++
++#endif
++
++bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder)
++{
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
++ return true;
++ default:
++ return false;
++ }
++}
++
++bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
++ const struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ /* set the active encoder to connector routing */
++ amdgpu_encoder_set_active_device(encoder);
++ drm_mode_set_crtcinfo(adjusted_mode, 0);
++
++ /* hw bug: interlaced modes require vsync_start >= vdisplay + 2 */
++ if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
++ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
++ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
++
++ /* get the native mode for scaling */
++ if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
++ amdgpu_panel_mode_fixup(encoder, adjusted_mode);
++ else if (amdgpu_encoder->rmx_type != RMX_OFF)
++ amdgpu_panel_mode_fixup(encoder, adjusted_mode);
++
++ if ((amdgpu_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
++ (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
++ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
++ amdgpu_atombios_dp_set_link_config(connector, adjusted_mode);
++ }
++
++ return true;
++}
++
++static void
++amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ DAC_ENCODER_CONTROL_PS_ALLOCATION args;
++ int index = 0;
++
++ memset(&args, 0, sizeof(args));
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
++ index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
++ index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
++ break;
++ }
++
++ args.ucAction = action;
++ args.ucDacStandard = ATOM_DAC1_PS2;
++ args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++}
++
++static u8 amdgpu_atombios_encoder_get_bpc(struct drm_encoder *encoder)
++{
++ int bpc = 8;
++
++ if (encoder->crtc) {
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
++ bpc = amdgpu_crtc->bpc;
++ }
++
++ switch (bpc) {
++ case 0:
++ return PANEL_BPC_UNDEFINE;
++ case 6:
++ return PANEL_6BIT_PER_COLOR;
++ case 8:
++ default:
++ return PANEL_8BIT_PER_COLOR;
++ case 10:
++ return PANEL_10BIT_PER_COLOR;
++ case 12:
++ return PANEL_12BIT_PER_COLOR;
++ case 16:
++ return PANEL_16BIT_PER_COLOR;
++ }
++}
++
++union dvo_encoder_control {
++ ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
++ DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
++ DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
++ DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4;
++};
++
++static void
++amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ union dvo_encoder_control args;
++ int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
++ uint8_t frev, crev;
++
++ memset(&args, 0, sizeof(args));
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return;
++
++ switch (frev) {
++ case 1:
++ switch (crev) {
++ case 1:
++ /* R4xx, R5xx */
++ args.ext_tmds.sXTmdsEncoder.ucEnable = action;
++
++ if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
++
++ args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
++ break;
++ case 2:
++ /* RS600/690/740 */
++ args.dvo.sDVOEncoder.ucAction = action;
++ args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ /* DFP1, CRT1, TV1 depending on the type of port */
++ args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
++
++ if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
++ break;
++ case 3:
++ /* R6xx */
++ args.dvo_v3.ucAction = action;
++ args.dvo_v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ args.dvo_v3.ucDVOConfig = 0; /* XXX */
++ break;
++ case 4:
++ /* DCE8 */
++ args.dvo_v4.ucAction = action;
++ args.dvo_v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ args.dvo_v4.ucDVOConfig = 0; /* XXX */
++ args.dvo_v4.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ break;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ break;
++ }
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
++{
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_connector *connector;
++ struct amdgpu_connector *amdgpu_connector;
++ struct amdgpu_connector_atom_dig *dig_connector;
++
++ /* dp bridges are always DP */
++ if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
++ return ATOM_ENCODER_MODE_DP;
++
++ /* DVO is always DVO */
++ if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
++ (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
++ return ATOM_ENCODER_MODE_DVO;
++
++ connector = amdgpu_get_connector_for_encoder(encoder);
++ /* if we don't have an active device yet, just use one of
++ * the connectors tied to the encoder.
++ */
++ if (!connector)
++ connector = amdgpu_get_connector_for_encoder_init(encoder);
++ amdgpu_connector = to_amdgpu_connector(connector);
++
++ switch (connector->connector_type) {
++ case DRM_MODE_CONNECTOR_DVII:
++ case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
++ if (amdgpu_audio != 0) {
++ if (amdgpu_connector->use_digital &&
++ (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE))
++ return ATOM_ENCODER_MODE_HDMI;
++ else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
++ (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
++ return ATOM_ENCODER_MODE_HDMI;
++ else if (amdgpu_connector->use_digital)
++ return ATOM_ENCODER_MODE_DVI;
++ else
++ return ATOM_ENCODER_MODE_CRT;
++ } else if (amdgpu_connector->use_digital) {
++ return ATOM_ENCODER_MODE_DVI;
++ } else {
++ return ATOM_ENCODER_MODE_CRT;
++ }
++ break;
++ case DRM_MODE_CONNECTOR_DVID:
++ case DRM_MODE_CONNECTOR_HDMIA:
++ default:
++ if (amdgpu_audio != 0) {
++ if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
++ return ATOM_ENCODER_MODE_HDMI;
++ else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
++ (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
++ return ATOM_ENCODER_MODE_HDMI;
++ else
++ return ATOM_ENCODER_MODE_DVI;
++ } else {
++ return ATOM_ENCODER_MODE_DVI;
++ }
++ break;
++ case DRM_MODE_CONNECTOR_LVDS:
++ return ATOM_ENCODER_MODE_LVDS;
++ break;
++ case DRM_MODE_CONNECTOR_DisplayPort:
++ dig_connector = amdgpu_connector->con_priv;
++ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
++ return ATOM_ENCODER_MODE_DP;
++ } else if (amdgpu_audio != 0) {
++ if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
++ return ATOM_ENCODER_MODE_HDMI;
++ else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
++ (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
++ return ATOM_ENCODER_MODE_HDMI;
++ else
++ return ATOM_ENCODER_MODE_DVI;
++ } else {
++ return ATOM_ENCODER_MODE_DVI;
++ }
++ break;
++ case DRM_MODE_CONNECTOR_eDP:
++ return ATOM_ENCODER_MODE_DP;
++ case DRM_MODE_CONNECTOR_DVIA:
++ case DRM_MODE_CONNECTOR_VGA:
++ return ATOM_ENCODER_MODE_CRT;
++ break;
++ case DRM_MODE_CONNECTOR_Composite:
++ case DRM_MODE_CONNECTOR_SVIDEO:
++ case DRM_MODE_CONNECTOR_9PinDIN:
++ /* fix me */
++ return ATOM_ENCODER_MODE_TV;
++ /*return ATOM_ENCODER_MODE_CV;*/
++ break;
++ }
++}
++
++/*
++ * DIG Encoder/Transmitter Setup
++ *
++ * DCE 6.0
++ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
++ * Supports up to 6 digital outputs
++ * - 6 DIG encoder blocks.
++ * - DIG to PHY mapping is hardcoded
++ * DIG1 drives UNIPHY0 link A, A+B
++ * DIG2 drives UNIPHY0 link B
++ * DIG3 drives UNIPHY1 link A, A+B
++ * DIG4 drives UNIPHY1 link B
++ * DIG5 drives UNIPHY2 link A, A+B
++ * DIG6 drives UNIPHY2 link B
++ *
++ * Routing
++ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
++ * Examples:
++ * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
++ * crtc1 -> dig1 -> UNIPHY0 link B -> DP
++ * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
++ * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
++ */
++
++union dig_encoder_control {
++ DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
++ DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
++ DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
++ DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
++};
++
++void
++amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
++ int action, int panel_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
++ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
++ union dig_encoder_control args;
++ int index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
++ uint8_t frev, crev;
++ int dp_clock = 0;
++ int dp_lane_count = 0;
++ int hpd_id = AMDGPU_HPD_NONE;
++
++ if (connector) {
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector =
++ amdgpu_connector->con_priv;
++
++ dp_clock = dig_connector->dp_clock;
++ dp_lane_count = dig_connector->dp_lane_count;
++ hpd_id = amdgpu_connector->hpd.hpd;
++ }
++
++ /* no dig encoder assigned */
++ if (dig->dig_encoder == -1)
++ return;
++
++ memset(&args, 0, sizeof(args));
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return;
++
++ switch (frev) {
++ case 1:
++ switch (crev) {
++ case 1:
++ args.v1.ucAction = action;
++ args.v1.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
++ args.v3.ucPanelMode = panel_mode;
++ else
++ args.v1.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++
++ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
++ args.v1.ucLaneNum = dp_lane_count;
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v1.ucLaneNum = 8;
++ else
++ args.v1.ucLaneNum = 4;
++
++ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
++ break;
++ }
++ if (dig->linkb)
++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
++ else
++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
++ break;
++ case 2:
++ case 3:
++ args.v3.ucAction = action;
++ args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
++ args.v3.ucPanelMode = panel_mode;
++ else
++ args.v3.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++
++ if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
++ args.v3.ucLaneNum = dp_lane_count;
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v3.ucLaneNum = 8;
++ else
++ args.v3.ucLaneNum = 4;
++
++ if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
++ args.v3.acConfig.ucDigSel = dig->dig_encoder;
++ args.v3.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
++ break;
++ case 4:
++ args.v4.ucAction = action;
++ args.v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
++ args.v4.ucPanelMode = panel_mode;
++ else
++ args.v4.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++
++ if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
++ args.v4.ucLaneNum = dp_lane_count;
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v4.ucLaneNum = 8;
++ else
++ args.v4.ucLaneNum = 4;
++
++ if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
++ if (dp_clock == 540000)
++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
++ else if (dp_clock == 324000)
++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ;
++ else if (dp_clock == 270000)
++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
++ else
++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
++ }
++ args.v4.acConfig.ucDigSel = dig->dig_encoder;
++ args.v4.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
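++ /* atom HPD ids are 1-based; 0 means no HPD is assigned */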
++ if (hpd_id == AMDGPU_HPD_NONE)
++ args.v4.ucHPD_ID = 0;
++ else
++ args.v4.ucHPD_ID = hpd_id + 1;
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ break;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
++ break;
++ }
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++}
++
++union dig_transmitter_control {
++ DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
++ DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
++ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
++ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
++ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
++};
++
++void
++amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
++ uint8_t lane_num, uint8_t lane_set)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
++ struct drm_connector *connector;
++ union dig_transmitter_control args;
++ int index = 0;
++ uint8_t frev, crev;
++ bool is_dp = false;
++ int pll_id = 0;
++ int dp_clock = 0;
++ int dp_lane_count = 0;
++ int connector_object_id = 0;
++ int igp_lane_info = 0;
++ int dig_encoder = dig->dig_encoder;
++ int hpd_id = AMDGPU_HPD_NONE;
++
++ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
++ connector = amdgpu_get_connector_for_encoder_init(encoder);
++ /* just needed to avoid bailing in the encoder check. the encoder
++ * isn't used for init
++ */
++ dig_encoder = 0;
++ } else
++ connector = amdgpu_get_connector_for_encoder(encoder);
++
++ if (connector) {
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector =
++ amdgpu_connector->con_priv;
++
++ hpd_id = amdgpu_connector->hpd.hpd;
++ dp_clock = dig_connector->dp_clock;
++ dp_lane_count = dig_connector->dp_lane_count;
++ connector_object_id =
++ (amdgpu_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
++ }
++
++ if (encoder->crtc) {
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
++ pll_id = amdgpu_crtc->pll_id;
++ }
++
++ /* no dig encoder assigned */
++ if (dig_encoder == -1)
++ return;
++
++ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)))
++ is_dp = true;
++
++ memset(&args, 0, sizeof(args));
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
++ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
++ index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++ index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
++ break;
++ }
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return;
++
++ switch (frev) {
++ case 1:
++ switch (crev) {
++ case 1:
++ args.v1.ucAction = action;
++ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
++ args.v1.usInitInfo = cpu_to_le16(connector_object_id);
++ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
++ args.v1.asMode.ucLaneSel = lane_num;
++ args.v1.asMode.ucLaneSet = lane_set;
++ } else {
++ if (is_dp)
++ args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v1.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
++ else
++ args.v1.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ }
++
++ args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
++
++ if (dig_encoder)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
++ else
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
++
++ if ((adev->flags & AMDGPU_IS_APU) &&
++ (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
++ if (is_dp ||
++ !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
++ if (igp_lane_info & 0x1)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
++ else if (igp_lane_info & 0x2)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
++ else if (igp_lane_info & 0x4)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
++ else if (igp_lane_info & 0x8)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
++ } else {
++ if (igp_lane_info & 0x3)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
++ else if (igp_lane_info & 0xc)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
++ }
++ }
++
++ if (dig->linkb)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
++ else
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
++
++ if (is_dp)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
++ else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++ if (dig->coherent_mode)
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
++ if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
++ }
++ break;
++ case 2:
++ args.v2.ucAction = action;
++ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
++ args.v2.usInitInfo = cpu_to_le16(connector_object_id);
++ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
++ args.v2.asMode.ucLaneSel = lane_num;
++ args.v2.asMode.ucLaneSet = lane_set;
++ } else {
++ if (is_dp)
++ args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v2.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
++ else
++ args.v2.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ }
++
++ args.v2.acConfig.ucEncoderSel = dig_encoder;
++ if (dig->linkb)
++ args.v2.acConfig.ucLinkSel = 1;
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ args.v2.acConfig.ucTransmitterSel = 0;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ args.v2.acConfig.ucTransmitterSel = 1;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ args.v2.acConfig.ucTransmitterSel = 2;
++ break;
++ }
++
++ if (is_dp) {
++ args.v2.acConfig.fCoherentMode = 1;
++ args.v2.acConfig.fDPConnector = 1;
++ } else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++ if (dig->coherent_mode)
++ args.v2.acConfig.fCoherentMode = 1;
++ if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v2.acConfig.fDualLinkConnector = 1;
++ }
++ break;
++ case 3:
++ args.v3.ucAction = action;
++ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
++ args.v3.usInitInfo = cpu_to_le16(connector_object_id);
++ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
++ args.v3.asMode.ucLaneSel = lane_num;
++ args.v3.asMode.ucLaneSet = lane_set;
++ } else {
++ if (is_dp)
++ args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v3.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
++ else
++ args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ }
++
++ if (is_dp)
++ args.v3.ucLaneNum = dp_lane_count;
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v3.ucLaneNum = 8;
++ else
++ args.v3.ucLaneNum = 4;
++
++ if (dig->linkb)
++ args.v3.acConfig.ucLinkSel = 1;
++ if (dig_encoder & 1)
++ args.v3.acConfig.ucEncoderSel = 1;
++
++ /* Select the PLL for the PHY
++ * DP PHY should be clocked from external src if there is
++ * one.
++ */
++ /* On DCE4, if there is an external clock, it generates the DP ref clock */
++ if (is_dp && adev->clock.dp_extclk)
++ args.v3.acConfig.ucRefClkSource = 2; /* external src */
++ else
++ args.v3.acConfig.ucRefClkSource = pll_id;
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ args.v3.acConfig.ucTransmitterSel = 0;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ args.v3.acConfig.ucTransmitterSel = 1;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ args.v3.acConfig.ucTransmitterSel = 2;
++ break;
++ }
++
++ if (is_dp)
++ args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
++ else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++ if (dig->coherent_mode)
++ args.v3.acConfig.fCoherentMode = 1;
++ if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v3.acConfig.fDualLinkConnector = 1;
++ }
++ break;
++ case 4:
++ args.v4.ucAction = action;
++ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
++ args.v4.usInitInfo = cpu_to_le16(connector_object_id);
++ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
++ args.v4.asMode.ucLaneSel = lane_num;
++ args.v4.asMode.ucLaneSet = lane_set;
++ } else {
++ if (is_dp)
++ args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v4.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
++ else
++ args.v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ }
++
++ if (is_dp)
++ args.v4.ucLaneNum = dp_lane_count;
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v4.ucLaneNum = 8;
++ else
++ args.v4.ucLaneNum = 4;
++
++ if (dig->linkb)
++ args.v4.acConfig.ucLinkSel = 1;
++ if (dig_encoder & 1)
++ args.v4.acConfig.ucEncoderSel = 1;
++
++			/* Select the PLL for the PHY: the DP PHY should be
++			 * clocked from the external source if there is one.
++			 * On DCE5, the DCPLL usually generates the DP ref
++			 * clock.
++			 */
++ if (is_dp) {
++ if (adev->clock.dp_extclk)
++ args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
++ else
++ args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
++ } else
++ args.v4.acConfig.ucRefClkSource = pll_id;
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ args.v4.acConfig.ucTransmitterSel = 0;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ args.v4.acConfig.ucTransmitterSel = 1;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ args.v4.acConfig.ucTransmitterSel = 2;
++ break;
++ }
++
++ if (is_dp)
++ args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
++ else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++ if (dig->coherent_mode)
++ args.v4.acConfig.fCoherentMode = 1;
++ if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v4.acConfig.fDualLinkConnector = 1;
++ }
++ break;
++ case 5:
++ args.v5.ucAction = action;
++ if (is_dp)
++ args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
++ else
++ args.v5.usSymClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ if (dig->linkb)
++ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
++ else
++ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ if (dig->linkb)
++ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
++ else
++ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ if (dig->linkb)
++ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
++ else
++ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
++ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG;
++ break;
++ }
++ if (is_dp)
++ args.v5.ucLaneNum = dp_lane_count;
++ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v5.ucLaneNum = 8;
++ else
++ args.v5.ucLaneNum = 4;
++ args.v5.ucConnObjId = connector_object_id;
++ args.v5.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++
++ if (is_dp && adev->clock.dp_extclk)
++ args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
++ else
++ args.v5.asConfig.ucPhyClkSrcId = pll_id;
++
++ if (is_dp)
++ args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
++ else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++ if (dig->coherent_mode)
++ args.v5.asConfig.ucCoherentMode = 1;
++ }
++ if (hpd_id == AMDGPU_HPD_NONE)
++ args.v5.asConfig.ucHPDSel = 0;
++ else
++ args.v5.asConfig.ucHPDSel = hpd_id + 1;
++ args.v5.ucDigEncoderSel = 1 << dig_encoder;
++ args.v5.ucDPLaneSet = lane_set;
++ break;
++ default:
++			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
++ break;
++ }
++ break;
++ default:
++		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
++ break;
++ }
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
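++/* Toggle eDP panel power through the UNIPHYTransmitterControl table.  On
++ * power-up this polls HPD for up to 300 ms so callers do not touch the
++ * panel before it is ready; returns false if HPD never asserts.
++ */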
++bool
++amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
++ int action)
++{
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct drm_device *dev = amdgpu_connector->base.dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ union dig_transmitter_control args;
++ int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
++ uint8_t frev, crev;
++
++ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
++ goto done;
++
++ if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
++ (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
++ goto done;
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ goto done;
++
++ memset(&args, 0, sizeof(args));
++
++ args.v1.ucAction = action;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ /* wait for the panel to power up */
++ if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
++ int i;
++
++ for (i = 0; i < 300; i++) {
++ if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd))
++ return true;
++ mdelay(1);
++ }
++ return false;
++ }
++done:
++ return true;
++}
++
++union external_encoder_control {
++ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
++ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
++};
++
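++/* Program an external encoder (e.g. a DP bridge) through the
++ * ExternalEncoderControl table.  frev 1 takes no parameters; frev 2
++ * crev 1/2 reuses the internal DIG encoder layout, while crev 3 adds
++ * DP link rate, encoder enum and bits-per-color selection.
++ */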
++static void
++amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
++ struct drm_encoder *ext_encoder,
++ int action)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder);
++ union external_encoder_control args;
++ struct drm_connector *connector;
++ int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
++ u8 frev, crev;
++ int dp_clock = 0;
++ int dp_lane_count = 0;
++ int connector_object_id = 0;
++ u32 ext_enum = (ext_amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
++
++ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
++ connector = amdgpu_get_connector_for_encoder_init(encoder);
++ else
++ connector = amdgpu_get_connector_for_encoder(encoder);
++
++ if (connector) {
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct amdgpu_connector_atom_dig *dig_connector =
++ amdgpu_connector->con_priv;
++
++ dp_clock = dig_connector->dp_clock;
++ dp_lane_count = dig_connector->dp_lane_count;
++ connector_object_id =
++ (amdgpu_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
++ }
++
++ memset(&args, 0, sizeof(args));
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return;
++
++ switch (frev) {
++ case 1:
++ /* no params on frev 1 */
++ break;
++ case 2:
++ switch (crev) {
++ case 1:
++ case 2:
++ args.v1.sDigEncoder.ucAction = action;
++ args.v1.sDigEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ args.v1.sDigEncoder.ucEncoderMode =
++ amdgpu_atombios_encoder_get_encoder_mode(encoder);
++
++ if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
++ if (dp_clock == 270000)
++ args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
++ args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
++ } else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v1.sDigEncoder.ucLaneNum = 8;
++ else
++ args.v1.sDigEncoder.ucLaneNum = 4;
++ break;
++ case 3:
++ args.v3.sExtEncoder.ucAction = action;
++ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
++ args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
++ else
++ args.v3.sExtEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
++ args.v3.sExtEncoder.ucEncoderMode =
++ amdgpu_atombios_encoder_get_encoder_mode(encoder);
++
++ if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
++ if (dp_clock == 270000)
++ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
++ else if (dp_clock == 540000)
++ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
++ args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
++ } else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
++ args.v3.sExtEncoder.ucLaneNum = 8;
++ else
++ args.v3.sExtEncoder.ucLaneNum = 4;
++ switch (ext_enum) {
++ case GRAPH_OBJECT_ENUM_ID1:
++ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
++ break;
++ case GRAPH_OBJECT_ENUM_ID2:
++ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
++ break;
++ case GRAPH_OBJECT_ENUM_ID3:
++ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
++ break;
++ }
++ args.v3.sExtEncoder.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
++ break;
++ default:
++ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
++ return;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
++ return;
++ }
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
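++/* Bring the whole DIG path up or down in the order the hardware expects:
++ * on enable - panel mode, encoder setup, external encoder, eDP panel
++ * power, transmitter enable, DP link training, video on; on disable the
++ * same steps are torn down in roughly reverse order.
++ */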
++static void
++amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
++{
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
++ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
++ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
++ struct amdgpu_connector *amdgpu_connector = NULL;
++ struct amdgpu_connector_atom_dig *amdgpu_dig_connector = NULL;
++
++ if (connector) {
++ amdgpu_connector = to_amdgpu_connector(connector);
++ amdgpu_dig_connector = amdgpu_connector->con_priv;
++ }
++
++ if (action == ATOM_ENABLE) {
++ if (!connector)
++ dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
++ else
++ dig->panel_mode = amdgpu_atombios_dp_get_panel_mode(encoder, connector);
++
++ /* setup and enable the encoder */
++ amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_SETUP, 0);
++ amdgpu_atombios_encoder_setup_dig_encoder(encoder,
++ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
++ dig->panel_mode);
++ if (ext_encoder)
++ amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
++ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
++ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
++ connector) {
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
++ amdgpu_atombios_encoder_set_edp_panel_power(connector,
++ ATOM_TRANSMITTER_ACTION_POWER_ON);
++ amdgpu_dig_connector->edp_on = true;
++ }
++ }
++ /* enable the transmitter */
++ amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
++ ATOM_TRANSMITTER_ACTION_ENABLE,
++ 0, 0);
++ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
++ connector) {
++ /* DP_SET_POWER_D0 is set in amdgpu_atombios_dp_link_train */
++ amdgpu_atombios_dp_link_train(encoder, connector);
++ amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
++ }
++ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
++ amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
++ ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
++ if (ext_encoder)
++ amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
++ } else {
++ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
++ connector)
++ amdgpu_atombios_encoder_setup_dig_encoder(encoder,
++ ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
++ if (ext_encoder)
++ amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_DISABLE);
++ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
++ amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
++ ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
++
++ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
++ connector)
++ amdgpu_atombios_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
++ /* disable the transmitter */
++ amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
++ ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
++ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
++ connector) {
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
++ amdgpu_atombios_encoder_set_edp_panel_power(connector,
++ ATOM_TRANSMITTER_ACTION_POWER_OFF);
++ amdgpu_dig_connector->edp_on = false;
++ }
++ }
++ }
++}
++
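++/* DRM dpms entry point: DPMS_ON maps to ATOM_ENABLE and every other
++ * state to ATOM_DISABLE, dispatched to the DIG, DVO or DAC helper that
++ * matches the encoder object ID.
++ */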
++void
++amdgpu_atombios_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
++ amdgpu_encoder->encoder_id, mode, amdgpu_encoder->devices,
++ amdgpu_encoder->active_device);
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ amdgpu_atombios_encoder_setup_dig(encoder, ATOM_ENABLE);
++ break;
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ case DRM_MODE_DPMS_OFF:
++ amdgpu_atombios_encoder_setup_dig(encoder, ATOM_DISABLE);
++ break;
++ }
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ amdgpu_atombios_encoder_setup_dvo(encoder, ATOM_ENABLE);
++ break;
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ case DRM_MODE_DPMS_OFF:
++ amdgpu_atombios_encoder_setup_dvo(encoder, ATOM_DISABLE);
++ break;
++ }
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ amdgpu_atombios_encoder_setup_dac(encoder, ATOM_ENABLE);
++ break;
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ case DRM_MODE_DPMS_OFF:
++ amdgpu_atombios_encoder_setup_dac(encoder, ATOM_DISABLE);
++ break;
++ }
++ break;
++ default:
++ return;
++ }
++}
++
++union crtc_source_param {
++ SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
++ SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
++ SELECT_CRTC_SOURCE_PARAMETERS_V3 v3;
++};
++
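++/* Route an encoder to its CRTC via the SelectCRTC_Source table.  v1
++ * selects by legacy device index, v2/v3 by DIG encoder ID and encode
++ * mode; v3 additionally programs the destination bits-per-color.
++ */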
++void
++amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
++ union crtc_source_param args;
++ int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
++ uint8_t frev, crev;
++ struct amdgpu_encoder_atom_dig *dig;
++
++ memset(&args, 0, sizeof(args));
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return;
++
++ switch (frev) {
++ case 1:
++ switch (crev) {
++ case 1:
++ default:
++ args.v1.ucCRTC = amdgpu_crtc->crtc_id;
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
++ args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
++ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
++ if (amdgpu_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
++ args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
++ else
++ args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
++ case ENCODER_OBJECT_ID_INTERNAL_DDI:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
++ args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
++ if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
++ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
++ else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
++ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
++ else
++ args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
++ if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
++ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
++ else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
++ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
++ else
++ args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
++ break;
++ }
++ break;
++ case 2:
++ args.v2.ucCRTC = amdgpu_crtc->crtc_id;
++ if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
++ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
++
++ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
++ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
++ else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
++ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
++ else
++ args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++ } else if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
++ } else {
++ args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++ }
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++ dig = amdgpu_encoder->enc_priv;
++ switch (dig->dig_encoder) {
++ case 0:
++ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
++ break;
++ case 1:
++ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
++ break;
++ case 2:
++ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
++ break;
++ case 3:
++ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
++ break;
++ case 4:
++ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
++ break;
++ case 5:
++ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
++ break;
++ case 6:
++ args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
++ break;
++ }
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
++ args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
++ if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
++ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
++ else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
++ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
++ else
++ args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
++ if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
++ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
++ else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
++ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
++ else
++ args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
++ break;
++ }
++ break;
++ case 3:
++ args.v3.ucCRTC = amdgpu_crtc->crtc_id;
++ if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
++ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
++
++ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
++				args.v3.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
++			else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
++				args.v3.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
++			else
++				args.v3.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++			args.v3.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
++		} else {
++			args.v3.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
++ }
++ args.v3.ucDstBpc = amdgpu_atombios_encoder_get_bpc(encoder);
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++ dig = amdgpu_encoder->enc_priv;
++ switch (dig->dig_encoder) {
++ case 0:
++ args.v3.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
++ break;
++ case 1:
++ args.v3.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
++ break;
++ case 2:
++ args.v3.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
++ break;
++ case 3:
++ args.v3.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
++ break;
++ case 4:
++ args.v3.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
++ break;
++ case 5:
++ args.v3.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
++ break;
++ case 6:
++ args.v3.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
++ break;
++ }
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
++ args.v3.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
++ if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
++ args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
++ else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
++ args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
++ else
++ args.v3.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
++ break;
++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
++ if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
++ args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
++ else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
++ args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
++ else
++ args.v3.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
++ break;
++ }
++ break;
++ }
++ break;
++ default:
++ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
++ return;
++ }
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
++/* This only needs to be called once at startup */
++void
++amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
++{
++ struct drm_device *dev = adev->ddev;
++ struct drm_encoder *encoder;
++
++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
++
++ switch (amdgpu_encoder->encoder_id) {
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
++ amdgpu_atombios_encoder_setup_dig_transmitter(encoder, ATOM_TRANSMITTER_ACTION_INIT,
++ 0, 0);
++ break;
++ }
++
++ if (ext_encoder)
++ amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
++ EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
++ }
++}
++
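++/* Kick off analog load detection via the DAC_LoadDetection table.  The
++ * result is not returned directly; the BIOS posts it in BIOS_SCRATCH_0,
++ * which the detect callbacks below read back.
++ */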
++static bool
++amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
++ struct drm_connector *connector)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++
++ if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
++ ATOM_DEVICE_CV_SUPPORT |
++ ATOM_DEVICE_CRT_SUPPORT)) {
++ DAC_LOAD_DETECTION_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
++ uint8_t frev, crev;
++
++ memset(&args, 0, sizeof(args));
++
++ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
++ return false;
++
++ args.sDacload.ucMisc = 0;
++
++ if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
++ (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
++ args.sDacload.ucDacType = ATOM_DAC_A;
++ else
++ args.sDacload.ucDacType = ATOM_DAC_B;
++
++ if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
++ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
++ else if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
++ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
++ else if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
++ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
++ if (crev >= 3)
++ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
++ } else if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
++ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
++ if (crev >= 3)
++ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
++ }
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ return true;
++	} else {
++		return false;
++	}
++}
++
++enum drm_connector_status
++amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
++ struct drm_connector *connector)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ uint32_t bios_0_scratch;
++
++ if (!amdgpu_atombios_encoder_dac_load_detect(encoder, connector)) {
++		DRM_DEBUG_KMS("detect returned false\n");
++ return connector_status_unknown;
++ }
++
++ bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
++
++ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, amdgpu_encoder->devices);
++ if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
++ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
++ return connector_status_connected;
++ }
++ if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
++ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
++ return connector_status_connected;
++ }
++ if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
++ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
++ return connector_status_connected;
++ }
++ if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
++ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
++ return connector_status_connected; /* CTV */
++ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
++ return connector_status_connected; /* STV */
++ }
++ return connector_status_disconnected;
++}
++
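++/* Load detection for analog monitors behind a DP bridge: the external
++ * encoder runs the detection and the result is decoded from
++ * BIOS_SCRATCH_0 exactly as in the internal DAC path.
++ */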
++enum drm_connector_status
++amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
++ struct drm_connector *connector)
++{
++ struct drm_device *dev = encoder->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
++ struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
++ u32 bios_0_scratch;
++
++ if (!ext_encoder)
++ return connector_status_unknown;
++
++ if ((amdgpu_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
++ return connector_status_unknown;
++
++ /* load detect on the dp bridge */
++ amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
++ EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
++
++ bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
++
++ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, amdgpu_encoder->devices);
++ if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
++ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
++ return connector_status_connected;
++ }
++ if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
++ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
++ return connector_status_connected;
++ }
++ if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
++ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
++ return connector_status_connected;
++ }
++ if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
++ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
++ return connector_status_connected; /* CTV */
++ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
++ return connector_status_connected; /* STV */
++ }
++ return connector_status_disconnected;
++}
++
++void
++amdgpu_atombios_encoder_setup_ext_encoder_ddc(struct drm_encoder *encoder)
++{
++ struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
++
++ if (ext_encoder)
++ /* ddc_setup on the dp bridge */
++ amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
++ EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
++}
++
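++/* Mirror connector state into the BIOS scratch registers so the VBIOS
++ * sees which LCD/CRT/DFP devices are currently attached and active.
++ */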
++void
++amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
++ struct drm_encoder *encoder,
++ bool connected)
++{
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_connector *amdgpu_connector =
++ to_amdgpu_connector(connector);
++ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
++ uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
++
++ bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
++ bios_3_scratch = RREG32(mmBIOS_SCRATCH_3);
++ bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
++
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("LCD1 connected\n");
++ bios_0_scratch |= ATOM_S0_LCD1;
++ bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
++ } else {
++ DRM_DEBUG_KMS("LCD1 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_LCD1;
++ bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
++ }
++ }
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("CRT1 connected\n");
++ bios_0_scratch |= ATOM_S0_CRT1_COLOR;
++ bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
++ } else {
++ DRM_DEBUG_KMS("CRT1 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
++ bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
++ }
++ }
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("CRT2 connected\n");
++ bios_0_scratch |= ATOM_S0_CRT2_COLOR;
++ bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
++ } else {
++ DRM_DEBUG_KMS("CRT2 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
++ bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
++ }
++ }
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("DFP1 connected\n");
++ bios_0_scratch |= ATOM_S0_DFP1;
++ bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
++ } else {
++ DRM_DEBUG_KMS("DFP1 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_DFP1;
++ bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
++ }
++ }
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("DFP2 connected\n");
++ bios_0_scratch |= ATOM_S0_DFP2;
++ bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
++ } else {
++ DRM_DEBUG_KMS("DFP2 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_DFP2;
++ bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
++ }
++ }
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("DFP3 connected\n");
++ bios_0_scratch |= ATOM_S0_DFP3;
++ bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
++ } else {
++ DRM_DEBUG_KMS("DFP3 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_DFP3;
++ bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
++ }
++ }
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("DFP4 connected\n");
++ bios_0_scratch |= ATOM_S0_DFP4;
++ bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
++ } else {
++ DRM_DEBUG_KMS("DFP4 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_DFP4;
++ bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
++ }
++ }
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("DFP5 connected\n");
++ bios_0_scratch |= ATOM_S0_DFP5;
++ bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
++ } else {
++ DRM_DEBUG_KMS("DFP5 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_DFP5;
++ bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
++ }
++ }
++ if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
++ (amdgpu_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
++ if (connected) {
++ DRM_DEBUG_KMS("DFP6 connected\n");
++ bios_0_scratch |= ATOM_S0_DFP6;
++ bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
++ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
++ } else {
++ DRM_DEBUG_KMS("DFP6 disconnected\n");
++ bios_0_scratch &= ~ATOM_S0_DFP6;
++ bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
++ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
++ }
++ }
++
++ WREG32(mmBIOS_SCRATCH_0, bios_0_scratch);
++ WREG32(mmBIOS_SCRATCH_3, bios_3_scratch);
++ WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
++}
++
++union lvds_info {
++ struct _ATOM_LVDS_INFO info;
++ struct _ATOM_LVDS_INFO_V12 info_12;
++};
++
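++/* Build the panel's native mode from the LVDS_Info data table: timings
++ * come from sLCDTiming, sync polarity and interlace flags from the misc
++ * word, and optional patch records may override the EDID and the
++ * physical panel size.
++ */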
++struct amdgpu_encoder_atom_dig *
++amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
++{
++ struct drm_device *dev = encoder->base.dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct amdgpu_mode_info *mode_info = &adev->mode_info;
++ int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
++ uint16_t data_offset, misc;
++ union lvds_info *lvds_info;
++ uint8_t frev, crev;
++ struct amdgpu_encoder_atom_dig *lvds = NULL;
++ int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
++
++ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
++ &frev, &crev, &data_offset)) {
++ lvds_info =
++ (union lvds_info *)(mode_info->atom_context->bios + data_offset);
++ lvds =
++ kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL);
++
++ if (!lvds)
++ return NULL;
++
++ lvds->native_mode.clock =
++ le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
++ lvds->native_mode.hdisplay =
++ le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
++ lvds->native_mode.vdisplay =
++ le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
++ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
++ le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
++ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
++ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
++ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
++ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
++ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
++ le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
++ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
++ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
++ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
++ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
++ lvds->panel_pwr_delay =
++ le16_to_cpu(lvds_info->info.usOffDelayInMs);
++ lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
++
++ misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
++ if (misc & ATOM_VSYNC_POLARITY)
++ lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
++ if (misc & ATOM_HSYNC_POLARITY)
++ lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
++ if (misc & ATOM_COMPOSITESYNC)
++ lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
++ if (misc & ATOM_INTERLACE)
++ lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
++ if (misc & ATOM_DOUBLE_CLOCK_MODE)
++ lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
++
++ lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
++ lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
++
++ /* set crtc values */
++ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
++
++ lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
++
++ encoder->native_mode = lvds->native_mode;
++
++		lvds->linkb = (encoder_enum == 2);
++
++ /* parse the lcd record table */
++ if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
++ ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
++ ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
++ bool bad_record = false;
++ u8 *record;
++
++ if ((frev == 1) && (crev < 2))
++ /* absolute */
++ record = (u8 *)(mode_info->atom_context->bios +
++ le16_to_cpu(lvds_info->info.usModePatchTableOffset));
++ else
++ /* relative */
++ record = (u8 *)(mode_info->atom_context->bios +
++ data_offset +
++ le16_to_cpu(lvds_info->info.usModePatchTableOffset));
++ while (*record != ATOM_RECORD_END_TYPE) {
++ switch (*record) {
++ case LCD_MODE_PATCH_RECORD_MODE_TYPE:
++ record += sizeof(ATOM_PATCH_RECORD_MODE);
++ break;
++ case LCD_RTS_RECORD_TYPE:
++ record += sizeof(ATOM_LCD_RTS_RECORD);
++ break;
++ case LCD_CAP_RECORD_TYPE:
++ record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
++ break;
++ case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
++ fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
++ if (fake_edid_record->ucFakeEDIDLength) {
++ struct edid *edid;
++ int edid_size =
++ max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
++ edid = kmalloc(edid_size, GFP_KERNEL);
++ if (edid) {
++ memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
++ fake_edid_record->ucFakeEDIDLength);
++
++ if (drm_edid_is_valid(edid)) {
++ adev->mode_info.bios_hardcoded_edid = edid;
++ adev->mode_info.bios_hardcoded_edid_size = edid_size;
++ } else
++ kfree(edid);
++ }
++ }
++ record += fake_edid_record->ucFakeEDIDLength ?
++ fake_edid_record->ucFakeEDIDLength + 2 :
++ sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
++ break;
++ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
++ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
++ lvds->native_mode.width_mm = panel_res_record->usHSize;
++ lvds->native_mode.height_mm = panel_res_record->usVSize;
++ record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
++ break;
++ default:
++ DRM_ERROR("Bad LCD record %d\n", *record);
++ bad_record = true;
++ break;
++ }
++ if (bad_record)
++ break;
++ }
++ }
++ }
++ return lvds;
++}
++
++struct amdgpu_encoder_atom_dig *
++amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder)
++{
++ int encoder_enum = (amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
++ struct amdgpu_encoder_atom_dig *dig = kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL);
++
++ if (!dig)
++ return NULL;
++
++ /* coherent mode by default */
++ dig->coherent_mode = true;
++ dig->dig_encoder = -1;
++
++	dig->linkb = (encoder_enum == 2);
++
++ return dig;
++}
++
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+new file mode 100644
+index 0000000..2bdec40
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+@@ -0,0 +1,73 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __ATOMBIOS_ENCODER_H__
++#define __ATOMBIOS_ENCODER_H__
++
++u8
++amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
++void
++amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
++ u8 level);
++void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder,
++ struct drm_connector *drm_connector);
++void
++amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder);
++bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder);
++bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
++ const struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode);
++int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder);
++void
++amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
++ int action, int panel_mode);
++void
++amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
++ uint8_t lane_num, uint8_t lane_set);
++bool
++amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
++ int action);
++void
++amdgpu_atombios_encoder_dpms(struct drm_encoder *encoder, int mode);
++void
++amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder);
++void
++amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev);
++enum drm_connector_status
++amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
++ struct drm_connector *connector);
++enum drm_connector_status
++amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
++ struct drm_connector *connector);
++void
++amdgpu_atombios_encoder_setup_ext_encoder_ddc(struct drm_encoder *encoder);
++void
++amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
++ struct drm_encoder *encoder,
++ bool connected);
++struct amdgpu_encoder_atom_dig *
++amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder);
++struct amdgpu_encoder_atom_dig *
++amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+new file mode 100644
+index 0000000..13cdb01
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+@@ -0,0 +1,158 @@
++/*
++ * Copyright 2011 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Alex Deucher
++ *
++ */
++#include <drm/drmP.h>
++#include <drm/amdgpu_drm.h>
++#include "amdgpu.h"
++#include "atom.h"
++#include "amdgpu_atombios.h"
++
++#define TARGET_HW_I2C_CLOCK 50
++
++/* these are a limitation of ProcessI2cChannelTransaction not the hw */
++#define ATOM_MAX_HW_I2C_WRITE 3
++#define ATOM_MAX_HW_I2C_READ 255
++
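++/* Run one transaction on a hardware-assisted I2C channel via the
++ * ProcessI2cChannelTransaction table.  Writes carry at most the register
++ * index plus two payload bytes and reads at most 255 bytes per call;
++ * larger transfers must be split by the caller.
++ */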
++static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
++ u8 slave_addr, u8 flags,
++ u8 *buf, u8 num)
++{
++ struct drm_device *dev = chan->dev;
++ struct amdgpu_device *adev = dev->dev_private;
++ PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
++ int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
++ unsigned char *base;
++ u16 out = cpu_to_le16(0);
++ int r = 0;
++
++ memset(&args, 0, sizeof(args));
++
++ mutex_lock(&chan->mutex);
++
++ base = (unsigned char *)adev->mode_info.atom_context->scratch;
++
++ if (flags & HW_I2C_WRITE) {
++ if (num > ATOM_MAX_HW_I2C_WRITE) {
++ DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
++ r = -EINVAL;
++ goto done;
++ }
++ if (buf == NULL)
++ args.ucRegIndex = 0;
++ else
++ args.ucRegIndex = buf[0];
++ if (num)
++ num--;
++ if (num)
++ memcpy(&out, &buf[1], num);
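++		/* 'out' was filled byte-wise above and already holds the
++		 * bytes in bus order; the cpu_to_le16() below is a no-op on
++		 * little-endian hosts but swaps them again on big-endian.
++		 */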
++ args.lpI2CDataOut = cpu_to_le16(out);
++ } else {
++ if (num > ATOM_MAX_HW_I2C_READ) {
++ DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
++ r = -EINVAL;
++ goto done;
++ }
++ args.ucRegIndex = 0;
++ args.lpI2CDataOut = 0;
++ }
++
++ args.ucFlag = flags;
++ args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
++ args.ucTransBytes = num;
++ args.ucSlaveAddr = slave_addr << 1;
++ args.ucLineNumber = chan->rec.i2c_id;
++
++ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
++
++ /* error */
++ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
++ DRM_DEBUG_KMS("hw_i2c error\n");
++ r = -EIO;
++ goto done;
++ }
++
++ if (!(flags & HW_I2C_WRITE))
++ amdgpu_atombios_copy_swap(buf, base, num, false);
++
++done:
++ mutex_unlock(&chan->mutex);
++
++ return r;
++}
++
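++/* i2c_algorithm master_xfer hook: a single zero-length message is
++ * treated as a bus probe, and every other message is chunked to the
++ * table's per-call read/write limits before being handed to the BIOS.
++ */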
++int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
++ struct i2c_msg *msgs, int num)
++{
++ struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
++ struct i2c_msg *p;
++ int i, remaining, current_count, buffer_offset, max_bytes, ret;
++ u8 flags;
++
++ /* check for bus probe */
++ p = &msgs[0];
++ if ((num == 1) && (p->len == 0)) {
++ ret = amdgpu_atombios_i2c_process_i2c_ch(i2c,
++ p->addr, HW_I2C_WRITE,
++ NULL, 0);
++ if (ret)
++ return ret;
++ else
++ return num;
++ }
++
++ for (i = 0; i < num; i++) {
++ p = &msgs[i];
++ remaining = p->len;
++ buffer_offset = 0;
++ /* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */
++ if (p->flags & I2C_M_RD) {
++ max_bytes = ATOM_MAX_HW_I2C_READ;
++ flags = HW_I2C_READ;
++ } else {
++ max_bytes = ATOM_MAX_HW_I2C_WRITE;
++ flags = HW_I2C_WRITE;
++ }
++ while (remaining) {
++ if (remaining > max_bytes)
++ current_count = max_bytes;
++ else
++ current_count = remaining;
++ ret = amdgpu_atombios_i2c_process_i2c_ch(i2c,
++ p->addr, flags,
++ &p->buf[buffer_offset], current_count);
++ if (ret)
++ return ret;
++ remaining -= current_count;
++ buffer_offset += current_count;
++ }
++ }
++
++ return num;
++}
++
++u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++}
++
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
+new file mode 100644
+index 0000000..d6128d9d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
+@@ -0,0 +1,31 @@
++/*
++ * Copyright 2014 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __ATOMBIOS_I2C_H__
++#define __ATOMBIOS_I2C_H__
++
++int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
++ struct i2c_msg *msgs, int num);
++u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
+new file mode 100644
+index 0000000..11828e2
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
+@@ -0,0 +1,550 @@
++/*
++ * Copyright 2012 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Alex Deucher
++ */
++#ifndef CIK_H
++#define CIK_H
++
++#define MC_SEQ_MISC0__GDDR5__SHIFT 0x1c
++#define MC_SEQ_MISC0__GDDR5_MASK 0xf0000000
++#define MC_SEQ_MISC0__GDDR5_VALUE 5
++
++#define CP_ME_TABLE_SIZE 96
++
++/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
++#define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c)
++#define CRTC1_REGISTER_OFFSET (0x1e7c - 0x1b7c)
++#define CRTC2_REGISTER_OFFSET (0x417c - 0x1b7c)
++#define CRTC3_REGISTER_OFFSET (0x447c - 0x1b7c)
++#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c)
++#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c)
++
++#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
++#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
++
++#define CIK_RB_BITMAP_WIDTH_PER_SH 2
++#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
++
++#define AMDGPU_NUM_OF_VMIDS 8
++
++#define PIPEID(x) ((x) << 0)
++#define MEID(x) ((x) << 2)
++#define VMID(x) ((x) << 4)
++#define QUEUEID(x) ((x) << 8)
++
++#define mmCC_DRM_ID_STRAPS 0x1559
++#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
++
++#define mmCHUB_CONTROL 0x619
++#define BYPASS_VM (1 << 0)
++
++#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
++
++#define mmGRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02
++#define LUT_10BIT_BYPASS_EN (1 << 8)
++
++# define CURSOR_MONO 0
++# define CURSOR_24_1 1
++# define CURSOR_24_8_PRE_MULT 2
++# define CURSOR_24_8_UNPRE_MULT 3
++# define CURSOR_URGENT_ALWAYS 0
++# define CURSOR_URGENT_1_8 1
++# define CURSOR_URGENT_1_4 2
++# define CURSOR_URGENT_3_8 3
++# define CURSOR_URGENT_1_2 4
++
++# define GRPH_DEPTH_8BPP 0
++# define GRPH_DEPTH_16BPP 1
++# define GRPH_DEPTH_32BPP 2
++/* 8 BPP */
++# define GRPH_FORMAT_INDEXED 0
++/* 16 BPP */
++# define GRPH_FORMAT_ARGB1555 0
++# define GRPH_FORMAT_ARGB565 1
++# define GRPH_FORMAT_ARGB4444 2
++# define GRPH_FORMAT_AI88 3
++# define GRPH_FORMAT_MONO16 4
++# define GRPH_FORMAT_BGRA5551 5
++/* 32 BPP */
++# define GRPH_FORMAT_ARGB8888 0
++# define GRPH_FORMAT_ARGB2101010 1
++# define GRPH_FORMAT_32BPP_DIG 2
++# define GRPH_FORMAT_8B_ARGB2101010 3
++# define GRPH_FORMAT_BGRA1010102 4
++# define GRPH_FORMAT_8B_BGRA1010102 5
++# define GRPH_FORMAT_RGB111110 6
++# define GRPH_FORMAT_BGR101111 7
++# define ADDR_SURF_MACRO_TILE_ASPECT_1 0
++# define ADDR_SURF_MACRO_TILE_ASPECT_2 1
++# define ADDR_SURF_MACRO_TILE_ASPECT_4 2
++# define ADDR_SURF_MACRO_TILE_ASPECT_8 3
++# define GRPH_ARRAY_LINEAR_GENERAL 0
++# define GRPH_ARRAY_LINEAR_ALIGNED 1
++# define GRPH_ARRAY_1D_TILED_THIN1 2
++# define GRPH_ARRAY_2D_TILED_THIN1 4
++# define DISPLAY_MICRO_TILING 0
++# define THIN_MICRO_TILING 1
++# define DEPTH_MICRO_TILING 2
++# define ROTATED_MICRO_TILING 4
++# define GRPH_ENDIAN_NONE 0
++# define GRPH_ENDIAN_8IN16 1
++# define GRPH_ENDIAN_8IN32 2
++# define GRPH_ENDIAN_8IN64 3
++# define GRPH_RED_SEL_R 0
++# define GRPH_RED_SEL_G 1
++# define GRPH_RED_SEL_B 2
++# define GRPH_RED_SEL_A 3
++# define GRPH_GREEN_SEL_G 0
++# define GRPH_GREEN_SEL_B 1
++# define GRPH_GREEN_SEL_A 2
++# define GRPH_GREEN_SEL_R 3
++# define GRPH_BLUE_SEL_B 0
++# define GRPH_BLUE_SEL_A 1
++# define GRPH_BLUE_SEL_R 2
++# define GRPH_BLUE_SEL_G 3
++# define GRPH_ALPHA_SEL_A 0
++# define GRPH_ALPHA_SEL_R 1
++# define GRPH_ALPHA_SEL_G 2
++# define GRPH_ALPHA_SEL_B 3
++# define INPUT_GAMMA_USE_LUT 0
++# define INPUT_GAMMA_BYPASS 1
++# define INPUT_GAMMA_SRGB_24 2
++# define INPUT_GAMMA_XVYCC_222 3
++
++# define INPUT_CSC_BYPASS 0
++# define INPUT_CSC_PROG_COEFF 1
++# define INPUT_CSC_PROG_SHARED_MATRIXA 2
++
++# define OUTPUT_CSC_BYPASS 0
++# define OUTPUT_CSC_TV_RGB 1
++# define OUTPUT_CSC_YCBCR_601 2
++# define OUTPUT_CSC_YCBCR_709 3
++# define OUTPUT_CSC_PROG_COEFF 4
++# define OUTPUT_CSC_PROG_SHARED_MATRIXB 5
++
++# define DEGAMMA_BYPASS 0
++# define DEGAMMA_SRGB_24 1
++# define DEGAMMA_XVYCC_222 2
++# define GAMUT_REMAP_BYPASS 0
++# define GAMUT_REMAP_PROG_COEFF 1
++# define GAMUT_REMAP_PROG_SHARED_MATRIXA 2
++# define GAMUT_REMAP_PROG_SHARED_MATRIXB 3
++
++# define REGAMMA_BYPASS 0
++# define REGAMMA_SRGB_24 1
++# define REGAMMA_XVYCC_222 2
++# define REGAMMA_PROG_A 3
++# define REGAMMA_PROG_B 4
++
++# define FMT_CLAMP_6BPC 0
++# define FMT_CLAMP_8BPC 1
++# define FMT_CLAMP_10BPC 2
++
++# define HDMI_24BIT_DEEP_COLOR 0
++# define HDMI_30BIT_DEEP_COLOR 1
++# define HDMI_36BIT_DEEP_COLOR 2
++# define HDMI_ACR_HW 0
++# define HDMI_ACR_32 1
++# define HDMI_ACR_44 2
++# define HDMI_ACR_48 3
++# define HDMI_ACR_X1 1
++# define HDMI_ACR_X2 2
++# define HDMI_ACR_X4 4
++# define AFMT_AVI_INFO_Y_RGB 0
++# define AFMT_AVI_INFO_Y_YCBCR422 1
++# define AFMT_AVI_INFO_Y_YCBCR444 2
++
++#define NO_AUTO 0
++#define ES_AUTO 1
++#define GS_AUTO 2
++#define ES_AND_GS_AUTO 3
++
++# define ARRAY_MODE(x) ((x) << 2)
++# define PIPE_CONFIG(x) ((x) << 6)
++# define TILE_SPLIT(x) ((x) << 11)
++# define MICRO_TILE_MODE_NEW(x) ((x) << 22)
++# define SAMPLE_SPLIT(x) ((x) << 25)
++# define BANK_WIDTH(x) ((x) << 0)
++# define BANK_HEIGHT(x) ((x) << 2)
++# define MACRO_TILE_ASPECT(x) ((x) << 4)
++# define NUM_BANKS(x) ((x) << 6)
++
++#define MSG_ENTER_RLC_SAFE_MODE 1
++#define MSG_EXIT_RLC_SAFE_MODE 0
++
++/*
++ * PM4
++ */
++#define PACKET_TYPE0 0
++#define PACKET_TYPE1 1
++#define PACKET_TYPE2 2
++#define PACKET_TYPE3 3
++
++#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
++#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
++#define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF)
++#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
++#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
++ ((reg) & 0xFFFF) | \
++ ((n) & 0x3FFF) << 16)
++#define CP_PACKET2 0x80000000
++#define PACKET2_PAD_SHIFT 0
++#define PACKET2_PAD_MASK (0x3fffffff << 0)
++
++#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
++
++#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
++ (((op) & 0xFF) << 8) | \
++ ((n) & 0x3FFF) << 16)
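++/* PM4 header layout as encoded by the macros above: packet type in bits
++ * [31:30], count field in bits [29:16] and, for type 3, the opcode in
++ * bits [15:8].  For example, PACKET3(PACKET3_NOP, 0) yields 0xC0001000
++ * = (3 << 30) | (0x10 << 8) | (0 << 16).
++ */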
++
++#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
++
++/* Packet 3 types */
++#define PACKET3_NOP 0x10
++#define PACKET3_SET_BASE 0x11
++#define PACKET3_BASE_INDEX(x) ((x) << 0)
++#define CE_PARTITION_BASE 3
++#define PACKET3_CLEAR_STATE 0x12
++#define PACKET3_INDEX_BUFFER_SIZE 0x13
++#define PACKET3_DISPATCH_DIRECT 0x15
++#define PACKET3_DISPATCH_INDIRECT 0x16
++#define PACKET3_ATOMIC_GDS 0x1D
++#define PACKET3_ATOMIC_MEM 0x1E
++#define PACKET3_OCCLUSION_QUERY 0x1F
++#define PACKET3_SET_PREDICATION 0x20
++#define PACKET3_REG_RMW 0x21
++#define PACKET3_COND_EXEC 0x22
++#define PACKET3_PRED_EXEC 0x23
++#define PACKET3_DRAW_INDIRECT 0x24
++#define PACKET3_DRAW_INDEX_INDIRECT 0x25
++#define PACKET3_INDEX_BASE 0x26
++#define PACKET3_DRAW_INDEX_2 0x27
++#define PACKET3_CONTEXT_CONTROL 0x28
++#define PACKET3_INDEX_TYPE 0x2A
++#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
++#define PACKET3_DRAW_INDEX_AUTO 0x2D
++#define PACKET3_NUM_INSTANCES 0x2F
++#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
++#define PACKET3_INDIRECT_BUFFER_CONST 0x33
++#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
++#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
++#define PACKET3_DRAW_PREAMBLE 0x36
++#define PACKET3_WRITE_DATA 0x37
++#define WRITE_DATA_DST_SEL(x) ((x) << 8)
++ /* 0 - register
++ * 1 - memory (sync - via GRBM)
++ * 2 - gl2
++ * 3 - gds
++ * 4 - reserved
++ * 5 - memory (async - direct)
++ */
++#define WR_ONE_ADDR (1 << 16)
++#define WR_CONFIRM (1 << 20)
++#define WRITE_DATA_CACHE_POLICY(x) ((x) << 25)
++ /* 0 - LRU
++ * 1 - Stream
++ */
++#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
++ /* 0 - me
++ * 1 - pfp
++ * 2 - ce
++ */
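++
++/* Illustrative sketch (not part of the original change): a WRITE_DATA
++ * packet is a control dword built from the selectors above, followed by
++ * the destination address and the payload; the count of 3 covers the four
++ * body dwords minus one.  Field choices are examples only; kernel u32/u64
++ * and lower/upper_32_bits() are assumed.
++ */
++#if 0 /* example usage */
++static void example_write_data(u32 *ib, unsigned int *idx, u64 dst, u32 value)
++{
++        ib[(*idx)++] = PACKET3(PACKET3_WRITE_DATA, 3);
++        ib[(*idx)++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; /* async memory */
++        ib[(*idx)++] = lower_32_bits(dst);
++        ib[(*idx)++] = upper_32_bits(dst);
++        ib[(*idx)++] = value;
++}
++#endif
++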
++#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
++#define PACKET3_MEM_SEMAPHORE 0x39
++# define PACKET3_SEM_USE_MAILBOX (0x1 << 16)
++# define PACKET3_SEM_SEL_SIGNAL_TYPE (0x1 << 20) /* 0 = increment, 1 = write 1 */
++# define PACKET3_SEM_CLIENT_CODE(x) ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */
++# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
++# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
++#define PACKET3_COPY_DW 0x3B
++#define PACKET3_WAIT_REG_MEM 0x3C
++#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
++ /* 0 - always
++ * 1 - <
++ * 2 - <=
++ * 3 - ==
++ * 4 - !=
++ * 5 - >=
++ * 6 - >
++ */
++#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
++ /* 0 - reg
++ * 1 - mem
++ */
++#define WAIT_REG_MEM_OPERATION(x) ((x) << 6)
++ /* 0 - wait_reg_mem
++ * 1 - wr_wait_wr_reg
++ */
++#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
++ /* 0 - me
++ * 1 - pfp
++ */
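++
++/* Illustrative sketch (not part of the original change): polling a memory
++ * location until it equals a reference value, as a driver does when
++ * waiting on a fence.  The trailing dword is the poll interval; 0x20 is a
++ * placeholder value.
++ */
++#if 0 /* example usage */
++static void example_wait_mem_eq(u32 *ib, unsigned int *idx, u64 addr,
++                                u32 ref, u32 mask)
++{
++        ib[(*idx)++] = PACKET3(PACKET3_WAIT_REG_MEM, 5);
++        ib[(*idx)++] = WAIT_REG_MEM_FUNCTION(3) |  /* == */
++                       WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
++                       WAIT_REG_MEM_ENGINE(0);     /* me */
++        ib[(*idx)++] = lower_32_bits(addr);
++        ib[(*idx)++] = upper_32_bits(addr);
++        ib[(*idx)++] = ref;
++        ib[(*idx)++] = mask;
++        ib[(*idx)++] = 0x20;                       /* poll interval */
++}
++#endif
++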
++#define PACKET3_INDIRECT_BUFFER 0x3F
++#define INDIRECT_BUFFER_TCL2_VOLATILE (1 << 22)
++#define INDIRECT_BUFFER_VALID (1 << 23)
++#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
++ /* 0 - LRU
++ * 1 - Stream
++ * 2 - Bypass
++ */
++#define PACKET3_COPY_DATA 0x40
++#define PACKET3_PFP_SYNC_ME 0x42
++#define PACKET3_SURFACE_SYNC 0x43
++# define PACKET3_DEST_BASE_0_ENA (1 << 0)
++# define PACKET3_DEST_BASE_1_ENA (1 << 1)
++# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
++# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
++# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
++# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
++# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
++# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
++# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
++# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
++# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
++# define PACKET3_TCL1_VOL_ACTION_ENA (1 << 15)
++# define PACKET3_TC_VOL_ACTION_ENA (1 << 16) /* L2 */
++# define PACKET3_TC_WB_ACTION_ENA (1 << 18) /* L2 */
++# define PACKET3_DEST_BASE_2_ENA (1 << 19)
++# define PACKET3_DEST_BASE_3_ENA (1 << 21)
++# define PACKET3_TCL1_ACTION_ENA (1 << 22)
++# define PACKET3_TC_ACTION_ENA (1 << 23) /* L2 */
++# define PACKET3_CB_ACTION_ENA (1 << 25)
++# define PACKET3_DB_ACTION_ENA (1 << 26)
++# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
++# define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28)
++# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
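++
++/* Illustrative sketch (not part of the original change): a SURFACE_SYNC
++ * packet carries a coherency-control dword built from the *_ACTION_ENA
++ * flags above, then a size, a base and a poll interval.  The flag
++ * selection is an example; the size shown covers the whole address space.
++ */
++#if 0 /* example usage */
++static void example_surface_sync(u32 *ib, unsigned int *idx)
++{
++        ib[(*idx)++] = PACKET3(PACKET3_SURFACE_SYNC, 3);
++        ib[(*idx)++] = PACKET3_TC_ACTION_ENA |        /* flush L2 */
++                       PACKET3_SH_KCACHE_ACTION_ENA |
++                       PACKET3_SH_ICACHE_ACTION_ENA;
++        ib[(*idx)++] = 0xffffffff;                    /* CP_COHER_SIZE */
++        ib[(*idx)++] = 0;                             /* CP_COHER_BASE */
++        ib[(*idx)++] = 0x0000000A;                    /* poll interval */
++}
++#endif
++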
++#define PACKET3_COND_WRITE 0x45
++#define PACKET3_EVENT_WRITE 0x46
++#define EVENT_TYPE(x) ((x) << 0)
++#define EVENT_INDEX(x) ((x) << 8)
++ /* 0 - any non-TS event
++ * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_*
++ * 2 - SAMPLE_PIPELINESTAT
++ * 3 - SAMPLE_STREAMOUTSTAT*
++ * 4 - *S_PARTIAL_FLUSH
++ * 5 - EOP events
++ * 6 - EOS events
++ */
++#define PACKET3_EVENT_WRITE_EOP 0x47
++#define EOP_TCL1_VOL_ACTION_EN (1 << 12)
++#define EOP_TC_VOL_ACTION_EN (1 << 13) /* L2 */
++#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
++#define EOP_TCL1_ACTION_EN (1 << 16)
++#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
++#define EOP_TCL2_VOLATILE (1 << 24)
++#define EOP_CACHE_POLICY(x) ((x) << 25)
++ /* 0 - LRU
++ * 1 - Stream
++ * 2 - Bypass
++ */
++#define DATA_SEL(x) ((x) << 29)
++ /* 0 - discard
++ * 1 - send low 32bit data
++ * 2 - send 64bit data
++ * 3 - send 64bit GPU counter value
++ * 4 - send 64bit sys counter value
++ */
++#define INT_SEL(x) ((x) << 24)
++ /* 0 - none
++ * 1 - interrupt only (DATA_SEL = 0)
++ * 2 - interrupt when data write is confirmed
++ */
++#define DST_SEL(x) ((x) << 16)
++ /* 0 - MC
++ * 1 - TC/L2
++ */
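++
++/* Illustrative sketch (not part of the original change): an end-of-pipe
++ * fence write, roughly as a CIK GFX ring emits it.  0x14 is assumed to be
++ * the cache-flush-and-invalidate timestamp event; DATA_SEL(2)/INT_SEL(2)
++ * request a 64-bit data write with an interrupt once it is confirmed.
++ */
++#if 0 /* example usage */
++static void example_eop_fence(u32 *ib, unsigned int *idx, u64 addr, u64 seq)
++{
++        ib[(*idx)++] = PACKET3(PACKET3_EVENT_WRITE_EOP, 4);
++        ib[(*idx)++] = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
++                       EVENT_TYPE(0x14) | EVENT_INDEX(5);
++        ib[(*idx)++] = lower_32_bits(addr);
++        ib[(*idx)++] = (upper_32_bits(addr) & 0xffff) |
++                       DATA_SEL(2) | INT_SEL(2);
++        ib[(*idx)++] = lower_32_bits(seq);
++        ib[(*idx)++] = upper_32_bits(seq);
++}
++#endif
++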
++#define PACKET3_EVENT_WRITE_EOS 0x48
++#define PACKET3_RELEASE_MEM 0x49
++#define PACKET3_PREAMBLE_CNTL 0x4A
++# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
++# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
++#define PACKET3_DMA_DATA 0x50
++/* 1. header
++ * 2. CONTROL
++ * 3. SRC_ADDR_LO or DATA [31:0]
++ * 4. SRC_ADDR_HI [31:0]
++ * 5. DST_ADDR_LO [31:0]
++ * 6. DST_ADDR_HI [7:0]
++ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
++ */
++/* CONTROL */
++# define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0)
++ /* 0 - ME
++ * 1 - PFP
++ */
++# define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
++ /* 0 - LRU
++ * 1 - Stream
++ * 2 - Bypass
++ */
++# define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
++# define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20)
++ /* 0 - DST_ADDR using DAS
++ * 1 - GDS
++ * 3 - DST_ADDR using L2
++ */
++# define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
++ /* 0 - LRU
++ * 1 - Stream
++ * 2 - Bypass
++ */
++# define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
++# define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29)
++ /* 0 - SRC_ADDR using SAS
++ * 1 - GDS
++ * 2 - DATA
++ * 3 - SRC_ADDR using L2
++ */
++# define PACKET3_DMA_DATA_CP_SYNC (1 << 31)
++/* COMMAND */
++# define PACKET3_DMA_DATA_DIS_WC (1 << 21)
++# define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
++ /* 0 - none
++ * 1 - 8 in 16
++ * 2 - 8 in 32
++ * 3 - 8 in 64
++ */
++# define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
++ /* 0 - none
++ * 1 - 8 in 16
++ * 2 - 8 in 32
++ * 3 - 8 in 64
++ */
++# define PACKET3_DMA_DATA_CMD_SAS (1 << 26)
++ /* 0 - memory
++ * 1 - register
++ */
++# define PACKET3_DMA_DATA_CMD_DAS (1 << 27)
++ /* 0 - memory
++ * 1 - register
++ */
++# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
++# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
++# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
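++
++/* Illustrative sketch (not part of the original change): a CP DMA copy
++ * between two GPU addresses, following the seven-dword layout documented
++ * above.  Engine and selector choices are examples only.
++ */
++#if 0 /* example usage */
++static void example_cp_dma_copy(u32 *ib, unsigned int *idx,
++                                u64 src, u64 dst, u32 bytes)
++{
++        ib[(*idx)++] = PACKET3(PACKET3_DMA_DATA, 5);
++        ib[(*idx)++] = PACKET3_DMA_DATA_ENGINE(0) |  /* ME */
++                       PACKET3_DMA_DATA_SRC_SEL(0) | /* SRC_ADDR */
++                       PACKET3_DMA_DATA_DST_SEL(0);  /* DST_ADDR */
++        ib[(*idx)++] = lower_32_bits(src);
++        ib[(*idx)++] = upper_32_bits(src);
++        ib[(*idx)++] = lower_32_bits(dst);
++        ib[(*idx)++] = upper_32_bits(dst);
++        ib[(*idx)++] = bytes & 0x1fffff;             /* BYTE_COUNT [20:0] */
++}
++#endif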
++#define PACKET3_AQUIRE_MEM 0x58 /* sic: "AQUIRE" misspelling is part of the identifier */
++#define PACKET3_REWIND 0x59
++#define PACKET3_LOAD_UCONFIG_REG 0x5E
++#define PACKET3_LOAD_SH_REG 0x5F
++#define PACKET3_LOAD_CONFIG_REG 0x60
++#define PACKET3_LOAD_CONTEXT_REG 0x61
++#define PACKET3_SET_CONFIG_REG 0x68
++#define PACKET3_SET_CONFIG_REG_START 0x00002000
++#define PACKET3_SET_CONFIG_REG_END 0x00002c00
++#define PACKET3_SET_CONTEXT_REG 0x69
++#define PACKET3_SET_CONTEXT_REG_START 0x0000a000
++#define PACKET3_SET_CONTEXT_REG_END 0x0000a400
++#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
++#define PACKET3_SET_SH_REG 0x76
++#define PACKET3_SET_SH_REG_START 0x00002c00
++#define PACKET3_SET_SH_REG_END 0x00003000
++#define PACKET3_SET_SH_REG_OFFSET 0x77
++#define PACKET3_SET_QUEUE_REG 0x78
++#define PACKET3_SET_UCONFIG_REG 0x79
++#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
++#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
++#define PACKET3_SCRATCH_RAM_WRITE 0x7D
++#define PACKET3_SCRATCH_RAM_READ 0x7E
++#define PACKET3_LOAD_CONST_RAM 0x80
++#define PACKET3_WRITE_CONST_RAM 0x81
++#define PACKET3_DUMP_CONST_RAM 0x83
++#define PACKET3_INCREMENT_CE_COUNTER 0x84
++#define PACKET3_INCREMENT_DE_COUNTER 0x85
++#define PACKET3_WAIT_ON_CE_COUNTER 0x86
++#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
++#define PACKET3_SWITCH_BUFFER 0x8B
++
++/* SDMA - first instance at 0xd000, second at 0xd800 */
++#define SDMA0_REGISTER_OFFSET 0x0 /* not a register */
++#define SDMA1_REGISTER_OFFSET 0x200 /* not a register */
++#define SDMA_MAX_INSTANCE 2
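++
++/* Illustrative sketch (not part of the original change): per-instance SDMA
++ * registers are reached by adding the instance offset to the base register
++ * index, e.g. via a small lookup table.
++ */
++#if 0 /* example usage */
++static const u32 example_sdma_offsets[SDMA_MAX_INSTANCE] = {
++        SDMA0_REGISTER_OFFSET,
++        SDMA1_REGISTER_OFFSET
++};
++/* register for instance i: some_sdma_reg + example_sdma_offsets[i] */
++#endif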
++
++#define SDMA_PACKET(op, sub_op, e) ((((e) & 0xFFFF) << 16) | \
++ (((sub_op) & 0xFF) << 8) | \
++ (((op) & 0xFF) << 0))
++/* sDMA opcodes */
++#define SDMA_OPCODE_NOP 0
++#define SDMA_OPCODE_COPY 1
++# define SDMA_COPY_SUB_OPCODE_LINEAR 0
++# define SDMA_COPY_SUB_OPCODE_TILED 1
++# define SDMA_COPY_SUB_OPCODE_SOA 3
++# define SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW 4
++# define SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW 5
++# define SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW 6
++#define SDMA_OPCODE_WRITE 2
++# define SDMA_WRITE_SUB_OPCODE_LINEAR 0
++# define SDMA_WRTIE_SUB_OPCODE_TILED 1 /* sic: "WRTIE" misspelling is part of the identifier */
++#define SDMA_OPCODE_INDIRECT_BUFFER 4
++#define SDMA_OPCODE_FENCE 5
++#define SDMA_OPCODE_TRAP 6
++#define SDMA_OPCODE_SEMAPHORE 7
++# define SDMA_SEMAPHORE_EXTRA_O (1 << 13)
++ /* 0 - increment
++ * 1 - write 1
++ */
++# define SDMA_SEMAPHORE_EXTRA_S (1 << 14)
++ /* 0 - wait
++ * 1 - signal
++ */
++# define SDMA_SEMAPHORE_EXTRA_M (1 << 15)
++ /* mailbox */
++#define SDMA_OPCODE_POLL_REG_MEM 8
++# define SDMA_POLL_REG_MEM_EXTRA_OP(x) ((x) << 10)
++ /* 0 - wait_reg_mem
++ * 1 - wr_wait_wr_reg
++ */
++# define SDMA_POLL_REG_MEM_EXTRA_FUNC(x) ((x) << 12)
++ /* 0 - always
++ * 1 - <
++ * 2 - <=
++ * 3 - ==
++ * 4 - !=
++ * 5 - >=
++ * 6 - >
++ */
++# define SDMA_POLL_REG_MEM_EXTRA_M (1 << 15)
++ /* 0 = register
++ * 1 = memory
++ */
++#define SDMA_OPCODE_COND_EXEC 9
++#define SDMA_OPCODE_CONSTANT_FILL 11
++# define SDMA_CONSTANT_FILL_EXTRA_SIZE(x) ((x) << 14)
++ /* 0 = byte fill
++ * 2 = DW fill
++ */
++#define SDMA_OPCODE_GENERATE_PTE_PDE 12
++#define SDMA_OPCODE_TIMESTAMP 13
++# define SDMA_TIMESTAMP_SUB_OPCODE_SET_LOCAL 0
++# define SDMA_TIMESTAMP_SUB_OPCODE_GET_LOCAL 1
++# define SDMA_TIMESTAMP_SUB_OPCODE_GET_GLOBAL 2
++#define SDMA_OPCODE_SRBM_WRITE 14
++# define SDMA_SRBM_WRITE_EXTRA_BYTE_ENABLE(x) ((x) << 12)
++ /* byte mask */
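++
++/* Illustrative sketch (not part of the original change): a linear SDMA
++ * buffer copy.  The dword layout (byte count, endian-swap word, then the
++ * source and destination addresses) is assumed to match what the CIK SDMA
++ * code emits.
++ */
++#if 0 /* example usage */
++static void example_sdma_copy(u32 *ib, unsigned int *idx,
++                              u64 src, u64 dst, u32 bytes)
++{
++        ib[(*idx)++] = SDMA_PACKET(SDMA_OPCODE_COPY,
++                                   SDMA_COPY_SUB_OPCODE_LINEAR, 0);
++        ib[(*idx)++] = bytes;
++        ib[(*idx)++] = 0;                    /* src/dst endian swap */
++        ib[(*idx)++] = lower_32_bits(src);
++        ib[(*idx)++] = upper_32_bits(src);
++        ib[(*idx)++] = lower_32_bits(dst);
++        ib[(*idx)++] = upper_32_bits(dst);
++}
++#endif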
++
++#define VCE_CMD_NO_OP 0x00000000
++#define VCE_CMD_END 0x00000001
++#define VCE_CMD_IB 0x00000002
++#define VCE_CMD_FENCE 0x00000003
++#define VCE_CMD_TRAP 0x00000004
++#define VCE_CMD_IB_AUTO 0x00000005
++#define VCE_CMD_SEMAPHORE 0x00000006
++
++#endif
+--
+cgit v0.10.2
+